From 023ea4b588ffeb2cca4eddab54f70464cdbcd78b Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 18 Oct 2013 19:36:37 -0500 Subject: [PATCH 01/47] Fix formatting in the protocol BiFs docs --- src/analyzer/protocol/ftp/functions.bif | 26 ++++++++++++------------- src/analyzer/protocol/tcp/functions.bif | 9 ++++----- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/src/analyzer/protocol/ftp/functions.bif b/src/analyzer/protocol/ftp/functions.bif index a667d8ca88..b57b24df20 100644 --- a/src/analyzer/protocol/ftp/functions.bif +++ b/src/analyzer/protocol/ftp/functions.bif @@ -116,11 +116,12 @@ static Val* parse_eftp(const char* line) } %%} -## Converts a string representation of the FTP PORT command to an ``ftp_port``. +## Converts a string representation of the FTP PORT command to an +## :bro:type:`ftp_port`. ## ## s: The string of the FTP PORT command, e.g., ``"10,0,0,1,4,31"``. ## -## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]`` +## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]``. ## ## .. bro:see:: parse_eftp_port parse_ftp_pasv parse_ftp_epsv fmt_ftp_port function parse_ftp_port%(s: string%): ftp_port @@ -128,14 +129,14 @@ function parse_ftp_port%(s: string%): ftp_port return parse_port(s->CheckString()); %} -## Converts a string representation of the FTP EPRT command to an ``ftp_port``. -## See `RFC 2428 `_. -## The format is ``EPRT``, +## Converts a string representation of the FTP EPRT command (see :rfc:`2428`) +## to an :bro:type:`ftp_port`. The format is +## ``"EPRT"``, ## where ```` is a delimiter in the ASCII range 33-126 (usually ``|``). ## ## s: The string of the FTP EPRT command, e.g., ``"|1|10.0.0.1|1055|"``. ## -## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]`` +## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]``. ## ## .. bro:see:: parse_ftp_port parse_ftp_pasv parse_ftp_epsv fmt_ftp_port function parse_eftp_port%(s: string%): ftp_port @@ -143,11 +144,11 @@ function parse_eftp_port%(s: string%): ftp_port return parse_eftp(s->CheckString()); %} -## Converts the result of the FTP PASV command to an ``ftp_port``. +## Converts the result of the FTP PASV command to an :bro:type:`ftp_port`. ## ## str: The string containing the result of the FTP PASV command. ## -## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]`` +## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]``. ## ## .. bro:see:: parse_ftp_port parse_eftp_port parse_ftp_epsv fmt_ftp_port function parse_ftp_pasv%(str: string%): ftp_port @@ -168,14 +169,13 @@ function parse_ftp_pasv%(str: string%): ftp_port return parse_port(line); %} -## Converts the result of the FTP EPSV command to an ``ftp_port``. -## See `RFC 2428 `_. -## The format is `` ()``, where ```` is a -## delimiter in the ASCII range 33-126 (usually ``|``). +## Converts the result of the FTP EPSV command (see :rfc:`2428`) to an +## :bro:type:`ftp_port`. The format is ``" ()"``, +## where ```` is a delimiter in the ASCII range 33-126 (usually ``|``). ## ## str: The string containing the result of the FTP EPSV command. ## -## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]`` +## Returns: The FTP PORT, e.g., ``[h=10.0.0.1, p=1055/tcp, valid=T]``. ## ## .. 
bro:see:: parse_ftp_port parse_eftp_port parse_ftp_pasv fmt_ftp_port function parse_ftp_epsv%(str: string%): ftp_port diff --git a/src/analyzer/protocol/tcp/functions.bif b/src/analyzer/protocol/tcp/functions.bif index f5b0033ae8..9fca05329a 100644 --- a/src/analyzer/protocol/tcp/functions.bif +++ b/src/analyzer/protocol/tcp/functions.bif @@ -93,13 +93,12 @@ function get_gap_summary%(%): gap_info ## ## - ``CONTENTS_NONE``: Stop recording the connection's content. ## - ``CONTENTS_ORIG``: Record the data sent by the connection -## originator (often the client). +## originator (often the client). ## - ``CONTENTS_RESP``: Record the data sent by the connection -## responder (often the server). +## responder (often the server). ## - ``CONTENTS_BOTH``: Record the data sent in both directions. -## Results in the two directions being -## intermixed in the file, in the order the -## data was seen by Bro. +## Results in the two directions being intermixed in the file, +## in the order the data was seen by Bro. ## ## f: The file handle of the file to write the contents to. ## From d367f033dcf8954672fee58588edf2d4bcb77d97 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Sun, 20 Oct 2013 01:32:17 -0500 Subject: [PATCH 02/47] Fix typos and formatting in init-bare.bro and init-default.bro --- scripts/base/init-bare.bro | 562 +++++++++++++++++----------------- scripts/base/init-default.bro | 6 +- 2 files changed, 292 insertions(+), 276 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index e499c7cc7d..4b5b87e073 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -3,61 +3,61 @@ # Type declarations -## An ordered array of strings. The entries are indexed by succesive numbers. Note -## that it depends on the usage whether the first index is zero or one. +## An ordered array of strings. The entries are indexed by successive numbers. +## Note that it depends on the usage whether the first index is zero or one. ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type string_array: table[count] of string; ## A set of strings. ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type string_set: set[string]; ## A set of addresses. ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type addr_set: set[addr]; ## A set of counts. ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. 
todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type count_set: set[count]; ## A vector of counts, used by some builtin functions to store a list of indices. ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type index_vec: vector of count; ## A vector of strings. ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type string_vec: vector of string; ## A vector of addresses. ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type addr_vec: vector of addr; ## A table of strings indexed by strings. ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type table_string_of_string: table[string] of string; ## A connection's transport-layer protocol. Note that Bro uses the term @@ -72,8 +72,8 @@ type transport_proto: enum { ## A connection's identifying 4-tuple of endpoints and ports. ## ## .. note:: It's actually a 5-tuple: the transport-layer protocol is stored as -## part of the port values, `orig_p` and `resp_p`, and can be extracted from them -## with :bro:id:`get_port_transport_proto`. +## part of the port values, `orig_p` and `resp_p`, and can be extracted from +## them with :bro:id:`get_port_transport_proto`. type conn_id: record { orig_h: addr; ##< The originator's IP address. orig_p: port; ##< The originator's port number. @@ -104,15 +104,15 @@ type icmp_context: record { id: conn_id; ##< The packet's 4-tuple. len: count; ##< The length of the IP packet (headers + payload). proto: count; ##< The packet's transport-layer protocol. - frag_offset: count; ##< The packet's fragementation offset. + frag_offset: count; ##< The packet's fragmentation offset. ## True if the packet's IP header is not fully included in the context ## or if there is not enough of the transport header to determine source - ## and destination ports. If that is the cast, the appropriate fields + ## and destination ports. If that is the case, the appropriate fields ## of this record will be set to null values. bad_hdr_len: bool; bad_checksum: bool; ##< True if the packet's IP checksum is not correct. 
- MF: bool; ##< True if the packets *more fragements* flag is set. - DF: bool; ##< True if the packets *don't fragment* flag is set. + MF: bool; ##< True if the packet's *more fragments* flag is set. + DF: bool; ##< True if the packet's *don't fragment* flag is set. }; ## Values extracted from a Prefix Information option in an ICMPv6 neighbor @@ -129,8 +129,8 @@ type icmp6_nd_prefix_info: record { ## Length of time in seconds that the prefix is valid for purpose of ## on-link determination (0xffffffff represents infinity). valid_lifetime: interval; - ## Length of time in seconds that the addresses generated from the prefix - ## via stateless address autoconfiguration remain preferred + ## Length of time in seconds that the addresses generated from the + ## prefix via stateless address autoconfiguration remain preferred ## (0xffffffff represents infinity). preferred_lifetime: interval; ## An IP address or prefix of an IP address. Use the *prefix_len* field @@ -147,8 +147,8 @@ type icmp6_nd_prefix_info: record { type icmp6_nd_option: record { ## 8-bit identifier of the type of option. otype: count; - ## 8-bit integer representing the length of the option (including the type - ## and length fields) in units of 8 octets. + ## 8-bit integer representing the length of the option (including the + ## type and length fields) in units of 8 octets. len: count; ## Source Link-Layer Address (Type 1) or Target Link-Layer Address (Type 2). ## Byte ordering of this is dependent on the actual link-layer. @@ -176,34 +176,35 @@ type icmp6_nd_options: vector of icmp6_nd_option; # .. bro:see:: dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name # dns_mapping_unverified dns_mapping_valid type dns_mapping: record { - ## The time when the mapping was created, which corresponds to the when the DNS - ## query was sent out. + ## The time when the mapping was created, which corresponds to when + ## the DNS query was sent out. creation_time: time; - ## If the mapping is the result of a name lookup, the queried host name; otherwise - ## empty. + ## If the mapping is the result of a name lookup, the queried host name; + ## otherwise empty. req_host: string; - ## If the mapping is the result of a pointer lookup, the queried address; otherwise - ## null. + ## If the mapping is the result of a pointer lookup, the queried + ## address; otherwise null. req_addr: addr; - ## True if the lookup returned success. Only then, the result ields are valid. + ## True if the lookup returned success. Only then are the result fields + ## valid. valid: bool; - ## If the mapping is the result of a pointer lookup, the resolved hostname; - ## otherwise empty. + ## If the mapping is the result of a pointer lookup, the resolved + ## hostname; otherwise empty. hostname: string; - ## If the mapping is the result of an address lookup, the resolved address(es); - ## otherwise empty. + ## If the mapping is the result of an address lookup, the resolved + ## address(es); otherwise empty. addrs: addr_set; }; ## A parsed host/port combination describing server endpoint for an upcoming -## data transfert. +## data transfer. ## ## .. bro:see:: fmt_ftp_port parse_eftp_port parse_ftp_epsv parse_ftp_pasv ## parse_ftp_port type ftp_port: record { h: addr; ##< The host's address. p: port; ##< The host's port. - valid: bool; ##< True if format was right. Only then, *h* and *p* are valid. + valid: bool; ##< True if format was right. Only then are *h* and *p* valid. }; ## Statistics about what a TCP endpoint sent. 
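As a usage sketch for the ``ftp_port`` record documented above together with the FTP parsing BiFs from the first patch (``parse_ftp_port`` and friends) — illustrative only, not part of the diff hunks — a script would typically check the *valid* flag before trusting the host and port fields::

    event bro_init()
        {
        local p = parse_ftp_port("10,0,0,1,4,31");
        if ( p$valid )
            # Per the BiF docs, this yields [h=10.0.0.1, p=1055/tcp, valid=T].
            print fmt("FTP data endpoint: %s on %s", p$h, p$p);
        }
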
@@ -211,14 +212,15 @@ type ftp_port: record { ## .. bro:see:: conn_stats type endpoint_stats: record { num_pkts: count; ##< Number of packets. - num_rxmit: count; ##< Number of retransmission. + num_rxmit: count; ##< Number of retransmissions. num_rxmit_bytes: count; ##< Number of retransmitted bytes. num_in_order: count; ##< Number of in-order packets. - num_OO: count; ##< Number out-of-order packets. + num_OO: count; ##< Number of out-of-order packets. num_repl: count; ##< Number of replicated packets (last packet was sent again). - ## Endian type used by the endpoint, if it it could be determined from the sequence - ## numbers used. This is one of :bro:see:`ENDIAN_UNKNOWN`, :bro:see:`ENDIAN_BIG`, - ## :bro:see:`ENDIAN_LITTLE`, and :bro:see:`ENDIAN_CONFUSED`. + ## Endian type used by the endpoint, if it could be determined from + ## the sequence numbers used. This is one of :bro:see:`ENDIAN_UNKNOWN`, + ## :bro:see:`ENDIAN_BIG`, :bro:see:`ENDIAN_LITTLE`, and + ## :bro:see:`ENDIAN_CONFUSED`. endian_type: count; }; @@ -226,10 +228,11 @@ module Tunnel; export { ## Records the identity of an encapsulating parent of a tunneled connection. type EncapsulatingConn: record { - ## The 4-tuple of the encapsulating "connection". In case of an IP-in-IP - ## tunnel the ports will be set to 0. The direction (i.e., orig and - ## resp) are set according to the first tunneled packet seen - ## and not according to the side that established the tunnel. + ## The 4-tuple of the encapsulating "connection". In case of an + ## IP-in-IP tunnel the ports will be set to 0. The direction + ## (i.e., orig and resp) are set according to the first tunneled + ## packet seen and not according to the side that established + ## the tunnel. cid: conn_id; ## The type of tunnel. tunnel_type: Tunnel::Type; @@ -240,7 +243,7 @@ export { } # end export module GLOBAL; -## A type alias for a vector of encapsulating "connections", i.e for when +## A type alias for a vector of encapsulating "connections", i.e. for when ## there are tunnels within tunnels. ## ## .. todo:: We need this type definition only for declaring builtin functions @@ -253,16 +256,17 @@ type EncapsulatingConnVector: vector of Tunnel::EncapsulatingConn; ## .. bro:see:: connection type endpoint: record { size: count; ##< Logical size of data sent (for TCP: derived from sequence numbers). - ## Endpoint state. For TCP connection, one of the constants: - ## :bro:see:`TCP_INACTIVE` :bro:see:`TCP_SYN_SENT` :bro:see:`TCP_SYN_ACK_SENT` - ## :bro:see:`TCP_PARTIAL` :bro:see:`TCP_ESTABLISHED` :bro:see:`TCP_CLOSED` - ## :bro:see:`TCP_RESET`. For UDP, one of :bro:see:`UDP_ACTIVE` and - ## :bro:see:`UDP_INACTIVE`. + ## Endpoint state. For a TCP connection, one of the constants: + ## :bro:see:`TCP_INACTIVE` :bro:see:`TCP_SYN_SENT` + ## :bro:see:`TCP_SYN_ACK_SENT` :bro:see:`TCP_PARTIAL` + ## :bro:see:`TCP_ESTABLISHED` :bro:see:`TCP_CLOSED` :bro:see:`TCP_RESET`. + ## For UDP, one of :bro:see:`UDP_ACTIVE` and :bro:see:`UDP_INACTIVE`. state: count; - ## Number of packets sent. Only set if :bro:id:`use_conn_size_analyzer` is true. + ## Number of packets sent. Only set if :bro:id:`use_conn_size_analyzer` + ## is true. num_pkts: count &optional; - ## Number of IP-level bytes sent. Only set if :bro:id:`use_conn_size_analyzer` is - ## true. + ## Number of IP-level bytes sent. Only set if + ## :bro:id:`use_conn_size_analyzer` is true. num_bytes_ip: count &optional; ## The current IPv6 flow label that the connection endpoint is using. ## Always 0 if the connection is over IPv4. 
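The ``endpoint`` record above is what shows up as the ``orig`` and ``resp`` fields of a ``connection``. A minimal sketch of reading its optional packet counters (illustrative only, not part of the diff; the counters are only present when :bro:id:`use_conn_size_analyzer` is true, hence the ``?$`` checks)::

    event connection_state_remove(c: connection)
        {
        if ( c$orig?$num_pkts && c$resp?$num_pkts )
            print fmt("%s -> %s: %d/%d packets",
                      c$id$orig_h, c$id$resp_h,
                      c$orig$num_pkts, c$resp$num_pkts);
        }
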
@@ -271,37 +275,38 @@ type endpoint: record { ## A connection. This is Bro's basic connection type describing IP- and ## transport-layer information about the conversation. Note that Bro uses a -## liberal interpreation of "connection" and associates instances of this type +## liberal interpretation of "connection" and associates instances of this type ## also with UDP and ICMP flows. type connection: record { id: conn_id; ##< The connection's identifying 4-tuple. orig: endpoint; ##< Statistics about originator side. resp: endpoint; ##< Statistics about responder side. start_time: time; ##< The timestamp of the connection's first packet. - ## The duration of the conversation. Roughly speaking, this is the interval between - ## first and last data packet (low-level TCP details may adjust it somewhat in - ## ambigious cases). + ## The duration of the conversation. Roughly speaking, this is the + ## interval between first and last data packet (low-level TCP details + ## may adjust it somewhat in ambiguous cases). duration: interval; - ## The set of services the connection is using as determined by Bro's dynamic - ## protocol detection. Each entry is the label of an analyzer that confirmed that - ## it could parse the connection payload. While typically, there will be at - ## most one entry for each connection, in principle it is possible that more than - ## one protocol analyzer is able to parse the same data. If so, all will - ## be recorded. Also note that the recorced services are independent of any - ## transport-level protocols. + ## The set of services the connection is using as determined by Bro's + ## dynamic protocol detection. Each entry is the label of an analyzer + ## that confirmed that it could parse the connection payload. While + ## typically, there will be at most one entry for each connection, in + ## principle it is possible that more than one protocol analyzer is able + ## to parse the same data. If so, all will be recorded. Also note that + ## the recorded services are independent of any transport-level protocols. service: set[string]; addl: string; ##< Deprecated. hot: count; ##< Deprecated. history: string; ##< State history of connections. See *history* in :bro:see:`Conn::Info`. - ## A globally unique connection identifier. For each connection, Bro creates an ID - ## that is very likely unique across independent Bro runs. These IDs can thus be - ## used to tag and locate information associated with that connection. + ## A globally unique connection identifier. For each connection, Bro + ## creates an ID that is very likely unique across independent Bro runs. + ## These IDs can thus be used to tag and locate information associated + ## with that connection. uid: string; ## If the connection is tunneled, this field contains information about ## the encapsulating "connection(s)" with the outermost one starting - ## at index zero. It's also always the first such enapsulation seen - ## for the connection unless the :bro:id:`tunnel_changed` event is handled - ## and re-assigns this field to the new encapsulation. + ## at index zero. It's also always the first such encapsulation seen + ## for the connection unless the :bro:id:`tunnel_changed` event is + ## handled and reassigns this field to the new encapsulation. tunnel: EncapsulatingConnVector &optional; }; @@ -330,7 +335,7 @@ type fa_file: record { ## path which was read, or some other input source. 
source: string; - ## If the source of this file is is a network connection, this field + ## If the source of this file is a network connection, this field ## may be set to indicate the directionality. is_orig: bool &optional; @@ -359,7 +364,7 @@ type fa_file: record { timeout_interval: interval &default=default_file_timeout_interval; ## The number of bytes at the beginning of a file to save for later - ## inspection in *bof_buffer* field. + ## inspection in the *bof_buffer* field. bof_buffer_size: count &default=default_file_bof_buffer_size; ## The content of the beginning of a file up to *bof_buffer_size* bytes. @@ -382,7 +387,7 @@ type SYN_packet: record { size: count; ##< The size of the packet's payload as specified in the IP header. win_size: count; ##< The window size from the TCP header. win_scale: int; ##< The window scale option if present, or -1 if not. - MSS: count; ##< The maximum segement size if present, or 0 if not. + MSS: count; ##< The maximum segment size if present, or 0 if not. SACK_OK: bool; ##< True if the *SACK* option is present. }; @@ -394,9 +399,9 @@ type NetStats: record { pkts_dropped: count &default=0; ##< Packets reported dropped by the system. ## Packets seen on the link. Note that this may differ ## from *pkts_recvd* because of a potential capture_filter. See - ## :doc:`/scripts/base/frameworks/packet-filter/main`. Depending on the packet - ## capture system, this value may not be available and will then be always set to - ## zero. + ## :doc:`/scripts/base/frameworks/packet-filter/main`. Depending on the + ## packet capture system, this value may not be available and will then + ## be always set to zero. pkts_link: count &default=0; }; @@ -425,7 +430,7 @@ type bro_resources: record { num_UDP_conns: count; ##< Current number of UDP flows in memory. num_ICMP_conns: count; ##< Current number of ICMP flows in memory. num_fragments: count; ##< Current number of fragments pending reassembly. - num_packets: count; ##< Total number packets processed to date. + num_packets: count; ##< Total number of packets processed to date. num_timers: count; ##< Current number of pending timers. num_events_queued: count; ##< Total number of events queued so far. num_events_dispatched: count; ##< Total number of events dispatched so far. @@ -433,7 +438,7 @@ type bro_resources: record { max_TCP_conns: count; ##< Maximum number of concurrent TCP connections so far. max_UDP_conns: count; ##< Maximum number of concurrent UDP connections so far. max_ICMP_conns: count; ##< Maximum number of concurrent ICMP connections so far. - max_fragments: count; ##< Maximum number of concurrently buffered fragements so far. + max_fragments: count; ##< Maximum number of concurrently buffered fragments so far. max_timers: count; ##< Maximum number of concurrent timers pending so far. }; @@ -475,9 +480,9 @@ type packet: record { ## ## .. bro:see:: global_sizes ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type var_sizes: table[string] of count; ## Meta-information about a script-level identifier. @@ -497,9 +502,9 @@ type script_id: record { ## ## .. bro:see:: global_ids script_id ## -## .. 
todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type id_table: table[string] of script_id; ## Meta-information about a record-field. @@ -507,24 +512,24 @@ type id_table: table[string] of script_id; ## .. bro:see:: record_fields record_field_table type record_field: record { type_name: string; ##< The name of the field's type. - log: bool; ##< True of the field is declared with :bro:attr:`&log` attribute. + log: bool; ##< True if the field is declared with :bro:attr:`&log` attribute. ## The current value of the field in the record instance passed into ## :bro:see:`record_fields` (if it has one). value: any &optional; default_val: any &optional; ##< The value of the :bro:attr:`&default` attribute if defined. }; -## Table type used to map record field declarations to meta-information describing -## them. +## Table type used to map record field declarations to meta-information +## describing them. ## ## .. bro:see:: record_fields record_field ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type record_field_table: table[string] of record_field; -# todo::Do we still needs these here? Can they move into the packet filter +# todo:: Do we still need these here? Can they move into the packet filter # framework? # # The following two variables are defined here until the core is not @@ -540,15 +545,15 @@ type record_field_table: table[string] of record_field; ## PacketFilter::unrestricted_filter restrict_filters global capture_filters: table[string] of string &redef; -## Set of BPF filters to restrict capturing, indexed by a user-definable ID (which -## must be unique). +## Set of BPF filters to restrict capturing, indexed by a user-definable ID +## (which must be unique). ## ## .. bro:see:: PacketFilter PacketFilter::enable_auto_protocol_capture_filters ## PacketFilter::unrestricted_filter capture_filters global restrict_filters: table[string] of string &redef; ## Enum type identifying dynamic BPF filters. These are used by -## :bro:see:`precompile_pcap_filter` and :bro:see:`precompile_pcap_filter`. +## :bro:see:`precompile_pcap_filter` and :bro:see:`precompile_pcap_filter`. type PcapFilterID: enum { None }; ## Deprecated. @@ -588,7 +593,7 @@ type peer_id: count; ## send_capture_filter send_current_packet send_id send_ping send_state ## set_accept_state set_compression_level ## -## .. todo::The type's name is to narrow these days, should rename. +## .. todo::The type's name is too narrow these days, should rename. type event_peer: record { id: peer_id; ##< Locally unique ID of peer (returned by :bro:id:`connect`). host: addr; ##< The IP address of the peer. @@ -618,7 +623,7 @@ type rotate_info: record { # SW_MULTIPLE, # }; -## Paramerts for the Smith-Waterman algorithm. +## Parameters for the Smith-Waterman algorithm. ## ## .. 
bro:see:: str_smith_waterman type sw_params: record { @@ -656,20 +661,21 @@ type sw_substring: record { ## ## .. bro:see:: str_smith_waterman sw_substring sw_align_vec sw_align sw_params ## -## .. todo:: We need this type definition only for declaring builtin functions via -## ``bifcl``. We should extend ``bifcl`` to understand composite types directly and -## then remove this alias. +## .. todo:: We need this type definition only for declaring builtin functions +## via ``bifcl``. We should extend ``bifcl`` to understand composite types +## directly and then remove this alias. type sw_substring_vec: vector of sw_substring; -## Policy-level representation of a packet passed on by libpcap. The data includes -## the complete packet as returned by libpcap, including the link-layer header. +## Policy-level representation of a packet passed on by libpcap. The data +## includes the complete packet as returned by libpcap, including the link-layer +## header. ## ## .. bro:see:: dump_packet get_current_packet type pcap_packet: record { ts_sec: count; ##< The non-fractional part of the packet's timestamp (i.e., full seconds since the epoch). ts_usec: count; ##< The fractional part of the packet's timestamp. caplen: count; ##< The number of bytes captured (<= *len*). - len: count; ##< The length of the packet in bytes, including ``, which were seen during the sample. +# number>``), which were seen during the sample. type load_sample_info: set[string]; ## ID for NetFlow header. This is primarily a means to sort together NetFlow ## headers and flow records at the script level. type nfheader_id: record { - ## Name of the NetFlow file (e.g., ``netflow.dat``) or the receiving socket address - ## (e.g., ``127.0.0.1:5555``), or an explicit name if specified to - ## ``-y`` or ``-Y``. + ## Name of the NetFlow file (e.g., ``netflow.dat``) or the receiving + ## socket address (e.g., ``127.0.0.1:5555``), or an explicit name if + ## specified to ``-y`` or ``-Y``. rcvr_id: string; ## A serial number, ignoring any overflows. pdu_id: count; @@ -2755,7 +2768,7 @@ const log_max_size = 0.0 &redef; const log_encryption_key = "" &redef; ## Write profiling info into this file in regular intervals. The easiest way to -## activate profiling is loading :doc:`/scripts/policy/misc/profiling`. +## activate profiling is loading :doc:`/scripts/policy/misc/profiling`. ## ## .. bro:see:: profiling_interval expensive_profiling_multiple segment_profiling global profiling_file: file &redef; @@ -2766,8 +2779,8 @@ global profiling_file: file &redef; ## .. bro:see:: profiling_file expensive_profiling_multiple segment_profiling const profiling_interval = 0 secs &redef; -## Multiples of profiling_interval at which (more expensive) memory profiling is -## done (0 disables). +## Multiples of :bro:see:`profiling_interval` at which (more expensive) memory +## profiling is done (0 disables). ## ## .. bro:see:: profiling_interval profiling_file segment_profiling const expensive_profiling_multiple = 0 &redef; @@ -2805,19 +2818,19 @@ global pkt_profile_file: file &redef; ## Rate at which to generate :bro:see:`load_sample` events. As all ## events, the event is only generated if you've also defined a -## :bro:see:`load_sample` handler. Units are inverse number of packets; e.g., a -## value of 20 means "roughly one in every 20 packets". +## :bro:see:`load_sample` handler. Units are inverse number of packets; e.g., +## a value of 20 means "roughly one in every 20 packets". ## ## .. 
bro:see:: load_sample global load_sample_freq = 20 &redef; -## Rate at which to generate :bro:see:`gap_report` events assessing to what degree -## the measurement process appears to exhibit loss. +## Rate at which to generate :bro:see:`gap_report` events assessing to what +## degree the measurement process appears to exhibit loss. ## ## .. bro:see:: gap_report const gap_report_freq = 1.0 sec &redef; -## Whether we want :bro:see:`content_gap` and :bro:see:`gap_report` for partial +## Whether we want :bro:see:`content_gap` and :bro:see:`gap_report` for partial ## connections. A connection is partial if it is missing a full handshake. Note ## that gap reports for partial connections might not be reliable. ## @@ -2826,7 +2839,7 @@ const report_gaps_for_partial = F &redef; ## Flag to prevent Bro from exiting automatically when input is exhausted. ## Normally Bro terminates when all packets sources have gone dry -## and communication isn't enabled. If this flag is set, Bro's main loop will +## and communication isn't enabled. If this flag is set, Bro's main loop will ## instead keep idleing until :bro:see:`terminate` is explicitly called. ## ## This is mainly for testing purposes when termination behaviour needs to be @@ -2849,8 +2862,9 @@ const ssl_private_key = "" &redef; ## .. bro:see:: ssl_private_key ssl_ca_certificate const ssl_passphrase = "" &redef; -## Default mode for Bro's user-space dynamic packet filter. If true, packets that -## aren't explicitly allowed through, are dropped from any further processing. +## Default mode for Bro's user-space dynamic packet filter. If true, packets +## that aren't explicitly allowed through, are dropped from any further +## processing. ## ## .. note:: This is not the BPF packet filter but an additional dynamic filter ## that Bro optionally applies just before normal processing starts. @@ -2873,24 +2887,24 @@ const peer_description = "bro" &redef; ## ## .. bro:see:: forward_remote_state_changes ## -## .. note:: This option is only temporary and will disappear once we get a more -## sophisticated script-level communication framework. +## .. note:: This option is only temporary and will disappear once we get a +## more sophisticated script-level communication framework. const forward_remote_events = F &redef; ## If true, broadcast state updates received from one peer to all other peers. ## ## .. bro:see:: forward_remote_events ## -## .. note:: This option is only temporary and will disappear once we get a more -## sophisticated script-level communication framework. +## .. note:: This option is only temporary and will disappear once we get a +## more sophisticated script-level communication framework. const forward_remote_state_changes = F &redef; ## Place-holder constant indicating "no peer". const PEER_ID_NONE = 0; # Signature payload pattern types. -# todo::use enum to help autodoc -# todo::Still used? +# todo:: use enum to help autodoc +# todo:: Still used? #const SIG_PATTERN_PAYLOAD = 0; #const SIG_PATTERN_HTTP = 1; #const SIG_PATTERN_FTP = 2; @@ -2902,7 +2916,7 @@ const REMOTE_LOG_INFO = 1; ##< Deprecated. const REMOTE_LOG_ERROR = 2; ##< Deprecated. # Source of logging messages from the communication framework. -# todo::these should go into an enum to make them autodoc'able. +# todo:: these should go into an enum to make them autodoc'able. const REMOTE_SRC_CHILD = 1; ##< Message from the child process. const REMOTE_SRC_PARENT = 2; ##< Message from the parent process. const REMOTE_SRC_SCRIPT = 3; ##< Message from a policy script. 
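The constants touched in the surrounding hunks are all declared with ``&redef``, so a site policy can tune them without editing ``init-bare.bro`` itself. A minimal sketch (values purely illustrative, not part of the diff)::

    redef peer_description = "edge-sensor-01";  # label this instance in remote communication
    redef load_sample_freq = 100;               # roughly one load_sample event per 100 packets
    redef report_gaps_for_partial = T;          # also report content gaps for partial connections
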
@@ -2923,8 +2937,8 @@ const remote_trace_sync_peers = 0 &redef; const remote_check_sync_consistency = F &redef; ## Reassemble the beginning of all TCP connections before doing -## signature-matching. Enabling this provides more accurate matching at the -## expensive of CPU cycles. +## signature matching. Enabling this provides more accurate matching at the +## expense of CPU cycles. ## ## .. bro:see:: dpd_buffer_size ## dpd_match_only_beginning dpd_ignore_ports @@ -2937,15 +2951,16 @@ const dpd_reassemble_first_packets = T &redef; ## connection, Bro buffers this initial amount of payload in memory so that ## complete protocol analysis can start even after the initial packets have ## already passed through (i.e., when a DPD signature matches only later). -## However, once the buffer is full, data is deleted and lost to analyzers that are -## activated afterwards. Then only analyzers that can deal with partial +## However, once the buffer is full, data is deleted and lost to analyzers that +## are activated afterwards. Then only analyzers that can deal with partial ## connections will be able to analyze the session. ## ## .. bro:see:: dpd_reassemble_first_packets dpd_match_only_beginning ## dpd_ignore_ports const dpd_buffer_size = 1024 &redef; -## If true, stops signature matching if dpd_buffer_size has been reached. +## If true, stops signature matching if :bro:see:`dpd_buffer_size` has been +## reached. ## ## .. bro:see:: dpd_reassemble_first_packets dpd_buffer_size ## dpd_ignore_ports @@ -2962,14 +2977,14 @@ const dpd_match_only_beginning = T &redef; const dpd_ignore_ports = F &redef; ## Ports which the core considers being likely used by servers. For ports in -## this set, is may heuristically decide to flip the direction of the +## this set, it may heuristically decide to flip the direction of the ## connection if it misses the initial handshake. const likely_server_ports: set[port] &redef; ## Per-incident timer managers are drained after this amount of inactivity. const timer_mgr_inactivity_timeout = 1 min &redef; -## If true, output profiling for time-machine queries. +## If true, output profiling for Time-Machine queries. const time_machine_profiling = F &redef; ## If true, warns about unused event handlers at startup. @@ -2982,24 +2997,25 @@ const check_for_unused_event_handlers = F &redef; ## Deprecated. const suppress_local_output = F &redef; -## Holds the filename of the trace file given with -w (empty if none). +## Holds the filename of the trace file given with ``-w`` (empty if none). ## ## .. bro:see:: record_all_packets const trace_output_file = ""; -## If a trace file is given with ``-w``, dump *all* packets seen by Bro into it. By -## default, Bro applies (very few) heuristics to reduce the volume. A side effect -## of setting this to true is that we can write the packets out before we actually -## process them, which can be helpful for debugging in case the analysis triggers a -## crash. +## If a trace file is given with ``-w``, dump *all* packets seen by Bro into it. +## By default, Bro applies (very few) heuristics to reduce the volume. A side +## effect of setting this to true is that we can write the packets out before we +## actually process them, which can be helpful for debugging in case the +## analysis triggers a crash. ## ## .. bro:see:: trace_output_file const record_all_packets = F &redef; -## Ignore certain TCP retransmissions for :bro:see:`conn_stats`. Some connections -## (e.g., SSH) retransmit the acknowledged last byte to keep the connection alive. 
-## If *ignore_keep_alive_rexmit* is set to true, such retransmissions will be -## excluded in the rexmit counter in :bro:see:`conn_stats`. +## Ignore certain TCP retransmissions for :bro:see:`conn_stats`. Some +## connections (e.g., SSH) retransmit the acknowledged last byte to keep the +## connection alive. If *ignore_keep_alive_rexmit* is set to true, such +## retransmissions will be excluded in the rexmit counter in +## :bro:see:`conn_stats`. ## ## .. bro:see:: conn_stats const ignore_keep_alive_rexmit = F &redef; @@ -3033,7 +3049,7 @@ export { ## With this set, the Teredo analyzer waits until it sees both sides ## of a connection using a valid Teredo encapsulation before issuing ## a :bro:see:`protocol_confirmation`. If it's false, the first - ## occurence of a packet with valid Teredo encapsulation causes a + ## occurrence of a packet with valid Teredo encapsulation causes a ## confirmation. Both cases are still subject to effects of ## :bro:see:`Tunnel::yielding_teredo_decapsulation`. const delay_teredo_confirmation = T &redef; @@ -3041,7 +3057,7 @@ export { ## With this set, the GTP analyzer waits until the most-recent upflow ## and downflow packets are a valid GTPv1 encapsulation before ## issuing :bro:see:`protocol_confirmation`. If it's false, the - ## first occurence of a packet with valid GTPv1 encapsulation causes + ## first occurrence of a packet with valid GTPv1 encapsulation causes ## confirmation. Since the same inner connection can be carried ## differing outer upflow/downflow connections, setting to false ## may work better. @@ -3059,8 +3075,8 @@ export { ## external harness and shouldn't output anything to the console. const info_to_stderr = T &redef; - ## Tunable for sending reporter warning messages to STDERR. The option to - ## turn it off is presented here in case Bro is being run by some + ## Tunable for sending reporter warning messages to STDERR. The option + ## to turn it off is presented here in case Bro is being run by some ## external harness and shouldn't output anything to the console. const warnings_to_stderr = T &redef; diff --git a/scripts/base/init-default.bro b/scripts/base/init-default.bro index 202f8eaaab..d0120d930b 100644 --- a/scripts/base/init-default.bro +++ b/scripts/base/init-default.bro @@ -1,8 +1,8 @@ ##! This script loads everything in the base/ script directory. If you want ##! to run Bro without all of these scripts loaded by default, you can use -##! the -b (--bare-mode) command line argument. You can also copy the "@load" -##! lines from this script to your own script to load only the scripts that -##! you actually want. +##! the ``-b`` (``--bare-mode``) command line argument. You can also copy the +##! "@load" lines from this script to your own script to load only the scripts +##! that you actually want. 
@load base/utils/site @load base/utils/active-http From 4434d231b2f0cd6aa7e4ca86b914ac478df4e7fd Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Sun, 20 Oct 2013 21:07:39 -0500 Subject: [PATCH 03/47] Correct more typos in init-bare.bro --- scripts/base/init-bare.bro | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 4b5b87e073..de26e6a41d 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -950,7 +950,7 @@ const tcp_content_deliver_all_resp = F &redef; ## udp_content_deliver_all_resp udp_contents const udp_content_delivery_ports_orig: table[port] of bool = {} &redef; -## Defines UDP destination ports for which the contents of the originator stream +## Defines UDP destination ports for which the contents of the responder stream ## should be delivered via :bro:see:`udp_contents`. ## ## .. bro:see:: tcp_content_delivery_ports_orig @@ -960,7 +960,7 @@ const udp_content_delivery_ports_orig: table[port] of bool = {} &redef; const udp_content_delivery_ports_resp: table[port] of bool = {} &redef; ## If true, all UDP originator-side traffic is reported via -## :bro:see:`tcp_contents`. +## :bro:see:`udp_contents`. ## ## .. bro:see:: tcp_content_delivery_ports_orig ## tcp_content_delivery_ports_resp tcp_content_deliver_all_resp @@ -970,7 +970,7 @@ const udp_content_delivery_ports_resp: table[port] of bool = {} &redef; const udp_content_deliver_all_orig = F &redef; ## If true, all UDP responder-side traffic is reported via -## :bro:see:`tcp_contents`. +## :bro:see:`udp_contents`. ## ## .. bro:see:: tcp_content_delivery_ports_orig ## tcp_content_delivery_ports_resp tcp_content_deliver_all_resp @@ -985,7 +985,7 @@ const udp_content_deliver_all_resp = F &redef; const table_expire_interval = 10 secs &redef; ## When expiring/serializing table entries, don't work on more than this many -## table at a time. +## table entries at a time. ## ## .. bro:see:: table_expire_interval table_expire_delay const table_incremental_step = 5000 &redef; @@ -1953,7 +1953,7 @@ export { ## Record summarizing the general results and status of NFSv3 ## request/reply pairs. ## - ## Note that when *rpc_stats* or *nfs_stats* indicates not successful, + ## Note that when *rpc_stat* or *nfs_stat* indicates not successful, ## the reply record passed to the corresponding event will be empty and ## contain uninitialized fields, so don't use it. Also note that time ## and duration values might not be fully accurate. For TCP, we record @@ -2793,7 +2793,7 @@ const segment_profiling = F &redef; ## Output modes for packet profiling information. ## -## .. bro:see:: pkt_profile_mode pkt_profile_freq pkt_profile_mode pkt_profile_file +## .. bro:see:: pkt_profile_mode pkt_profile_freq pkt_profile_file type pkt_profile_modes: enum { PKT_PROFILE_MODE_NONE, ##< No output. PKT_PROFILE_MODE_SECS, ##< Output every :bro:see:`pkt_profile_freq` seconds. @@ -2801,19 +2801,19 @@ type pkt_profile_modes: enum { PKT_PROFILE_MODE_BYTES, ##< Output every :bro:see:`pkt_profile_freq` bytes. }; -## Output modes for packet profiling information. +## Output mode for packet profiling information. ## -## .. bro:see:: pkt_profile_modes pkt_profile_freq pkt_profile_mode pkt_profile_file +## .. bro:see:: pkt_profile_modes pkt_profile_freq pkt_profile_file const pkt_profile_mode = PKT_PROFILE_MODE_NONE &redef; ## Frequency associated with packet profiling. ## -## .. 
bro:see:: pkt_profile_modes pkt_profile_mode pkt_profile_mode pkt_profile_file +## .. bro:see:: pkt_profile_modes pkt_profile_mode pkt_profile_file const pkt_profile_freq = 0.0 &redef; ## File where packet profiles are logged. ## -## .. bro:see:: pkt_profile_modes pkt_profile_freq pkt_profile_mode pkt_profile_mode +## .. bro:see:: pkt_profile_modes pkt_profile_freq pkt_profile_mode global pkt_profile_file: file &redef; ## Rate at which to generate :bro:see:`load_sample` events. As all @@ -2838,9 +2838,9 @@ const gap_report_freq = 1.0 sec &redef; const report_gaps_for_partial = F &redef; ## Flag to prevent Bro from exiting automatically when input is exhausted. -## Normally Bro terminates when all packets sources have gone dry +## Normally Bro terminates when all packet sources have gone dry ## and communication isn't enabled. If this flag is set, Bro's main loop will -## instead keep idleing until :bro:see:`terminate` is explicitly called. +## instead keep idling until :bro:see:`terminate` is explicitly called. ## ## This is mainly for testing purposes when termination behaviour needs to be ## controlled for reproducing results. From 24da7ab83968e0d82ff1acc9f2b6a5069c1893b1 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Mon, 21 Oct 2013 01:23:08 -0500 Subject: [PATCH 04/47] Fix typos and formatting in the policy/frameworks docs Also updated some tests related to these changes. --- .../policy/frameworks/control/controller.bro | 3 ++- .../frameworks/dpd/packet-segment-logging.bro | 10 ++++---- .../policy/frameworks/files/detect-MHR.bro | 8 +++---- .../frameworks/files/hash-all-files.bro | 2 +- scripts/policy/frameworks/intel/do_notice.bro | 2 +- .../policy/frameworks/packet-filter/shunt.bro | 23 ++++++++++--------- .../frameworks/software/version-changes.bro | 12 +++++----- .../policy/frameworks/software/vulnerable.bro | 6 ++--- .../output | 8 +++---- .../output | 8 +++---- ...licy_frameworks_files_detect-MHR_bro.btest | 8 +++---- ...cy_frameworks_files_detect-MHR_bro@3.btest | 8 +++---- 12 files changed, 50 insertions(+), 48 deletions(-) diff --git a/scripts/policy/frameworks/control/controller.bro b/scripts/policy/frameworks/control/controller.bro index 22b19bf973..cc94767370 100644 --- a/scripts/policy/frameworks/control/controller.bro +++ b/scripts/policy/frameworks/control/controller.bro @@ -1,8 +1,9 @@ ##! This is a utility script that implements the controller interface for the -##! control framework. It's intended to be run to control a remote Bro +##! control framework. It's intended to be run to control a remote Bro ##! and then shutdown. ##! ##! It's intended to be used from the command line like this:: +##! ##! bro frameworks/control/controller Control::host= Control::port= Control::cmd= [Control::arg=] @load base/frameworks/control diff --git a/scripts/policy/frameworks/dpd/packet-segment-logging.bro b/scripts/policy/frameworks/dpd/packet-segment-logging.bro index a605d84a74..c1d6699352 100644 --- a/scripts/policy/frameworks/dpd/packet-segment-logging.bro +++ b/scripts/policy/frameworks/dpd/packet-segment-logging.bro @@ -1,6 +1,6 @@ -##! This script enables logging of packet segment data when a protocol -##! parsing violation is encountered. The amount of -##! data from the packet logged is set by the packet_segment_size variable. +##! This script enables logging of packet segment data when a protocol +##! parsing violation is encountered. The amount of data from the +##! packet logged is set by the :bro:see:`DPD::packet_segment_size` variable. ##! 
A caveat to logging packet data is that in some cases, the packet may ##! not be the packet that actually caused the protocol violation. @@ -10,8 +10,8 @@ module DPD; export { redef record Info += { - ## A chunk of the payload the most likely resulted in the protocol - ## violation. + ## A chunk of the payload that most likely resulted in the + ## protocol violation. packet_segment: string &optional &log; }; diff --git a/scripts/policy/frameworks/files/detect-MHR.bro b/scripts/policy/frameworks/files/detect-MHR.bro index 8a2e33b7f4..5ed8715c9b 100644 --- a/scripts/policy/frameworks/files/detect-MHR.bro +++ b/scripts/policy/frameworks/files/detect-MHR.bro @@ -23,10 +23,10 @@ export { /application\/jar/ | /video\/mp4/ &redef; - ## The malware hash registry runs each malware sample through several A/V engines. - ## Team Cymru returns a percentage to indicate how many A/V engines flagged the - ## sample as malicious. This threshold allows you to require a minimum detection - ## rate. + ## The malware hash registry runs each malware sample through several + ## A/V engines. Team Cymru returns a percentage to indicate how + ## many A/V engines flagged the sample as malicious. This threshold + ## allows you to require a minimum detection rate. const notice_threshold = 10 &redef; } diff --git a/scripts/policy/frameworks/files/hash-all-files.bro b/scripts/policy/frameworks/files/hash-all-files.bro index 931857c2bc..74bea47bb9 100644 --- a/scripts/policy/frameworks/files/hash-all-files.bro +++ b/scripts/policy/frameworks/files/hash-all-files.bro @@ -1,4 +1,4 @@ -# Perform MD5 and SHA1 hashing on all files. +##! Perform MD5 and SHA1 hashing on all files. event file_new(f: fa_file) { diff --git a/scripts/policy/frameworks/intel/do_notice.bro b/scripts/policy/frameworks/intel/do_notice.bro index 720e29c35c..89910ede32 100644 --- a/scripts/policy/frameworks/intel/do_notice.bro +++ b/scripts/policy/frameworks/intel/do_notice.bro @@ -18,7 +18,7 @@ export { do_notice: bool &default=F; ## Restrictions on when notices are created to only create - ## them if the do_notice field is T and the notice was + ## them if the *do_notice* field is T and the notice was ## seen in the indicated location. if_in: Intel::Where &optional; }; diff --git a/scripts/policy/frameworks/packet-filter/shunt.bro b/scripts/policy/frameworks/packet-filter/shunt.bro index 85ec189a17..97ae0c792d 100644 --- a/scripts/policy/frameworks/packet-filter/shunt.bro +++ b/scripts/policy/frameworks/packet-filter/shunt.bro @@ -8,23 +8,23 @@ export { const max_bpf_shunts = 100 &redef; ## Call this function to use BPF to shunt a connection (to prevent the - ## data packets from reaching Bro). For TCP connections, control packets - ## are still allowed through so that Bro can continue logging the connection - ## and it can stop shunting once the connection ends. + ## data packets from reaching Bro). For TCP connections, control + ## packets are still allowed through so that Bro can continue logging + ## the connection and it can stop shunting once the connection ends. global shunt_conn: function(id: conn_id): bool; - ## This function will use a BPF expresssion to shunt traffic between + ## This function will use a BPF expression to shunt traffic between ## the two hosts given in the `conn_id` so that the traffic is never ## exposed to Bro's traffic processing. global shunt_host_pair: function(id: conn_id): bool; ## Remove shunting for a host pair given as a `conn_id`. The filter - ## is not immediately removed. 
It waits for the occassional filter + ## is not immediately removed. It waits for the occasional filter ## update done by the `PacketFilter` framework. global unshunt_host_pair: function(id: conn_id): bool; - ## Performs the same function as the `unshunt_host_pair` function, but - ## it forces an immediate filter update. + ## Performs the same function as the :bro:id:`PacketFilter::unshunt_host_pair` + ## function, but it forces an immediate filter update. global force_unshunt_host_pair: function(id: conn_id): bool; ## Retrieve the currently shunted connections. @@ -34,12 +34,13 @@ export { global current_shunted_host_pairs: function(): set[conn_id]; redef enum Notice::Type += { - ## Indicative that :bro:id:`PacketFilter::max_bpf_shunts` connections - ## are already being shunted with BPF filters and no more are allowed. + ## Indicative that :bro:id:`PacketFilter::max_bpf_shunts` + ## connections are already being shunted with BPF filters and + ## no more are allowed. No_More_Conn_Shunts_Available, - ## Limitations in BPF make shunting some connections with BPF impossible. - ## This notice encompasses those various cases. + ## Limitations in BPF make shunting some connections with BPF + ## impossible. This notice encompasses those various cases. Cannot_BPF_Shunt_Conn, }; } diff --git a/scripts/policy/frameworks/software/version-changes.bro b/scripts/policy/frameworks/software/version-changes.bro index 974a23dc76..73bb72e01b 100644 --- a/scripts/policy/frameworks/software/version-changes.bro +++ b/scripts/policy/frameworks/software/version-changes.bro @@ -1,4 +1,4 @@ -##! Provides the possibly to define software names that are interesting to +##! Provides the possibility to define software names that are interesting to ##! watch for changes. A notice is generated if software versions change on a ##! host. @@ -9,15 +9,15 @@ module Software; export { redef enum Notice::Type += { - ## For certain software, a version changing may matter. In that case, - ## this notice will be generated. Software that matters if the version - ## changes can be configured with the + ## For certain software, a version changing may matter. In that + ## case, this notice will be generated. Software that matters + ## if the version changes can be configured with the ## :bro:id:`Software::interesting_version_changes` variable. Software_Version_Change, }; - ## Some software is more interesting when the version changes and this is - ## a set of all software that should raise a notice when a different + ## Some software is more interesting when the version changes and this + ## is a set of all software that should raise a notice when a different ## version is seen on a host. const interesting_version_changes: set[string] = { } &redef; } diff --git a/scripts/policy/frameworks/software/vulnerable.bro b/scripts/policy/frameworks/software/vulnerable.bro index 47c64885f5..ee8d90b21f 100644 --- a/scripts/policy/frameworks/software/vulnerable.bro +++ b/scripts/policy/frameworks/software/vulnerable.bro @@ -1,5 +1,5 @@ -##! Provides a variable to define vulnerable versions of software and if a -##! a version of that software as old or older than the defined version a +##! Provides a variable to define vulnerable versions of software and if +##! a version of that software is as old or older than the defined version a ##! notice will be generated. @load base/frameworks/control @@ -21,7 +21,7 @@ export { min: Software::Version &optional; ## The maximum vulnerable version. 
This field is deliberately ## not optional because a maximum vulnerable version must - ## always be defined. This assumption may become incorrent + ## always be defined. This assumption may become incorrect ## if all future versions of some software are to be considered ## vulnerable. :) max: Software::Version; diff --git a/testing/btest/Baseline/doc.sphinx.include-scripts_policy_frameworks_files_detect-MHR_bro/output b/testing/btest/Baseline/doc.sphinx.include-scripts_policy_frameworks_files_detect-MHR_bro/output index 26911a534c..9f05c43669 100644 --- a/testing/btest/Baseline/doc.sphinx.include-scripts_policy_frameworks_files_detect-MHR_bro/output +++ b/testing/btest/Baseline/doc.sphinx.include-scripts_policy_frameworks_files_detect-MHR_bro/output @@ -27,10 +27,10 @@ export { /application\/jar/ | /video\/mp4/ &redef; - ## The malware hash registry runs each malware sample through several A/V engines. - ## Team Cymru returns a percentage to indicate how many A/V engines flagged the - ## sample as malicious. This threshold allows you to require a minimum detection - ## rate. + ## The malware hash registry runs each malware sample through several + ## A/V engines. Team Cymru returns a percentage to indicate how + ## many A/V engines flagged the sample as malicious. This threshold + ## allows you to require a minimum detection rate. const notice_threshold = 10 &redef; } diff --git a/testing/btest/Baseline/doc.sphinx.include-scripts_policy_frameworks_files_detect-MHR_bro@3/output b/testing/btest/Baseline/doc.sphinx.include-scripts_policy_frameworks_files_detect-MHR_bro@3/output index b8a0abf363..92f077de7d 100644 --- a/testing/btest/Baseline/doc.sphinx.include-scripts_policy_frameworks_files_detect-MHR_bro@3/output +++ b/testing/btest/Baseline/doc.sphinx.include-scripts_policy_frameworks_files_detect-MHR_bro@3/output @@ -18,9 +18,9 @@ export { /application\/jar/ | /video\/mp4/ &redef; - ## The malware hash registry runs each malware sample through several A/V engines. - ## Team Cymru returns a percentage to indicate how many A/V engines flagged the - ## sample as malicious. This threshold allows you to require a minimum detection - ## rate. + ## The malware hash registry runs each malware sample through several + ## A/V engines. Team Cymru returns a percentage to indicate how + ## many A/V engines flagged the sample as malicious. This threshold + ## allows you to require a minimum detection rate. const notice_threshold = 10 &redef; } diff --git a/testing/btest/doc/sphinx/include-scripts_policy_frameworks_files_detect-MHR_bro.btest b/testing/btest/doc/sphinx/include-scripts_policy_frameworks_files_detect-MHR_bro.btest index 26911a534c..9f05c43669 100644 --- a/testing/btest/doc/sphinx/include-scripts_policy_frameworks_files_detect-MHR_bro.btest +++ b/testing/btest/doc/sphinx/include-scripts_policy_frameworks_files_detect-MHR_bro.btest @@ -27,10 +27,10 @@ export { /application\/jar/ | /video\/mp4/ &redef; - ## The malware hash registry runs each malware sample through several A/V engines. - ## Team Cymru returns a percentage to indicate how many A/V engines flagged the - ## sample as malicious. This threshold allows you to require a minimum detection - ## rate. + ## The malware hash registry runs each malware sample through several + ## A/V engines. Team Cymru returns a percentage to indicate how + ## many A/V engines flagged the sample as malicious. This threshold + ## allows you to require a minimum detection rate. 
const notice_threshold = 10 &redef; } diff --git a/testing/btest/doc/sphinx/include-scripts_policy_frameworks_files_detect-MHR_bro@3.btest b/testing/btest/doc/sphinx/include-scripts_policy_frameworks_files_detect-MHR_bro@3.btest index b8a0abf363..92f077de7d 100644 --- a/testing/btest/doc/sphinx/include-scripts_policy_frameworks_files_detect-MHR_bro@3.btest +++ b/testing/btest/doc/sphinx/include-scripts_policy_frameworks_files_detect-MHR_bro@3.btest @@ -18,9 +18,9 @@ export { /application\/jar/ | /video\/mp4/ &redef; - ## The malware hash registry runs each malware sample through several A/V engines. - ## Team Cymru returns a percentage to indicate how many A/V engines flagged the - ## sample as malicious. This threshold allows you to require a minimum detection - ## rate. + ## The malware hash registry runs each malware sample through several + ## A/V engines. Team Cymru returns a percentage to indicate how + ## many A/V engines flagged the sample as malicious. This threshold + ## allows you to require a minimum detection rate. const notice_threshold = 10 &redef; } From 9374a7d5844c2a6162b259b6a12a268251e8ad18 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Mon, 21 Oct 2013 02:32:56 -0500 Subject: [PATCH 05/47] Fix typos and formatting in the policy/protocols docs Also updated a test related to these changes, and adjusted line numbers. --- doc/scripting/index.rst | 2 +- scripts/policy/protocols/conn/known-hosts.bro | 4 ++-- .../dhcp/known-devices-and-hostnames.bro | 2 +- .../protocols/dns/detect-external-names.bro | 6 ++--- .../protocols/ftp/detect-bruteforcing.bro | 8 +++---- scripts/policy/protocols/http/detect-sqli.bro | 14 +++++++----- .../policy/protocols/http/header-names.bro | 8 +++---- .../protocols/http/var-extraction-cookies.bro | 2 +- .../protocols/http/var-extraction-uri.bro | 2 +- .../policy/protocols/modbus/track-memmap.bro | 11 +++++----- .../protocols/smtp/detect-suspicious-orig.bro | 4 ++-- scripts/policy/protocols/smtp/software.bro | 22 +++++++++---------- .../protocols/ssh/detect-bruteforcing.bro | 14 ++++++------ scripts/policy/protocols/ssh/geo-data.bro | 9 ++++---- .../protocols/ssh/interesting-hostnames.bro | 4 ++-- .../policy/protocols/ssl/expiring-certs.bro | 11 +++++----- .../protocols/ssl/extract-certs-pem.bro | 6 ++--- scripts/policy/protocols/ssl/known-certs.bro | 7 +++--- .../policy/protocols/ssl/validate-certs.bro | 11 +++++----- .../output | 4 ++-- ...tocols_ssh_interesting-hostnames_bro.btest | 4 ++-- 21 files changed, 82 insertions(+), 73 deletions(-) diff --git a/doc/scripting/index.rst b/doc/scripting/index.rst index a24032f524..749fae6457 100644 --- a/doc/scripting/index.rst +++ b/doc/scripting/index.rst @@ -1222,7 +1222,7 @@ from the connection relative to the behavior that has been observed by Bro. .. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ssl/expiring-certs.bro - :lines: 59-62 + :lines: 60-63 In the :doc:`/scripts/policy/protocols/ssl/expiring-certs` script which identifies when SSL certificates are set to expire and raises diff --git a/scripts/policy/protocols/conn/known-hosts.bro b/scripts/policy/protocols/conn/known-hosts.bro index 8914a5a22a..e4fef85f87 100644 --- a/scripts/policy/protocols/conn/known-hosts.bro +++ b/scripts/policy/protocols/conn/known-hosts.bro @@ -15,8 +15,8 @@ export { type HostsInfo: record { ## The timestamp at which the host was detected. ts: time &log; - ## The address that was detected originating or responding to a TCP - ## connection. 
+ ## The address that was detected originating or responding to a + ## TCP connection. host: addr &log; }; diff --git a/scripts/policy/protocols/dhcp/known-devices-and-hostnames.bro b/scripts/policy/protocols/dhcp/known-devices-and-hostnames.bro index 519429981c..63b794cb9f 100644 --- a/scripts/policy/protocols/dhcp/known-devices-and-hostnames.bro +++ b/scripts/policy/protocols/dhcp/known-devices-and-hostnames.bro @@ -7,7 +7,7 @@ module Known; export { redef record DevicesInfo += { - ## The value of the DHCP host name option, if seen + ## The value of the DHCP host name option, if seen. dhcp_host_name: string &log &optional; }; } diff --git a/scripts/policy/protocols/dns/detect-external-names.bro b/scripts/policy/protocols/dns/detect-external-names.bro index a1897d06e7..ea56e5676f 100644 --- a/scripts/policy/protocols/dns/detect-external-names.bro +++ b/scripts/policy/protocols/dns/detect-external-names.bro @@ -10,9 +10,9 @@ module DNS; export { redef enum Notice::Type += { - ## Raised when a non-local name is found to be pointing at a local host. - ## :bro:id:`Site::local_zones` variable **must** be set appropriately - ## for this detection. + ## Raised when a non-local name is found to be pointing at a + ## local host. The :bro:id:`Site::local_zones` variable + ## **must** be set appropriately for this detection. External_Name, }; } diff --git a/scripts/policy/protocols/ftp/detect-bruteforcing.bro b/scripts/policy/protocols/ftp/detect-bruteforcing.bro index 1af9bb081e..eb70688d47 100644 --- a/scripts/policy/protocols/ftp/detect-bruteforcing.bro +++ b/scripts/policy/protocols/ftp/detect-bruteforcing.bro @@ -1,5 +1,5 @@ -##! FTP brute-forcing detector, triggering when too many rejected usernames or -##! failed passwords have occured from a single address. +##! FTP brute-forcing detector, triggering when too many rejected usernames or +##! failed passwords have occurred from a single address. @load base/protocols/ftp @load base/frameworks/sumstats @@ -10,8 +10,8 @@ module FTP; export { redef enum Notice::Type += { - ## Indicates a host bruteforcing FTP logins by watching for too many - ## rejected usernames or failed passwords. + ## Indicates a host bruteforcing FTP logins by watching for too + ## many rejected usernames or failed passwords. Bruteforcing }; diff --git a/scripts/policy/protocols/http/detect-sqli.bro b/scripts/policy/protocols/http/detect-sqli.bro index 79d8d6f2f9..5d1b3b6b8c 100644 --- a/scripts/policy/protocols/http/detect-sqli.bro +++ b/scripts/policy/protocols/http/detect-sqli.bro @@ -8,10 +8,12 @@ module HTTP; export { redef enum Notice::Type += { - ## Indicates that a host performing SQL injection attacks was detected. + ## Indicates that a host performing SQL injection attacks was + ## detected. SQL_Injection_Attacker, - ## Indicates that a host was seen to have SQL injection attacks against - ## it. This is tracked by IP address as opposed to hostname. + ## Indicates that a host was seen to have SQL injection attacks + ## against it. This is tracked by IP address as opposed to + ## hostname. SQL_Injection_Victim, }; @@ -19,9 +21,11 @@ export { ## Indicator of a URI based SQL injection attack. URI_SQLI, ## Indicator of client body based SQL injection attack. This is - ## typically the body content of a POST request. Not implemented yet. + ## typically the body content of a POST request. Not implemented + ## yet. POST_SQLI, - ## Indicator of a cookie based SQL injection attack. Not implemented yet. + ## Indicator of a cookie based SQL injection attack. 
Not + ## implemented yet. COOKIE_SQLI, }; diff --git a/scripts/policy/protocols/http/header-names.bro b/scripts/policy/protocols/http/header-names.bro index bd0e55f02f..5aefdad538 100644 --- a/scripts/policy/protocols/http/header-names.bro +++ b/scripts/policy/protocols/http/header-names.bro @@ -8,12 +8,12 @@ module HTTP; export { redef record Info += { - ## The vector of HTTP header names sent by the client. No header - ## values are included here, just the header names. + ## The vector of HTTP header names sent by the client. No + ## header values are included here, just the header names. client_header_names: vector of string &log &optional; - ## The vector of HTTP header names sent by the server. No header - ## values are included here, just the header names. + ## The vector of HTTP header names sent by the server. No + ## header values are included here, just the header names. server_header_names: vector of string &log &optional; }; diff --git a/scripts/policy/protocols/http/var-extraction-cookies.bro b/scripts/policy/protocols/http/var-extraction-cookies.bro index 610c6e1381..2ed7656128 100644 --- a/scripts/policy/protocols/http/var-extraction-cookies.bro +++ b/scripts/policy/protocols/http/var-extraction-cookies.bro @@ -1,4 +1,4 @@ -##! Extracts and logs variables names from cookies sent by clients. +##! Extracts and logs variable names from cookies sent by clients. @load base/protocols/http/main @load base/protocols/http/utils diff --git a/scripts/policy/protocols/http/var-extraction-uri.bro b/scripts/policy/protocols/http/var-extraction-uri.bro index 27ee89d6f2..98eba48fed 100644 --- a/scripts/policy/protocols/http/var-extraction-uri.bro +++ b/scripts/policy/protocols/http/var-extraction-uri.bro @@ -1,4 +1,4 @@ -##! Extracts and log variables from the requested URI in the default HTTP +##! Extracts and logs variables from the requested URI in the default HTTP ##! logging stream. @load base/protocols/http diff --git a/scripts/policy/protocols/modbus/track-memmap.bro b/scripts/policy/protocols/modbus/track-memmap.bro index 6ba788f4ed..7714ce7537 100644 --- a/scripts/policy/protocols/modbus/track-memmap.bro +++ b/scripts/policy/protocols/modbus/track-memmap.bro @@ -15,9 +15,9 @@ export { const track_memmap: Host = ALL_HOSTS &redef; type MemmapInfo: record { - ## Timestamp for the detected register change + ## Timestamp for the detected register change. ts: time &log; - ## Unique ID for the connection + ## Unique ID for the connection. uid: string &log; ## Connection ID. id: conn_id &log; @@ -27,7 +27,8 @@ export { old_val: count &log; ## The new value stored in the register. new_val: count &log; - ## The time delta between when the 'old_val' and 'new_val' were seen. + ## The time delta between when the *old_val* and *new_val* were + ## seen. delta: interval &log; }; @@ -42,8 +43,8 @@ export { ## The memory map of slaves is tracked with this variable. global device_registers: table[addr] of Registers; - ## This event is generated every time a register is seen to be different than - ## it was previously seen to be. + ## This event is generated every time a register is seen to be different + ## than it was previously seen to be. 
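# Illustrative sketch, not part of the patch: the event declared just below can
# be handled from a user script, e.g.:
event Modbus::changed_register(c: connection, register: count, old_val: count,
                               new_val: count, delta: interval)
    {
    print fmt("Modbus register %d on %s changed from %d to %d after %s",
              register, c$id$resp_h, old_val, new_val, delta);
    }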
global changed_register: event(c: connection, register: count, old_val: count, new_val: count, delta: interval); } diff --git a/scripts/policy/protocols/smtp/detect-suspicious-orig.bro b/scripts/policy/protocols/smtp/detect-suspicious-orig.bro index 4635b17a83..6fe37e02a8 100644 --- a/scripts/policy/protocols/smtp/detect-suspicious-orig.bro +++ b/scripts/policy/protocols/smtp/detect-suspicious-orig.bro @@ -8,8 +8,8 @@ export { Suspicious_Origination }; - ## Places where it's suspicious for mail to originate from represented as - ## all-capital, two character country codes (e.x. US). It requires + ## Places where it's suspicious for mail to originate from represented + ## as all-capital, two character country codes (e.g., US). It requires ## libGeoIP support built in. const suspicious_origination_countries: set[string] = {} &redef; const suspicious_origination_networks: set[subnet] = {} &redef; diff --git a/scripts/policy/protocols/smtp/software.bro b/scripts/policy/protocols/smtp/software.bro index f520485338..fdae55746f 100644 --- a/scripts/policy/protocols/smtp/software.bro +++ b/scripts/policy/protocols/smtp/software.bro @@ -5,7 +5,7 @@ ##! TODO: ##! ##! * Find some heuristic to determine if email was sent through -##! a MS Exhange webmail interface as opposed to a desktop client. +##! a MS Exchange webmail interface as opposed to a desktop client. @load base/frameworks/software/main @load base/protocols/smtp/main @@ -20,19 +20,19 @@ export { }; redef record Info += { - ## Boolean indicator of if the message was sent through a webmail - ## interface. + ## Boolean indicator of if the message was sent through a + ## webmail interface. is_webmail: bool &log &default=F; }; - ## Assuming that local mail servers are more trustworthy with the headers - ## they insert into messages envelopes, this default makes Bro not attempt - ## to detect software in inbound message bodies. If mail coming in from - ## external addresses gives incorrect data in the Received headers, it - ## could populate your SOFTWARE logging stream with incorrect data. - ## If you would like to detect mail clients for incoming messages - ## (network traffic originating from a non-local address), set this - ## variable to EXTERNAL_HOSTS or ALL_HOSTS. + ## Assuming that local mail servers are more trustworthy with the + ## headers they insert into message envelopes, this default makes Bro + ## not attempt to detect software in inbound message bodies. If mail + ## coming in from external addresses gives incorrect data in + ## the Received headers, it could populate your SOFTWARE logging stream + ## with incorrect data. If you would like to detect mail clients for + ## incoming messages (network traffic originating from a non-local + ## address), set this variable to EXTERNAL_HOSTS or ALL_HOSTS. const detect_clients_in_messages_from = LOCAL_HOSTS &redef; ## A regular expression to match USER-AGENT-like headers to find if a diff --git a/scripts/policy/protocols/ssh/detect-bruteforcing.bro b/scripts/policy/protocols/ssh/detect-bruteforcing.bro index 7988ecb0ad..ba889cbf3c 100644 --- a/scripts/policy/protocols/ssh/detect-bruteforcing.bro +++ b/scripts/policy/protocols/ssh/detect-bruteforcing.bro @@ -11,12 +11,12 @@ module SSH; export { redef enum Notice::Type += { ## Indicates that a host has been identified as crossing the - ## :bro:id:`SSH::password_guesses_limit` threshold with heuristically - ## determined failed logins. + ## :bro:id:`SSH::password_guesses_limit` threshold with + ## heuristically determined failed logins. 
Password_Guessing, - ## Indicates that a host previously identified as a "password guesser" - ## has now had a heuristically successful login attempt. This is not - ## currently implemented. + ## Indicates that a host previously identified as a "password + ## guesser" has now had a heuristically successful login + ## attempt. This is not currently implemented. Login_By_Password_Guesser, }; @@ -29,8 +29,8 @@ export { ## guessing passwords. const password_guesses_limit: double = 30 &redef; - ## The amount of time to remember presumed non-successful logins to build - ## model of a password guesser. + ## The amount of time to remember presumed non-successful logins to + ## build a model of a password guesser. const guessing_timeout = 30 mins &redef; ## This value can be used to exclude hosts or entire networks from being diff --git a/scripts/policy/protocols/ssh/geo-data.bro b/scripts/policy/protocols/ssh/geo-data.bro index 0f8bb932fe..3abc19d337 100644 --- a/scripts/policy/protocols/ssh/geo-data.bro +++ b/scripts/policy/protocols/ssh/geo-data.bro @@ -7,14 +7,15 @@ module SSH; export { redef enum Notice::Type += { - ## If an SSH login is seen to or from a "watched" country based on the - ## :bro:id:`SSH::watched_countries` variable then this notice will - ## be generated. + ## If an SSH login is seen to or from a "watched" country based + ## on the :bro:id:`SSH::watched_countries` variable then this + ## notice will be generated. Watched_Country_Login, }; redef record Info += { - ## Add geographic data related to the "remote" host of the connection. + ## Add geographic data related to the "remote" host of the + ## connection. remote_location: geo_location &log &optional; }; diff --git a/scripts/policy/protocols/ssh/interesting-hostnames.bro b/scripts/policy/protocols/ssh/interesting-hostnames.bro index f79c67ede9..f9b3636e62 100644 --- a/scripts/policy/protocols/ssh/interesting-hostnames.bro +++ b/scripts/policy/protocols/ssh/interesting-hostnames.bro @@ -10,8 +10,8 @@ module SSH; export { redef enum Notice::Type += { - ## Generated if a login originates or responds with a host where the - ## reverse hostname lookup resolves to a name matched by the + ## Generated if a login originates or responds with a host where + ## the reverse hostname lookup resolves to a name matched by the ## :bro:id:`SSH::interesting_hostnames` regular expression. Interesting_Hostname_Login, }; diff --git a/scripts/policy/protocols/ssl/expiring-certs.bro b/scripts/policy/protocols/ssl/expiring-certs.bro index 80616e6a99..be6526877b 100644 --- a/scripts/policy/protocols/ssl/expiring-certs.bro +++ b/scripts/policy/protocols/ssl/expiring-certs.bro @@ -12,13 +12,14 @@ module SSL; export { redef enum Notice::Type += { - ## Indicates that a certificate's NotValidAfter date has lapsed and - ## the certificate is now invalid. + ## Indicates that a certificate's NotValidAfter date has lapsed + ## and the certificate is now invalid. Certificate_Expired, ## Indicates that a certificate is going to expire within ## :bro:id:`SSL::notify_when_cert_expiring_in`. Certificate_Expires_Soon, - ## Indicates that a certificate's NotValidBefore date is future dated. + ## Indicates that a certificate's NotValidBefore date is future + ## dated. 
Certificate_Not_Valid_Yet, }; @@ -29,8 +30,8 @@ export { ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS const notify_certs_expiration = LOCAL_HOSTS &redef; - ## The time before a certificate is going to expire that you would like to - ## start receiving :bro:enum:`SSL::Certificate_Expires_Soon` notices. + ## The time before a certificate is going to expire that you would like + ## to start receiving :bro:enum:`SSL::Certificate_Expires_Soon` notices. const notify_when_cert_expiring_in = 30days &redef; } diff --git a/scripts/policy/protocols/ssl/extract-certs-pem.bro b/scripts/policy/protocols/ssl/extract-certs-pem.bro index d67310f502..32293ebef3 100644 --- a/scripts/policy/protocols/ssl/extract-certs-pem.bro +++ b/scripts/policy/protocols/ssl/extract-certs-pem.bro @@ -5,8 +5,8 @@ ##! .. note:: ##! ##! - It doesn't work well on a cluster because each worker will write its -##! own certificate files and no duplicate checking is done across -##! clusters so each node would log each certificate. +##! own certificate files and no duplicate checking is done across the +##! cluster so each node would log each certificate. ##! @load base/protocols/ssl @@ -18,7 +18,7 @@ module SSL; export { ## Control if host certificates offered by the defined hosts ## will be written to the PEM certificates file. - ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS + ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS. const extract_certs_pem = LOCAL_HOSTS &redef; } diff --git a/scripts/policy/protocols/ssl/known-certs.bro b/scripts/policy/protocols/ssl/known-certs.bro index 3986a9aa1e..478074f55a 100644 --- a/scripts/policy/protocols/ssl/known-certs.bro +++ b/scripts/policy/protocols/ssl/known-certs.bro @@ -1,4 +1,5 @@ -##! Log information about certificates while attempting to avoid duplicate logging. +##! Log information about certificates while attempting to avoid duplicate +##! logging. @load base/utils/directions-and-hosts @load base/protocols/ssl @@ -26,7 +27,7 @@ export { }; ## The certificates whose existence should be logged and tracked. - ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS + ## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS. const cert_tracking = LOCAL_HOSTS &redef; ## The set of all known certificates to store for preventing duplicate @@ -35,7 +36,7 @@ export { ## in the set is for storing the DER formatted certificate's MD5 hash. global certs: set[addr, string] &create_expire=1day &synchronized &redef; - ## Event that can be handled to access the loggable record as it is sent + ## Event that can be handled to access the loggable record as it is sent ## on to the logging framework. global log_known_certs: event(rec: CertsInfo); } diff --git a/scripts/policy/protocols/ssl/validate-certs.bro b/scripts/policy/protocols/ssl/validate-certs.bro index 03624eac84..b34ec5a09a 100644 --- a/scripts/policy/protocols/ssl/validate-certs.bro +++ b/scripts/policy/protocols/ssl/validate-certs.bro @@ -8,8 +8,9 @@ module SSL; export { redef enum Notice::Type += { - ## This notice indicates that the result of validating the certificate - ## along with it's full certificate chain was invalid. + ## This notice indicates that the result of validating the + ## certificate along with its full certificate chain was + ## invalid. 
Invalid_Server_Cert }; @@ -18,9 +19,9 @@ export { validation_status: string &log &optional; }; - ## MD5 hash values for recently validated certs along with the validation - ## status message are kept in this table to avoid constant validation - ## everytime the same certificate is seen. + ## MD5 hash values for recently validated certs along with the + ## validation status message are kept in this table to avoid constant + ## validation every time the same certificate is seen. global recently_validated_certs: table[string] of string = table() &read_expire=5mins &synchronized &redef; } diff --git a/testing/btest/Baseline/doc.sphinx.include-scripts_policy_protocols_ssh_interesting-hostnames_bro/output b/testing/btest/Baseline/doc.sphinx.include-scripts_policy_protocols_ssh_interesting-hostnames_bro/output index 2ff1a711be..af9ea0dc83 100644 --- a/testing/btest/Baseline/doc.sphinx.include-scripts_policy_protocols_ssh_interesting-hostnames_bro/output +++ b/testing/btest/Baseline/doc.sphinx.include-scripts_policy_protocols_ssh_interesting-hostnames_bro/output @@ -14,8 +14,8 @@ module SSH; export { redef enum Notice::Type += { - ## Generated if a login originates or responds with a host where the - ## reverse hostname lookup resolves to a name matched by the + ## Generated if a login originates or responds with a host where + ## the reverse hostname lookup resolves to a name matched by the ## :bro:id:`SSH::interesting_hostnames` regular expression. Interesting_Hostname_Login, }; diff --git a/testing/btest/doc/sphinx/include-scripts_policy_protocols_ssh_interesting-hostnames_bro.btest b/testing/btest/doc/sphinx/include-scripts_policy_protocols_ssh_interesting-hostnames_bro.btest index 2ff1a711be..af9ea0dc83 100644 --- a/testing/btest/doc/sphinx/include-scripts_policy_protocols_ssh_interesting-hostnames_bro.btest +++ b/testing/btest/doc/sphinx/include-scripts_policy_protocols_ssh_interesting-hostnames_bro.btest @@ -14,8 +14,8 @@ module SSH; export { redef enum Notice::Type += { - ## Generated if a login originates or responds with a host where the - ## reverse hostname lookup resolves to a name matched by the + ## Generated if a login originates or responds with a host where + ## the reverse hostname lookup resolves to a name matched by the ## :bro:id:`SSH::interesting_hostnames` regular expression. 
Interesting_Hostname_Login, }; From 02d7e16997f95009d2f53ca568866905be585230 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Mon, 21 Oct 2013 02:37:00 -0500 Subject: [PATCH 06/47] Fix typos and formatting in the other policy docs --- scripts/policy/integration/barnyard2/main.bro | 4 +- .../policy/integration/barnyard2/types.bro | 2 +- .../integration/collective-intel/main.bro | 4 +- scripts/policy/misc/capture-loss.bro | 6 +- .../policy/misc/detect-traceroute/main.bro | 26 +++++---- scripts/policy/misc/known-devices.bro | 19 ++++--- scripts/policy/misc/load-balancing.bro | 7 ++- scripts/policy/misc/loaded-scripts.bro | 8 +-- scripts/policy/misc/profiling.bro | 3 +- scripts/policy/misc/scan.bro | 56 ++++++++++--------- scripts/policy/misc/stats.bro | 23 ++++---- scripts/policy/misc/trim-trace-file.bro | 8 +-- .../policy/tuning/logs-to-elasticsearch.bro | 4 +- 13 files changed, 90 insertions(+), 80 deletions(-) diff --git a/scripts/policy/integration/barnyard2/main.bro b/scripts/policy/integration/barnyard2/main.bro index 1d38d80809..42364e8d76 100644 --- a/scripts/policy/integration/barnyard2/main.bro +++ b/scripts/policy/integration/barnyard2/main.bro @@ -15,8 +15,8 @@ export { alert: AlertData &log; }; - ## This can convert a Barnyard :bro:type:`Barnyard2::PacketID` value to a - ## :bro:type:`conn_id` value in the case that you might need to index + ## This can convert a Barnyard :bro:type:`Barnyard2::PacketID` value to + ## a :bro:type:`conn_id` value in the case that you might need to index ## into an existing data structure elsewhere within Bro. global pid2cid: function(p: PacketID): conn_id; } diff --git a/scripts/policy/integration/barnyard2/types.bro b/scripts/policy/integration/barnyard2/types.bro index 6cfcbb9535..da7015b302 100644 --- a/scripts/policy/integration/barnyard2/types.bro +++ b/scripts/policy/integration/barnyard2/types.bro @@ -11,7 +11,7 @@ export { generator_id: count; ##< Which generator generated the alert? signature_revision: count; ##< Sig revision for this id. classification_id: count; ##< Event classification. - classification: string; ##< Descriptive classification string, + classification: string; ##< Descriptive classification string. priority_id: count; ##< Event priority. event_id: count; ##< Event ID. } &log; diff --git a/scripts/policy/integration/collective-intel/main.bro b/scripts/policy/integration/collective-intel/main.bro index a1ee7a4ab9..48459c378a 100644 --- a/scripts/policy/integration/collective-intel/main.bro +++ b/scripts/policy/integration/collective-intel/main.bro @@ -3,8 +3,8 @@ module Intel; -## These are some fields to add extended compatibility between Bro and the Collective -## Intelligence Framework +## These are some fields to add extended compatibility between Bro and the +## Collective Intelligence Framework. redef record Intel::MetaData += { ## Maps to the Impact field in the Collective Intelligence Framework. cif_impact: string &optional; diff --git a/scripts/policy/misc/capture-loss.bro b/scripts/policy/misc/capture-loss.bro index 1f0726299d..fd578ebf25 100644 --- a/scripts/policy/misc/capture-loss.bro +++ b/scripts/policy/misc/capture-loss.bro @@ -4,7 +4,7 @@ ##! the packet capture or it could even be beyond the host. If you are ##! capturing from a switch with a SPAN port, it's very possible that ##! the switch itself could be overloaded and dropping packets. -##! Reported loss is computed in terms of number of "gap events" (ACKs +##! Reported loss is computed in terms of the number of "gap events" (ACKs ##! 
for a sequence number that's above a gap). @load base/frameworks/notice @@ -26,7 +26,7 @@ export { ## The time delay between this measurement and the last. ts_delta: interval &log; ## In the event that there are multiple Bro instances logging - ## to the same host, this distinguishes each peer with it's + ## to the same host, this distinguishes each peer with its ## individual name. peer: string &log; ## Number of missed ACKs from the previous measurement interval. @@ -43,7 +43,7 @@ export { ## The percentage of missed data that is considered "too much" ## when the :bro:enum:`CaptureLoss::Too_Much_Loss` notice should be ## generated. The value is expressed as a double between 0 and 1 with 1 - ## being 100% + ## being 100%. const too_much_loss: double = 0.1 &redef; } diff --git a/scripts/policy/misc/detect-traceroute/main.bro b/scripts/policy/misc/detect-traceroute/main.bro index 6b472f2948..aa403e6a08 100644 --- a/scripts/policy/misc/detect-traceroute/main.bro +++ b/scripts/policy/misc/detect-traceroute/main.bro @@ -1,7 +1,8 @@ -##! This script detects a large number of ICMP Time Exceeded messages heading toward -##! hosts that have sent low TTL packets. It generates a notice when the number of -##! ICMP Time Exceeded messages for a source-destination pair exceeds a -##! threshold. +##! This script detects a large number of ICMP Time Exceeded messages heading +##! toward hosts that have sent low TTL packets. It generates a notice when the +##! number of ICMP Time Exceeded messages for a source-destination pair exceeds +##! a threshold. + @load base/frameworks/sumstats @load base/frameworks/signatures @load-sigs ./detect-low-ttls.sig @@ -20,15 +21,16 @@ export { Detected }; - ## By default this script requires that any host detected running traceroutes - ## first send low TTL packets (TTL < 10) to the traceroute destination host. - ## Changing this this setting to `F` will relax the detection a bit by - ## solely relying on ICMP time-exceeded messages to detect traceroute. + ## By default this script requires that any host detected running + ## traceroutes first send low TTL packets (TTL < 10) to the traceroute + ## destination host. Changing this setting to F will relax the + ## detection a bit by solely relying on ICMP time-exceeded messages to + ## detect traceroute. const require_low_ttl_packets = T &redef; - ## Defines the threshold for ICMP Time Exceeded messages for a src-dst pair. - ## This threshold only comes into play after a host is found to be - ## sending low ttl packets. + ## Defines the threshold for ICMP Time Exceeded messages for a src-dst + ## pair. This threshold only comes into play after a host is found to + ## be sending low TTL packets. const icmp_time_exceeded_threshold: double = 3 &redef; ## Interval at which to watch for the @@ -40,7 +42,7 @@ export { type Info: record { ## Timestamp ts: time &log; - ## Address initiaing the traceroute. + ## Address initiating the traceroute. src: addr &log; ## Destination address of the traceroute. dst: addr &log; diff --git a/scripts/policy/misc/known-devices.bro b/scripts/policy/misc/known-devices.bro index a7c0b314b7..16c5250d1c 100644 --- a/scripts/policy/misc/known-devices.bro +++ b/scripts/policy/misc/known-devices.bro @@ -1,7 +1,7 @@ -##! This script provides infrastructure for logging devices for which Bro has been -##! able to determine the MAC address, and it logs them once per day (by default). -##! The log that is output provides an easy way to determine a count of the devices -##! in use on a network per day. +##! 
This script provides infrastructure for logging devices for which Bro has +##! been able to determine the MAC address, and it logs them once per day (by +##! default). The log that is output provides an easy way to determine a count +##! of the devices in use on a network per day. ##! ##! .. note:: ##! @@ -15,7 +15,8 @@ export { ## The known-hosts logging stream identifier. redef enum Log::ID += { DEVICES_LOG }; - ## The record type which contains the column fields of the known-devices log. + ## The record type which contains the column fields of the known-devices + ## log. type DevicesInfo: record { ## The timestamp at which the host was detected. ts: time &log; @@ -24,10 +25,10 @@ export { }; ## The set of all known MAC addresses. It can accessed from other - ## to add, and check for, addresses seen in use. - ## - ## We maintain each entry for 24 hours by default so that the existence of - ## individual addressed is logged each day. + ## scripts to add, and check for, addresses seen in use. + ## + ## We maintain each entry for 24 hours by default so that the existence + ## of individual addresses is logged each day. global known_devices: set[string] &create_expire=1day &synchronized &redef; ## An event that can be handled to access the :bro:type:`Known::DevicesInfo` diff --git a/scripts/policy/misc/load-balancing.bro b/scripts/policy/misc/load-balancing.bro index 889d18119a..c2adf23f09 100644 --- a/scripts/policy/misc/load-balancing.bro +++ b/scripts/policy/misc/load-balancing.bro @@ -29,9 +29,10 @@ export { #global confirm_filter_installation: event(success: bool); redef record Cluster::Node += { - ## A BPF filter for load balancing traffic sniffed on a single interface - ## across a number of processes. In normal uses, this will be assigned - ## dynamically by the manager and installed by the workers. + ## A BPF filter for load balancing traffic sniffed on a single + ## interface across a number of processes. In normal uses, this + ## will be assigned dynamically by the manager and installed by + ## the workers. lb_filter: string &optional; }; } diff --git a/scripts/policy/misc/loaded-scripts.bro b/scripts/policy/misc/loaded-scripts.bro index 516826aa7e..bd6943e928 100644 --- a/scripts/policy/misc/loaded-scripts.bro +++ b/scripts/policy/misc/loaded-scripts.bro @@ -7,9 +7,9 @@ export { redef enum Log::ID += { LOG }; type Info: record { - ## Name of the script loaded potentially with spaces included before - ## the file name to indicate load depth. The convention is two spaces - ## per level of depth. + ## Name of the script loaded potentially with spaces included + ## before the file name to indicate load depth. The convention + ## is two spaces per level of depth. name: string &log; }; } @@ -36,4 +36,4 @@ event bro_init() &priority=5 event bro_script_loaded(path: string, level: count) { Log::write(LoadedScripts::LOG, [$name=cat(depth[level], compress_path(path))]); - } \ No newline at end of file + } diff --git a/scripts/policy/misc/profiling.bro b/scripts/policy/misc/profiling.bro index 31451f1a55..613e78f860 100644 --- a/scripts/policy/misc/profiling.bro +++ b/scripts/policy/misc/profiling.bro @@ -8,7 +8,8 @@ redef profiling_file = open_log_file("prof"); ## Set the cheap profiling interval. redef profiling_interval = 15 secs; -## Set the expensive profiling interval. +## Set the expensive profiling interval (multiple of +## :bro:id:`profiling_interval`). 
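# Illustrative sketch, not part of the patch: the expensive profile is written
# every profiling_interval * expensive_profiling_multiple, i.e. 15 secs * 20 =
# 5 mins with the values in this file.  A site wanting less frequent expensive
# profiling could, for example, override:
redef expensive_profiling_multiple = 120;   # 15 secs * 120 = 30 mins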
redef expensive_profiling_multiple = 20; event bro_init() diff --git a/scripts/policy/misc/scan.bro b/scripts/policy/misc/scan.bro index b1b63b74da..e458f6c450 100644 --- a/scripts/policy/misc/scan.bro +++ b/scripts/policy/misc/scan.bro @@ -1,8 +1,8 @@ -##! TCP Scan detection -##! -##! ..Authors: Sheharbano Khattak -##! Seth Hall -##! All the authors of the old scan.bro +##! TCP Scan detection. + +# ..Authors: Sheharbano Khattak +# Seth Hall +# All the authors of the old scan.bro @load base/frameworks/notice @load base/frameworks/sumstats @@ -13,37 +13,38 @@ module Scan; export { redef enum Notice::Type += { - ## Address scans detect that a host appears to be scanning some number - ## of destinations on a single port. This notice is generated when more - ## than :bro:id:`Scan::addr_scan_threshold` unique hosts are seen over - ## the previous :bro:id:`Scan::addr_scan_interval` time range. + ## Address scans detect that a host appears to be scanning some + ## number of destinations on a single port. This notice is + ## generated when more than :bro:id:`Scan::addr_scan_threshold` + ## unique hosts are seen over the previous + ## :bro:id:`Scan::addr_scan_interval` time range. Address_Scan, - ## Port scans detect that an attacking host appears to be scanning a - ## single victim host on several ports. This notice is generated when - ## an attacking host attempts to connect to + ## Port scans detect that an attacking host appears to be + ## scanning a single victim host on several ports. This notice + ## is generated when an attacking host attempts to connect to ## :bro:id:`Scan::port_scan_threshold` ## unique ports on a single host over the previous ## :bro:id:`Scan::port_scan_interval` time range. Port_Scan, }; - ## Failed connection attempts are tracked over this time interval for the address - ## scan detection. A higher interval will detect slower scanners, but may also - ## yield more false positives. + ## Failed connection attempts are tracked over this time interval for + ## the address scan detection. A higher interval will detect slower + ## scanners, but may also yield more false positives. const addr_scan_interval = 5min &redef; - ## Failed connection attempts are tracked over this time interval for the port scan - ## detection. A higher interval will detect slower scanners, but may also yield - ## more false positives. + ## Failed connection attempts are tracked over this time interval for + ## the port scan detection. A higher interval will detect slower + ## scanners, but may also yield more false positives. const port_scan_interval = 5min &redef; - ## The threshold of a unique number of hosts a scanning host has to have failed - ## connections with on a single port. + ## The threshold of the unique number of hosts a scanning host has to + ## have failed connections with on a single port. const addr_scan_threshold = 25.0 &redef; - ## The threshold of a number of unique ports a scanning host has to have failed - ## connections with on a single victim host. + ## The threshold of the number of unique ports a scanning host has to + ## have failed connections with on a single victim host. const port_scan_threshold = 15.0 &redef; global Scan::addr_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port); @@ -148,7 +149,7 @@ function is_reverse_failed_conn(c: connection): bool ## Generated for an unsuccessful connection attempt. This ## event is raised when an originator unsuccessfully attempted -## to establish a connection. 
“Unsuccessful” is defined as at least +## to establish a connection. "Unsuccessful" is defined as at least ## tcp_attempt_delay seconds having elapsed since the originator first sent a ## connection establishment packet to the destination without seeing a reply. event connection_attempt(c: connection) @@ -160,9 +161,9 @@ event connection_attempt(c: connection) add_sumstats(c$id, is_reverse_scan); } -## Generated for a rejected TCP connection. This event is raised when an originator -## attempted to setup a TCP connection but the responder replied with a RST packet -## denying it. +## Generated for a rejected TCP connection. This event is raised when an +## originator attempted to setup a TCP connection but the responder replied with +## a RST packet denying it. event connection_rejected(c: connection) { local is_reverse_scan = F; @@ -173,7 +174,8 @@ event connection_rejected(c: connection) } ## Generated when an endpoint aborted a TCP connection. The event is raised when -## one endpoint of an *established* TCP connection aborted by sending a RST packet. +## one endpoint of an *established* TCP connection aborted by sending a RST +## packet. event connection_reset(c: connection) { if ( is_failed_conn(c) ) diff --git a/scripts/policy/misc/stats.bro b/scripts/policy/misc/stats.bro index d7866fd136..7e1e4b6689 100644 --- a/scripts/policy/misc/stats.bro +++ b/scripts/policy/misc/stats.bro @@ -1,4 +1,5 @@ -##! Log memory/packet/lag statistics. Differs from profiling.bro in that this +##! Log memory/packet/lag statistics. Differs from +##! :doc:`/scripts/policy/misc/profiling` in that this ##! is lighter-weight (much less info, and less load to generate). @load base/frameworks/notice @@ -20,21 +21,23 @@ export { mem: count &log; ## Number of packets processed since the last stats interval. pkts_proc: count &log; - ## Number of events that been processed since the last stats interval. + ## Number of events processed since the last stats interval. events_proc: count &log; - ## Number of events that have been queued since the last stats interval. + ## Number of events that have been queued since the last stats + ## interval. events_queued: count &log; - ## Lag between the wall clock and packet timestamps if reading live traffic. + ## Lag between the wall clock and packet timestamps if reading + ## live traffic. lag: interval &log &optional; - ## Number of packets received since the last stats interval if reading - ## live traffic. + ## Number of packets received since the last stats interval if + ## reading live traffic. pkts_recv: count &log &optional; - ## Number of packets dropped since the last stats interval if reading - ## live traffic. + ## Number of packets dropped since the last stats interval if + ## reading live traffic. pkts_dropped: count &log &optional; - ## Number of packets seen on the link since the last stats interval - ## if reading live traffic. + ## Number of packets seen on the link since the last stats + ## interval if reading live traffic. pkts_link: count &log &optional; }; diff --git a/scripts/policy/misc/trim-trace-file.bro b/scripts/policy/misc/trim-trace-file.bro index 8a7781b628..8f534ec005 100644 --- a/scripts/policy/misc/trim-trace-file.bro +++ b/scripts/policy/misc/trim-trace-file.bro @@ -1,4 +1,4 @@ -##! Deletes the -w tracefile at regular intervals and starts a new file +##! Deletes the ``-w`` tracefile at regular intervals and starts a new file ##! from scratch. 
module TrimTraceFile; @@ -8,9 +8,9 @@ export { const trim_interval = 10 mins &redef; ## This event can be generated externally to this script if on-demand - ## tracefile rotation is required with the caveat that the script doesn't - ## currently attempt to get back on schedule automatically and the next - ## trim will likely won't happen on the + ## tracefile rotation is required with the caveat that the script + ## doesn't currently attempt to get back on schedule automatically and + ## the next trim likely won't happen on the ## :bro:id:`TrimTraceFile::trim_interval`. global go: event(first_trim: bool); } diff --git a/scripts/policy/tuning/logs-to-elasticsearch.bro b/scripts/policy/tuning/logs-to-elasticsearch.bro index 2a4b70362a..b770b8f84b 100644 --- a/scripts/policy/tuning/logs-to-elasticsearch.bro +++ b/scripts/policy/tuning/logs-to-elasticsearch.bro @@ -12,8 +12,8 @@ export { ## If you want to explicitly only send certain :bro:type:`Log::ID` ## streams, add them to this set. If the set remains empty, all will - ## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option will remain in - ## effect as well. + ## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option + ## will remain in effect as well. const send_logs: set[Log::ID] &redef; } From 9cfedccf375440aad6e3df7bc763b042bd1f7a65 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Mon, 21 Oct 2013 16:50:14 -0500 Subject: [PATCH 07/47] Fix typos and formatting in the events docs --- src/analyzer/protocol/dnp3/events.bif | 64 ++++++++++++++++++------- src/analyzer/protocol/modbus/events.bif | 25 +++++----- 2 files changed, 59 insertions(+), 30 deletions(-) diff --git a/src/analyzer/protocol/dnp3/events.bif b/src/analyzer/protocol/dnp3/events.bif index 874e0aeef7..a41f70897b 100644 --- a/src/analyzer/protocol/dnp3/events.bif +++ b/src/analyzer/protocol/dnp3/events.bif @@ -2,65 +2,93 @@ ## Generated for a DNP3 request header. ## ## c: The connection the DNP3 communication is part of. +## ## is_orig: True if this reflects originator-side activity. +## ## fc: function code. +## event dnp3_application_request_header%(c: connection, is_orig: bool, fc: count%); ## Generated for a DNP3 response header. ## ## c: The connection the DNP3 communication is part of. +## ## is_orig: True if this reflects originator-side activity. +## ## fc: function code. -## iin: internal indication number +## +## iin: internal indication number. +## event dnp3_application_response_header%(c: connection, is_orig: bool, fc: count, iin: count%); ## Generated for the object header found in both DNP3 requests and responses. ## ## c: The connection the DNP3 communication is part of. +## ## is_orig: True if this reflects originator-side activity. -## obj_type: type of object, which is classified based on an 8-bit group number and an 8-bit variation number -## qua_field: qualifier field +## +## obj_type: type of object, which is classified based on an 8-bit group number +## and an 8-bit variation number. +## +## qua_field: qualifier field. +## ## rf_low: the structure of the range field depends on the qualified field. -## In some cases, range field contains only one logic part, e.g., -## number of objects, so only *rf_low* contains the useful values. -## rf_high: in some cases, range field contain two logic parts, e.g., start -## index and stop index, so *rf_low* contains the start index while +## In some cases, the range field contains only one logic part, e.g., +## number of objects, so only *rf_low* contains useful values. 
+## +## rf_high: in some cases, the range field contains two logic parts, e.g., start +## index and stop index, so *rf_low* contains the start index ## while *rf_high* contains the stop index. +## event dnp3_object_header%(c: connection, is_orig: bool, obj_type: count, qua_field: count, number: count, rf_low: count, rf_high: count%); ## Generated for the prefix before a DNP3 object. The structure and the meaning ## of the prefix are defined by the qualifier field. ## ## c: The connection the DNP3 communication is part of. +## ## is_orig: True if this reflects originator-side activity. +## ## prefix_value: The prefix. +## event dnp3_object_prefix%(c: connection, is_orig: bool, prefix_value: count%); ## Generated for an additional header that the DNP3 analyzer passes to the -## script-level. This headers mimics the DNP3 transport-layer yet is only passed +## script-level. This header mimics the DNP3 transport-layer yet is only passed ## once for each sequence of DNP3 records (which are otherwise reassembled and ## treated as a single entity). ## ## c: The connection the DNP3 communication is part of. +## ## is_orig: True if this reflects originator-side activity. -## start: the first two bytes of the DNP3 Pseudo Link Layer; its value is fixed as 0x0564 -## len: the "length" field in the DNP3 Pseudo Link Layer -## ctrl: the "control" field in the DNP3 Pseudo Link Layer -## dest_addr: the "destination" field in the DNP3 Pseudo Link Layer -## src_addr: the "source" field in the DNP3 Pseudo Link Layer +## +## start: the first two bytes of the DNP3 Pseudo Link Layer; its value is fixed +## as 0x0564. +## +## len: the "length" field in the DNP3 Pseudo Link Layer. +## +## ctrl: the "control" field in the DNP3 Pseudo Link Layer. +## +## dest_addr: the "destination" field in the DNP3 Pseudo Link Layer. +## +## src_addr: the "source" field in the DNP3 Pseudo Link Layer. +## event dnp3_header_block%(c: connection, is_orig: bool, start: count, len: count, ctrl: count, dest_addr: count, src_addr: count%); ## Generated for a DNP3 "Response_Data_Object". ## The "Response_Data_Object" contains two parts: object prefix and object -## data. In most cases, objects data are defined by new record types. But -## in a few cases, objects data are directly basic types, such as int16, or -## int8; thus we use a additional data_value to record the values of those +## data. In most cases, object data are defined by new record types. But +## in a few cases, object data are directly basic types, such as int16, or +## int8; thus we use an additional *data_value* to record the values of those ## object data. ## ## c: The connection the DNP3 communication is part of. +## ## is_orig: True if this reflects originator-side activity. +## ## data_value: The value for those objects that carry their information here ## directly. +## event dnp3_response_data_object%(c: connection, is_orig: bool, data_value: count%); ## Generated for DNP3 attributes. @@ -238,6 +266,6 @@ event dnp3_frozen_analog_input_event_DPwTime%(c: connection, is_orig: bool, flag event dnp3_file_transport%(c: connection, is_orig: bool, file_handle: count, block_num: count, file_data: string%); ## Debugging event generated by the DNP3 analyzer. The "Debug_Byte" binpac unit -## generates this for unknown "cases". The user can use it to debug the byte string -## to check what cause the malformed network packets. +## generates this for unknown "cases". The user can use it to debug the byte +## string to check what caused the malformed network packets. 
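# Illustrative sketch, not part of the patch: a handler for the event declared
# on the next line, dumping the unparsed bytes as hex:
event dnp3_debug_byte(c: connection, is_orig: bool, debug: string)
    {
    print fmt("DNP3 debug bytes on %s: %s", c$id$orig_h,
              bytestring_to_hexstr(debug));
    }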
event dnp3_debug_byte%(c: connection, is_orig: bool, debug: string%); diff --git a/src/analyzer/protocol/modbus/events.bif b/src/analyzer/protocol/modbus/events.bif index 1cd17381ee..dbbd7b78bb 100644 --- a/src/analyzer/protocol/modbus/events.bif +++ b/src/analyzer/protocol/modbus/events.bif @@ -1,4 +1,4 @@ -## Generated for any modbus message regardless if the particular function +## Generated for any Modbus message regardless if the particular function ## is further supported or not. ## ## c: The connection. @@ -8,7 +8,7 @@ ## is_orig: True if the event is raised for the originator side. event modbus_message%(c: connection, headers: ModbusHeaders, is_orig: bool%); -## Generated for any modbus exception message. +## Generated for any Modbus exception message. ## ## c: The connection. ## @@ -23,7 +23,7 @@ event modbus_exception%(c: connection, headers: ModbusHeaders, code: count%); ## ## headers: The headers for the modbus function. ## -## start_address: The memory address where of the first coil to be read. +## start_address: The memory address of the first coil to be read. ## ## quantity: The number of coils to be read. event modbus_read_coils_request%(c: connection, headers: ModbusHeaders, start_address: count, quantity: count%); @@ -191,8 +191,8 @@ event modbus_write_multiple_registers_response%(c: connection, headers: ModbusHe ## ## headers: The headers for the modbus function. ## -## .. note: This event is incomplete. The information from the data structure is not -## yet passed through to the event. +## .. note: This event is incomplete. The information from the data structure +## is not yet passed through to the event. event modbus_read_file_record_request%(c: connection, headers: ModbusHeaders%); ## Generated for a Modbus read file record response. @@ -201,8 +201,8 @@ event modbus_read_file_record_request%(c: connection, headers: ModbusHeaders%); ## ## headers: The headers for the modbus function. ## -## .. note: This event is incomplete. The information from the data structure is not -## yet passed through to the event. +## .. note: This event is incomplete. The information from the data structure +## is not yet passed through to the event. event modbus_read_file_record_response%(c: connection, headers: ModbusHeaders%); ## Generated for a Modbus write file record request. @@ -211,8 +211,8 @@ event modbus_read_file_record_response%(c: connection, headers: ModbusHeaders%); ## ## headers: The headers for the modbus function. ## -## .. note: This event is incomplete. The information from the data structure is not -## yet passed through to the event. +## .. note: This event is incomplete. The information from the data structure +## is not yet passed through to the event. event modbus_write_file_record_request%(c: connection, headers: ModbusHeaders%); ## Generated for a Modbus write file record response. @@ -221,8 +221,8 @@ event modbus_write_file_record_request%(c: connection, headers: ModbusHeaders%); ## ## headers: The headers for the modbus function. ## -## .. note: This event is incomplete. The information from the data structure is not -## yet passed through to the event. +## .. note: This event is incomplete. The information from the data structure +## is not yet passed through to the event. event modbus_write_file_record_response%(c: connection, headers: ModbusHeaders%); ## Generated for a Modbus mask write register request. @@ -272,7 +272,8 @@ event modbus_read_write_multiple_registers_request%(c: connection, headers: Modb ## ## headers: The headers for the modbus function. 
## -## written_registers: The register values read from the registers specified in the request. +## written_registers: The register values read from the registers specified in +## the request. event modbus_read_write_multiple_registers_response%(c: connection, headers: ModbusHeaders, written_registers: ModbusRegisters%); ## Generated for a Modbus read FIFO queue request. From 5a0e3dda7e7dc41b30ae80b062a3e0861ac22251 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 22 Oct 2013 09:16:29 -0500 Subject: [PATCH 08/47] Fix typos and formatting in the notice framework docs --- .../base/frameworks/notice/actions/drop.bro | 6 +- .../base/frameworks/notice/actions/page.bro | 8 +- .../frameworks/notice/actions/pp-alarms.bro | 2 +- scripts/base/frameworks/notice/cluster.bro | 2 +- scripts/base/frameworks/notice/main.bro | 167 ++++++++++-------- .../base/frameworks/notice/non-cluster.bro | 2 +- scripts/base/frameworks/notice/weird.bro | 24 +-- 7 files changed, 113 insertions(+), 98 deletions(-) diff --git a/scripts/base/frameworks/notice/actions/drop.bro b/scripts/base/frameworks/notice/actions/drop.bro index 1befd8644f..aaed27bc4a 100644 --- a/scripts/base/frameworks/notice/actions/drop.bro +++ b/scripts/base/frameworks/notice/actions/drop.bro @@ -7,12 +7,14 @@ module Notice; export { redef enum Action += { - ## Drops the address via Drop::drop_address, and generates an alarm. + ## Drops the address via Drop::drop_address, and generates an + ## alarm. ACTION_DROP }; redef record Info += { - ## Indicate if the $src IP address was dropped and denied network access. + ## Indicate if the $src IP address was dropped and denied + ## network access. dropped: bool &log &default=F; }; } diff --git a/scripts/base/frameworks/notice/actions/page.bro b/scripts/base/frameworks/notice/actions/page.bro index e29b2bf0ee..1699af186b 100644 --- a/scripts/base/frameworks/notice/actions/page.bro +++ b/scripts/base/frameworks/notice/actions/page.bro @@ -6,12 +6,14 @@ module Notice; export { redef enum Action += { - ## Indicates that the notice should be sent to the pager email address - ## configured in the :bro:id:`Notice::mail_page_dest` variable. + ## Indicates that the notice should be sent to the pager email + ## address configured in the :bro:id:`Notice::mail_page_dest` + ## variable. ACTION_PAGE }; - ## Email address to send notices with the :bro:enum:`Notice::ACTION_PAGE` action. + ## Email address to send notices with the :bro:enum:`Notice::ACTION_PAGE` + ## action. const mail_page_dest = "" &redef; } diff --git a/scripts/base/frameworks/notice/actions/pp-alarms.bro b/scripts/base/frameworks/notice/actions/pp-alarms.bro index 52312c2624..2b3b2d8b08 100644 --- a/scripts/base/frameworks/notice/actions/pp-alarms.bro +++ b/scripts/base/frameworks/notice/actions/pp-alarms.bro @@ -19,7 +19,7 @@ export { ## then highlight such lines differently. global flag_nets: set[subnet] &redef; - ## Function that renders a single alarm. Can be overidden. + ## Function that renders a single alarm. Can be overridden. global pretty_print_alarm: function(out: file, n: Info) &redef; ## Force generating mail file, even if reading from traces or no mail diff --git a/scripts/base/frameworks/notice/cluster.bro b/scripts/base/frameworks/notice/cluster.bro index c722f37bab..f197761acf 100644 --- a/scripts/base/frameworks/notice/cluster.bro +++ b/scripts/base/frameworks/notice/cluster.bro @@ -17,7 +17,7 @@ export { ## Manager can communicate notice suppression to workers. 
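# Illustrative sketch, not part of the patch: user scripts follow the same
# pattern as the redefs below to propagate their own events across the cluster
# (MyModule::config_update is a hypothetical event name):
redef Cluster::manager2worker_events += /MyModule::config_update/;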
redef Cluster::manager2worker_events += /Notice::begin_suppression/; -## Workers needs need ability to forward notices to manager. +## Workers need ability to forward notices to manager. redef Cluster::worker2manager_events += /Notice::cluster_notice/; @if ( Cluster::local_node_type() != Cluster::MANAGER ) diff --git a/scripts/base/frameworks/notice/main.bro b/scripts/base/frameworks/notice/main.bro index facd148491..fbd55c6de7 100644 --- a/scripts/base/frameworks/notice/main.bro +++ b/scripts/base/frameworks/notice/main.bro @@ -1,7 +1,7 @@ ##! This is the notice framework which enables Bro to "notice" things which ##! are odd or potentially bad. Decisions of the meaning of various notices ##! need to be done per site because Bro does not ship with assumptions about -##! what is bad activity for sites. More extensive documetation about using +##! what is bad activity for sites. More extensive documentation about using ##! the notice framework can be found in :doc:`/frameworks/notice`. module Notice; @@ -14,13 +14,13 @@ export { ALARM_LOG, }; - ## Scripts creating new notices need to redef this enum to add their own - ## specific notice types which would then get used when they call the - ## :bro:id:`NOTICE` function. The convention is to give a general category - ## along with the specific notice separating words with underscores and - ## using leading capitals on each word except for abbreviations which are - ## kept in all capitals. For example, SSH::Login is for heuristically - ## guessed successful SSH logins. + ## Scripts creating new notices need to redef this enum to add their + ## own specific notice types which would then get used when they call + ## the :bro:id:`NOTICE` function. The convention is to give a general + ## category along with the specific notice separating words with + ## underscores and using leading capitals on each word except for + ## abbreviations which are kept in all capitals. For example, + ## SSH::Login is for heuristically guessed successful SSH logins. type Type: enum { ## Notice reporting a count of how often a notice occurred. Tally, @@ -30,67 +30,72 @@ export { type Action: enum { ## Indicates that there is no action to be taken. ACTION_NONE, - ## Indicates that the notice should be sent to the notice logging stream. + ## Indicates that the notice should be sent to the notice + ## logging stream. ACTION_LOG, - ## Indicates that the notice should be sent to the email address(es) - ## configured in the :bro:id:`Notice::mail_dest` variable. + ## Indicates that the notice should be sent to the email + ## address(es) configured in the :bro:id:`Notice::mail_dest` + ## variable. ACTION_EMAIL, - ## Indicates that the notice should be alarmed. A readable ASCII - ## version of the alarm log is emailed in bulk to the address(es) - ## configured in :bro:id:`Notice::mail_dest`. + ## Indicates that the notice should be alarmed. A readable + ## ASCII version of the alarm log is emailed in bulk to the + ## address(es) configured in :bro:id:`Notice::mail_dest`. ACTION_ALARM, }; type ActionSet: set[Notice::Action]; - ## The notice framework is able to do automatic notice supression by - ## utilizing the $identifier field in :bro:type:`Notice::Info` records. - ## Set this to "0secs" to completely disable automated notice suppression. + ## The notice framework is able to do automatic notice suppression by + ## utilizing the *identifier* field in :bro:type:`Notice::Info` records. + ## Set this to "0secs" to completely disable automated notice + ## suppression. 
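# Illustrative sketch, not part of the patch: as described above, automated
# notice suppression can be disabled entirely from a site's local configuration:
redef Notice::default_suppression_interval = 0secs;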
const default_suppression_interval = 1hrs &redef; type Info: record { - ## An absolute time indicating when the notice occurred, defaults - ## to the current network time. + ## An absolute time indicating when the notice occurred, + ## defaults to the current network time. ts: time &log &optional; ## A connection UID which uniquely identifies the endpoints ## concerned with the notice. uid: string &log &optional; - ## A connection 4-tuple identifying the endpoints concerned with the - ## notice. + ## A connection 4-tuple identifying the endpoints concerned + ## with the notice. id: conn_id &log &optional; ## A shorthand way of giving the uid and id to a notice. The - ## reference to the actual connection will be deleted after applying - ## the notice policy. + ## reference to the actual connection will be deleted after + ## applying the notice policy. conn: connection &optional; ## A shorthand way of giving the uid and id to a notice. The - ## reference to the actual connection will be deleted after applying - ## the notice policy. + ## reference to the actual connection will be deleted after + ## applying the notice policy. iconn: icmp_conn &optional; - ## A file record if the notice is relted to a file. The - ## reference to the actual fa_file record will be deleted after applying - ## the notice policy. + ## A file record if the notice is related to a file. The + ## reference to the actual fa_file record will be deleted after + ## applying the notice policy. f: fa_file &optional; - ## A file unique ID if this notice is related to a file. If the $f - ## field is provided, this will be automatically filled out. + ## A file unique ID if this notice is related to a file. If + ## the *f* field is provided, this will be automatically filled + ## out. fuid: string &log &optional; - ## A mime type if the notice is related to a file. If the $f field - ## is provided, this will be automatically filled out. + ## A mime type if the notice is related to a file. If the *f* + ## field is provided, this will be automatically filled out. file_mime_type: string &log &optional; - ## Frequently files can be "described" to give a bit more context. - ## This field will typically be automatically filled out from an - ## fa_file record. For example, if a notice was related to a - ## file over HTTP, the URL of the request would be shown. + ## Frequently files can be "described" to give a bit more + ## context. This field will typically be automatically filled + ## out from an fa_file record. For example, if a notice was + ## related to a file over HTTP, the URL of the request would + ## be shown. file_desc: string &log &optional; - ## The transport protocol. Filled automatically when either conn, iconn - ## or p is specified. + ## The transport protocol. Filled automatically when either + ## *conn*, *iconn* or *p* is specified. proto: transport_proto &log &optional; ## The :bro:type:`Notice::Type` of the notice. @@ -117,38 +122,42 @@ export { ## The actions which have been applied to this notice. actions: ActionSet &log &default=ActionSet(); - ## By adding chunks of text into this element, other scripts can - ## expand on notices that are being emailed. The normal way to add text - ## is to extend the vector by handling the :bro:id:`Notice::notice` - ## event and modifying the notice in place. + ## By adding chunks of text into this element, other scripts + ## can expand on notices that are being emailed. 
The normal + ## way to add text is to extend the vector by handling the + ## :bro:id:`Notice::notice` event and modifying the notice in + ## place. email_body_sections: vector of string &optional; - ## Adding a string "token" to this set will cause the notice framework's - ## built-in emailing functionality to delay sending the email until - ## either the token has been removed or the email has been delayed - ## for :bro:id:`Notice::max_email_delay`. + ## Adding a string "token" to this set will cause the notice + ## framework's built-in emailing functionality to delay sending + ## the email until either the token has been removed or the + ## email has been delayed for :bro:id:`Notice::max_email_delay`. email_delay_tokens: set[string] &optional; - ## This field is to be provided when a notice is generated for the - ## purpose of deduplicating notices. The identifier string should - ## be unique for a single instance of the notice. This field should be - ## filled out in almost all cases when generating notices to define - ## when a notice is conceptually a duplicate of a previous notice. + ## This field is to be provided when a notice is generated for + ## the purpose of deduplicating notices. The identifier string + ## should be unique for a single instance of the notice. This + ## field should be filled out in almost all cases when + ## generating notices to define when a notice is conceptually + ## a duplicate of a previous notice. ## - ## For example, an SSL certificate that is going to expire soon should - ## always have the same identifier no matter the client IP address - ## that connected and resulted in the certificate being exposed. In - ## this case, the resp_h, resp_p, and hash of the certificate would be - ## used to create this value. The hash of the cert is included - ## because servers can return multiple certificates on the same port. + ## For example, an SSL certificate that is going to expire soon + ## should always have the same identifier no matter the client + ## IP address that connected and resulted in the certificate + ## being exposed. In this case, the resp_h, resp_p, and hash + ## of the certificate would be used to create this value. The + ## hash of the cert is included because servers can return + ## multiple certificates on the same port. ## - ## Another example might be a host downloading a file which triggered - ## a notice because the MD5 sum of the file it downloaded was known - ## by some set of intelligence. In that case, the orig_h (client) - ## and MD5 sum would be used in this field to dedup because if the - ## same file is downloaded over and over again you really only want to - ## know about it a single time. This makes it possible to send those - ## notices to email without worrying so much about sending thousands + ## Another example might be a host downloading a file which + ## triggered a notice because the MD5 sum of the file it + ## downloaded was known by some set of intelligence. In that + ## case, the orig_h (client) and MD5 sum would be used in this + ## field to dedup because if the same file is downloaded over + ## and over again you really only want to know about it a + ## single time. This makes it possible to send those notices + ## to email without worrying so much about sending thousands ## of emails. identifier: string &optional; @@ -174,9 +183,9 @@ export { ## Local system sendmail program. 
const sendmail = "/usr/sbin/sendmail" &redef; - ## Email address to send notices with the :bro:enum:`Notice::ACTION_EMAIL` - ## action or to send bulk alarm logs on rotation with - ## :bro:enum:`Notice::ACTION_ALARM`. + ## Email address to send notices with the + ## :bro:enum:`Notice::ACTION_EMAIL` action or to send bulk alarm logs + ## on rotation with :bro:enum:`Notice::ACTION_ALARM`. const mail_dest = "" &redef; ## Address that emails will be from. @@ -198,9 +207,9 @@ export { global log_mailing_postprocessor: function(info: Log::RotationInfo): bool; ## This is the event that is called as the entry point to the - ## notice framework by the global :bro:id:`NOTICE` function. By the time - ## this event is generated, default values have already been filled out in - ## the :bro:type:`Notice::Info` record and the notice + ## notice framework by the global :bro:id:`NOTICE` function. By the + ## time this event is generated, default values have already been + ## filled out in the :bro:type:`Notice::Info` record and the notice ## policy has also been applied. ## ## n: The record containing notice data. @@ -217,7 +226,8 @@ export { ## n: The record containing the notice in question. global is_being_suppressed: function(n: Notice::Info): bool; - ## This event is generated on each occurence of an event being suppressed. + ## This event is generated on each occurrence of an event being + ## suppressed. ## ## n: The record containing notice data regarding the notice type ## being suppressed. @@ -237,18 +247,19 @@ export { ## ## dest: The intended recipient of the notice email. ## - ## extend: Whether to extend the email using the ``email_body_sections`` - ## field of *n*. + ## extend: Whether to extend the email using the + ## ``email_body_sections`` field of *n*. global email_notice_to: function(n: Info, dest: string, extend: bool); ## Constructs mail headers to which an email body can be appended for ## sending with sendmail. ## - ## subject_desc: a subject string to use for the mail + ## subject_desc: a subject string to use for the mail. ## - ## dest: recipient string to use for the mail + ## dest: recipient string to use for the mail. ## - ## Returns: a string of mail headers to which an email body can be appended + ## Returns: a string of mail headers to which an email body can be + ## appended. global email_headers: function(subject_desc: string, dest: string): string; ## This event can be handled to access the :bro:type:`Notice::Info` @@ -257,8 +268,8 @@ export { ## rec: The record containing notice data before it is logged. global log_notice: event(rec: Info); - ## This is an internal wrapper for the global :bro:id:`NOTICE` function; - ## disregard. + ## This is an internal wrapper for the global :bro:id:`NOTICE` + ## function; disregard. ## ## n: The record of notice data. global internal_NOTICE: function(n: Notice::Info); diff --git a/scripts/base/frameworks/notice/non-cluster.bro b/scripts/base/frameworks/notice/non-cluster.bro index 002ec0da34..a9f503a9f3 100644 --- a/scripts/base/frameworks/notice/non-cluster.bro +++ b/scripts/base/frameworks/notice/non-cluster.bro @@ -3,7 +3,7 @@ module GLOBAL; -## This is the entry point in the global namespace for notice framework. +## This is the entry point in the global namespace for the notice framework. function NOTICE(n: Notice::Info) { # Suppress this notice if necessary. 
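
As a point of reference for the fields documented above, a minimal script that
raises a notice could look like the sketch below. The module name, notice type,
and threshold are purely illustrative and not part of the shipped scripts; the
sketch only exercises the documented ``$note``, ``$msg``, ``$conn``, and
``$identifier`` fields.

.. code:: bro

    @load base/frameworks/notice

    module ExampleNotice;

    export {
        redef enum Notice::Type += {
            ## A connection moved more data than a local threshold allows.
            Large_Transfer,
        };
    }

    event connection_state_remove(c: connection)
        {
        if ( c$orig$size + c$resp$size > 1073741824 )
            NOTICE([$note=ExampleNotice::Large_Transfer,
                    $msg=fmt("%s moved over 1GB in one connection", c$id$orig_h),
                    $conn=c,
                    # Deduplicate on the endpoint pair.
                    $identifier=cat(c$id$orig_h, c$id$resp_h)]);
        }

Because ``$identifier`` is set, repeats of this notice for the same endpoint
pair are suppressed for ``Notice::default_suppression_interval``, as described
in the documentation above.
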
diff --git a/scripts/base/frameworks/notice/weird.bro b/scripts/base/frameworks/notice/weird.bro index f894a42464..e7faf38df4 100644 --- a/scripts/base/frameworks/notice/weird.bro +++ b/scripts/base/frameworks/notice/weird.bro @@ -26,8 +26,8 @@ export { type Info: record { ## The time when the weird occurred. ts: time &log; - ## If a connection is associated with this weird, this will be the - ## connection's unique ID. + ## If a connection is associated with this weird, this will be + ## the connection's unique ID. uid: string &log &optional; ## conn_id for the optional connection. id: conn_id &log &optional; @@ -37,16 +37,16 @@ export { addl: string &log &optional; ## Indicate if this weird was also turned into a notice. notice: bool &log &default=F; - ## The peer that originated this weird. This is helpful in cluster - ## deployments if a particular cluster node is having trouble to help - ## identify which node is having trouble. + ## The peer that originated this weird. This is helpful in + ## cluster deployments if a particular cluster node is having + ## trouble to help identify which node is having trouble. peer: string &log &optional; }; ## Types of actions that may be taken when handling weird activity events. type Action: enum { - ## A dummy action indicating the user does not care what internal - ## decision is made regarding a given type of weird. + ## A dummy action indicating the user does not care what + ## internal decision is made regarding a given type of weird. ACTION_UNSPECIFIED, ## No action is to be taken. ACTION_IGNORE, @@ -252,16 +252,16 @@ export { ## a unique weird every ``create_expire`` interval. global weird_ignore: set[string, string] &create_expire=10min &redef; - ## A state set which tracks unique weirds solely by the name to reduce - ## duplicate logging. This is not synchronized deliberately because it - ## could cause overload during storms + ## A state set which tracks unique weirds solely by name to reduce + ## duplicate logging. This is deliberately not synchronized because it + ## could cause overload during storms. global did_log: set[string, string] &create_expire=1day &redef; - ## A state set which tracks unique weirds solely by the name to reduce + ## A state set which tracks unique weirds solely by name to reduce ## duplicate notices from being raised. global did_notice: set[string, string] &create_expire=1day &redef; - ## Handlers of this event are invoked one per write to the weird + ## Handlers of this event are invoked once per write to the weird ## logging stream before the data is actually written. ## ## rec: The weird columns about to be logged to the weird stream. From f5d6931f0019001cc92da42a46fb1c52155fca95 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 22 Oct 2013 09:37:43 -0500 Subject: [PATCH 09/47] Fix typos and formatting in the signature framework docs --- scripts/base/frameworks/signatures/main.bro | 56 +++++++++++---------- 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/scripts/base/frameworks/signatures/main.bro b/scripts/base/frameworks/signatures/main.bro index 42876220f6..8448588120 100644 --- a/scripts/base/frameworks/signatures/main.bro +++ b/scripts/base/frameworks/signatures/main.bro @@ -11,21 +11,23 @@ export { redef enum Notice::Type += { ## Generic notice type for notice-worthy signature matches. Sensitive_Signature, - ## Host has triggered many signatures on the same host. The number of - ## signatures is defined by the + ## Host has triggered many signatures on the same host. 
The + ## number of signatures is defined by the ## :bro:id:`Signatures::vert_scan_thresholds` variable. Multiple_Signatures, - ## Host has triggered the same signature on multiple hosts as defined - ## by the :bro:id:`Signatures::horiz_scan_thresholds` variable. + ## Host has triggered the same signature on multiple hosts as + ## defined by the :bro:id:`Signatures::horiz_scan_thresholds` + ## variable. Multiple_Sig_Responders, - ## The same signature has triggered multiple times for a host. The - ## number of times the signature has been triggered is defined by the - ## :bro:id:`Signatures::count_thresholds` variable. To generate this - ## notice, the :bro:enum:`Signatures::SIG_COUNT_PER_RESP` action must - ## bet set for the signature. + ## The same signature has triggered multiple times for a host. + ## The number of times the signature has been triggered is + ## defined by the :bro:id:`Signatures::count_thresholds` + ## variable. To generate this notice, the + ## :bro:enum:`Signatures::SIG_COUNT_PER_RESP` action must be + ## set for the signature. Count_Signature, - ## Summarize the number of times a host triggered a signature. The - ## interval between summaries is defined by the + ## Summarize the number of times a host triggered a signature. + ## The interval between summaries is defined by the ## :bro:id:`Signatures::summary_interval` variable. Signature_Summary, }; @@ -37,11 +39,12 @@ export { ## All of them write the signature record to the logging stream unless ## declared otherwise. type Action: enum { - ## Ignore this signature completely (even for scan detection). Don't - ## write to the signatures logging stream. + ## Ignore this signature completely (even for scan detection). + ## Don't write to the signatures logging stream. SIG_IGNORE, - ## Process through the various aggregate techniques, but don't report - ## individually and don't write to the signatures logging stream. + ## Process through the various aggregate techniques, but don't + ## report individually and don't write to the signatures logging + ## stream. SIG_QUIET, ## Generate a notice. SIG_LOG, @@ -64,20 +67,21 @@ export { ## The record type which contains the column fields of the signature log. type Info: record { - ## The network time at which a signature matching type of event to - ## be logged has occurred. + ## The network time at which a signature matching type of event + ## to be logged has occurred. ts: time &log; ## The host which triggered the signature match event. src_addr: addr &log &optional; - ## The host port on which the signature-matching activity occurred. + ## The host port on which the signature-matching activity + ## occurred. src_port: port &log &optional; - ## The destination host which was sent the payload that triggered the - ## signature match. + ## The destination host which was sent the payload that + ## triggered the signature match. dst_addr: addr &log &optional; - ## The destination host port which was sent the payload that triggered - ## the signature match. + ## The destination host port which was sent the payload that + ## triggered the signature match. dst_port: port &log &optional; - ## Notice associated with signature event + ## Notice associated with signature event. note: Notice::Type &log; ## The name of the signature that matched. sig_id: string &log &optional; @@ -103,8 +107,8 @@ export { ## different responders has reached one of the thresholds. 
const horiz_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef; - ## Generate a notice if, for a pair [orig, resp], the number of different - ## signature matches has reached one of the thresholds. + ## Generate a notice if, for a pair [orig, resp], the number of + ## different signature matches has reached one of the thresholds. const vert_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef; ## Generate a notice if a :bro:enum:`Signatures::SIG_COUNT_PER_RESP` @@ -112,7 +116,7 @@ export { const count_thresholds = { 5, 10, 50, 100, 500, 1000, 10000, 1000000, } &redef; ## The interval between when :bro:enum:`Signatures::Signature_Summary` - ## notice are generated. + ## notices are generated. const summary_interval = 1 day &redef; ## This event can be handled to access/alter data about to be logged From bb14a44c2f6f67d6c83eab7121a4e153d09e9c94 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 22 Oct 2013 12:08:19 -0500 Subject: [PATCH 10/47] Make RFC links in the docs more consistent --- src/analyzer/protocol/http/functions.bif | 10 +++++----- src/analyzer/protocol/login/events.bif | 6 ++---- src/analyzer/protocol/netbios/events.bif | 16 ++++++++-------- src/analyzer/protocol/ssl/events.bif | 2 +- 4 files changed, 16 insertions(+), 18 deletions(-) diff --git a/src/analyzer/protocol/http/functions.bif b/src/analyzer/protocol/http/functions.bif index c4e5df80d5..6db5e8b862 100644 --- a/src/analyzer/protocol/http/functions.bif +++ b/src/analyzer/protocol/http/functions.bif @@ -42,11 +42,11 @@ function skip_http_entity_data%(c: connection, is_orig: bool%): any ## ## .. note:: ## -## Unescaping reserved characters may cause loss of information. RFC 2396: -## A URI is always in an "escaped" form, since escaping or unescaping a -## completed URI might change its semantics. Normally, the only time -## escape encodings can safely be made is when the URI is being created -## from its component parts. +## Unescaping reserved characters may cause loss of information. +## :rfc:`2396`: A URI is always in an "escaped" form, since escaping or +## unescaping a completed URI might change its semantics. Normally, the +## only time escape encodings can safely be made is when the URI is +## being created from its component parts. function unescape_URI%(URI: string%): string %{ const u_char* line = URI->Bytes(); diff --git a/src/analyzer/protocol/login/events.bif b/src/analyzer/protocol/login/events.bif index 68f1c3cf11..91c58f21c4 100644 --- a/src/analyzer/protocol/login/events.bif +++ b/src/analyzer/protocol/login/events.bif @@ -1,7 +1,6 @@ ## Generated for client side commands on an RSH connection. ## -## See `RFC 1258 `__ for more information -## about the Rlogin/Rsh protocol. +## See :rfc:`1258` for more information about the Rlogin/Rsh protocol. ## ## c: The connection. ## @@ -30,8 +29,7 @@ event rsh_request%(c: connection, client_user: string, server_user: string, line ## Generated for client side commands on an RSH connection. ## -## See `RFC 1258 `__ for more information -## about the Rlogin/Rsh protocol. +## See :rfc:`1258` for more information about the Rlogin/Rsh protocol. ## ## c: The connection. ## diff --git a/src/analyzer/protocol/netbios/events.bif b/src/analyzer/protocol/netbios/events.bif index bf382e1663..72933f1e49 100644 --- a/src/analyzer/protocol/netbios/events.bif +++ b/src/analyzer/protocol/netbios/events.bif @@ -3,7 +3,7 @@ ## its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information -## about NetBIOS. 
`RFC 1002 `__ describes +## about NetBIOS. :rfc:`1002` describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the @@ -12,7 +12,7 @@ ## is_orig: True if the message was sent by the originator of the connection. ## ## msg_type: The general type of message, as defined in Section 4.3.1 of -## `RFC 1002 `__. +## :rfc:`1002`. ## ## data_len: The length of the message's payload. ## @@ -35,7 +35,7 @@ event netbios_session_message%(c: connection, is_orig: bool, msg_type: count, da ## (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information -## about NetBIOS. `RFC 1002 `__ describes +## about NetBIOS. :rfc:`1002` describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the @@ -63,7 +63,7 @@ event netbios_session_request%(c: connection, msg: string%); ## 139, and (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information -## about NetBIOS. `RFC 1002 `__ describes +## about NetBIOS. :rfc:`1002` describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the @@ -91,7 +91,7 @@ event netbios_session_accepted%(c: connection, msg: string%); ## 139, and (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information -## about NetBIOS. `RFC 1002 `__ describes +## about NetBIOS. :rfc:`1002` describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the @@ -121,7 +121,7 @@ event netbios_session_rejected%(c: connection, msg: string%); ## 139, and (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information -## about NetBIOS. `RFC 1002 `__ describes +## about NetBIOS. :rfc:`1002` describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the @@ -154,7 +154,7 @@ event netbios_session_raw_message%(c: connection, is_orig: bool, msg: string%); ## (despite its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information -## about NetBIOS. `RFC 1002 `__ describes +## about NetBIOS. :rfc:`1002` describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the @@ -184,7 +184,7 @@ event netbios_session_ret_arg_resp%(c: connection, msg: string%); ## its name!) the NetBIOS datagram service on UDP port 138. ## ## See `Wikipedia `__ for more information -## about NetBIOS. `RFC 1002 `__ describes +## about NetBIOS. :rfc:`1002` describes ## the packet format for NetBIOS over TCP/IP, which Bro parses. ## ## c: The connection, which may be TCP or UDP, depending on the type of the diff --git a/src/analyzer/protocol/ssl/events.bif b/src/analyzer/protocol/ssl/events.bif index 56e5ef59c0..b673954e53 100644 --- a/src/analyzer/protocol/ssl/events.bif +++ b/src/analyzer/protocol/ssl/events.bif @@ -123,7 +123,7 @@ event ssl_alert%(c: connection, is_orig: bool, level: count, desc: count%); ## an unencrypted handshake, and Bro extracts as much information out of that ## as it can. 
This event is raised when an SSL/TLS server passes a session ## ticket to the client that can later be used for resuming the session. The -## mechanism is described in :rfc:`4507` +## mechanism is described in :rfc:`4507`. ## ## See `Wikipedia `__ for ## more information about the SSL/TLS protocol. From e724e5d392b27f48e074412a2fd3efc5e8dc1f47 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 22 Oct 2013 13:46:09 -0500 Subject: [PATCH 11/47] Do not include documentation of commented-out items Some documentation of commented-out items was being output to HTML, which was causing that documentation to be merged with the documentation of the next item. Fixed by changing "##" to "#" so that the doc comments are not included in the HTML. --- scripts/base/frameworks/sumstats/cluster.bro | 4 ++-- scripts/base/utils/active-http.bro | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/scripts/base/frameworks/sumstats/cluster.bro b/scripts/base/frameworks/sumstats/cluster.bro index ed51a95e30..42311b8687 100644 --- a/scripts/base/frameworks/sumstats/cluster.bro +++ b/scripts/base/frameworks/sumstats/cluster.bro @@ -28,8 +28,8 @@ export { ## values for a sumstat. global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool); - ## Event sent by nodes that are collecting sumstats after receiving a - ## request for the sumstat from the manager. + # Event sent by nodes that are collecting sumstats after receiving a + # request for the sumstat from the manager. #global cluster_ss_response: event(uid: string, ss_name: string, data: ResultTable, done: bool, cleanup: bool); ## This event is sent by the manager in a cluster to initiate the diff --git a/scripts/base/utils/active-http.bro b/scripts/base/utils/active-http.bro index c289691a75..9f62e7bbaa 100644 --- a/scripts/base/utils/active-http.bro +++ b/scripts/base/utils/active-http.bro @@ -32,9 +32,11 @@ export { ## mind that you will probably need to set the *method* field ## to "POST" or "PUT". client_data: string &optional; - ## Arbitrary headers to pass to the server. Some headers - ## will be included by libCurl. + + # Arbitrary headers to pass to the server. Some headers + # will be included by libCurl. #custom_headers: table[string] of string &optional; + ## Timeout for the request. max_time: interval &default=default_max_time; ## Additional curl command line arguments. Be very careful From 181f1803740c2c93068c2530bf7ce9c71d86a1cc Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 22 Oct 2013 14:42:23 -0500 Subject: [PATCH 12/47] Fix doc comment so the text gets included in the HTML --- scripts/policy/misc/app-stats/main.bro | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/policy/misc/app-stats/main.bro b/scripts/policy/misc/app-stats/main.bro index 24c9ac2ade..3a0219db6e 100644 --- a/scripts/policy/misc/app-stats/main.bro +++ b/scripts/policy/misc/app-stats/main.bro @@ -1,5 +1,5 @@ -#! AppStats collects information about web applications in use -#! on the network. +##! AppStats collects information about web applications in use +##! on the network. @load base/protocols/http @load base/protocols/ssl From 1b26c05c2cf1788ceec3043bf14a53670be79ab0 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 22 Oct 2013 14:44:59 -0500 Subject: [PATCH 13/47] Add more script package README files The text from these README files appears on the "Bro Script Packages" page after building the documentation. 
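
Both the README additions here and the comment-style fixes in the commits above
hinge on the documentation generator's conventions: ``##!`` comments document
the script itself, ``##`` comments attach to the next declaration, and plain
``#`` comments are ignored by the HTML build. A small sketch of the three forms
(the module and option names are invented for illustration):

.. code:: bro

    ##! One-line summary of what this script provides; this text ends up
    ##! on the script's generated documentation page.

    module ExampleDoc;

    export {
        ## Documented option; this comment attaches to the declaration
        ## below and is rendered in the HTML output.
        const enabled = T &redef;

        # Commented-out or internal items should use a single "#" so the
        # generator does not merge their text into the next item's docs.
        #const internal_option = F &redef;
    }
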
--- scripts/base/frameworks/notice/README | 4 ++++ scripts/policy/integration/barnyard2/README | 1 + scripts/policy/misc/app-stats/README | 1 + scripts/policy/misc/detect-traceroute/README | 1 + scripts/policy/tuning/defaults/README | 2 ++ 5 files changed, 9 insertions(+) create mode 100644 scripts/base/frameworks/notice/README create mode 100644 scripts/policy/integration/barnyard2/README create mode 100644 scripts/policy/misc/app-stats/README create mode 100644 scripts/policy/misc/detect-traceroute/README create mode 100644 scripts/policy/tuning/defaults/README diff --git a/scripts/base/frameworks/notice/README b/scripts/base/frameworks/notice/README new file mode 100644 index 0000000000..c46a8a7e5c --- /dev/null +++ b/scripts/base/frameworks/notice/README @@ -0,0 +1,4 @@ +The notice framework enables Bro to "notice" things which are odd or +potentially bad, leaving it to the local configuration to define which +of them are actionable. This decoupling of detection and reporting allows +Bro to be customized to the different needs that sites have. diff --git a/scripts/policy/integration/barnyard2/README b/scripts/policy/integration/barnyard2/README new file mode 100644 index 0000000000..a792668397 --- /dev/null +++ b/scripts/policy/integration/barnyard2/README @@ -0,0 +1 @@ +Integration with Barnyard2. diff --git a/scripts/policy/misc/app-stats/README b/scripts/policy/misc/app-stats/README new file mode 100644 index 0000000000..a0fe433cc8 --- /dev/null +++ b/scripts/policy/misc/app-stats/README @@ -0,0 +1 @@ +AppStats collects information about web applications in use on the network. diff --git a/scripts/policy/misc/detect-traceroute/README b/scripts/policy/misc/detect-traceroute/README new file mode 100644 index 0000000000..f3d9abf634 --- /dev/null +++ b/scripts/policy/misc/detect-traceroute/README @@ -0,0 +1 @@ +Detect hosts that are running traceroute. diff --git a/scripts/policy/tuning/defaults/README b/scripts/policy/tuning/defaults/README new file mode 100644 index 0000000000..d5417588c9 --- /dev/null +++ b/scripts/policy/tuning/defaults/README @@ -0,0 +1,2 @@ +Sets various defaults, and prints warning messages to stdout under +certain conditions. From 4b8171f74a18d28a51625d40861c57ac478c9ab4 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Tue, 22 Oct 2013 16:40:29 -0500 Subject: [PATCH 14/47] Document which Bro script vars are set by BroControl --- scripts/base/frameworks/cluster/main.bro | 1 + scripts/base/frameworks/communication/main.bro | 8 ++++++-- scripts/base/frameworks/logging/main.bro | 9 ++++++++- scripts/base/frameworks/notice/actions/pp-alarms.bro | 2 ++ scripts/base/frameworks/notice/main.bro | 9 +++++++++ scripts/base/utils/site.bro | 3 ++- 6 files changed, 28 insertions(+), 4 deletions(-) diff --git a/scripts/base/frameworks/cluster/main.bro b/scripts/base/frameworks/cluster/main.bro index 0ff0f47fa7..12cc9e27d4 100644 --- a/scripts/base/frameworks/cluster/main.bro +++ b/scripts/base/frameworks/cluster/main.bro @@ -120,6 +120,7 @@ export { ## The cluster layout definition. This should be placed into a filter ## named cluster-layout.bro somewhere in the BROPATH. It will be ## automatically loaded if the CLUSTER_NODE environment variable is set. + ## Note that BroControl handles all of this automatically. 
const nodes: table[string] of Node = {} &redef; ## This is usually supplied on the command line for each instance diff --git a/scripts/base/frameworks/communication/main.bro b/scripts/base/frameworks/communication/main.bro index 47b4120e54..92d527101d 100644 --- a/scripts/base/frameworks/communication/main.bro +++ b/scripts/base/frameworks/communication/main.bro @@ -15,13 +15,16 @@ export { ## are wildcards. const listen_interface = 0.0.0.0 &redef; - ## Which port to listen on. + ## Which port to listen on. Note that BroControl sets this + ## automatically. const listen_port = 47757/tcp &redef; ## This defines if a listening socket should use SSL. const listen_ssl = F &redef; ## Defines if a listening socket can bind to IPv6 addresses. + ## + ## Note that this is overridden by the BroControl IPv6Comm option. const listen_ipv6 = F &redef; ## If :bro:id:`Communication::listen_interface` is a non-global @@ -128,7 +131,8 @@ export { }; ## The table of Bro or Broccoli nodes that Bro will initiate connections - ## to or respond to connections from. + ## to or respond to connections from. Note that BroControl sets this + ## automatically. global nodes: table[string] of Node &redef; ## A table of peer nodes for which this node issued a diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index 476d314523..c068866f63 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -76,9 +76,16 @@ export { }; ## Default rotation interval. Zero disables rotation. + ## + ## Note that this is overridden by the BroControl LogRotationInterval + ## option. const default_rotation_interval = 0secs &redef; - ## Default alarm summary mail interval. Zero disables alarm summary mails. + ## Default alarm summary mail interval. Zero disables alarm summary + ## mails. + ## + ## Note that this is overridden by the BroControl MailAlarmsInterval + ## option. const default_mail_alarms_interval = 0secs &redef; ## Default naming format for timestamps embedded into filenames. diff --git a/scripts/base/frameworks/notice/actions/pp-alarms.bro b/scripts/base/frameworks/notice/actions/pp-alarms.bro index 2b3b2d8b08..453c82e3d1 100644 --- a/scripts/base/frameworks/notice/actions/pp-alarms.bro +++ b/scripts/base/frameworks/notice/actions/pp-alarms.bro @@ -13,6 +13,8 @@ export { ## Address to send the pretty-printed reports to. Default if not set is ## :bro:id:`Notice::mail_dest`. + ## + ## Note that this is overridden by the BroControl MailAlarmsTo option. const mail_dest_pretty_printed = "" &redef; ## If an address from one of these networks is reported, we mark ## the entry with an additional quote symbol (i.e., ">"). Many MUAs diff --git a/scripts/base/frameworks/notice/main.bro b/scripts/base/frameworks/notice/main.bro index fbd55c6de7..a5f17a4979 100644 --- a/scripts/base/frameworks/notice/main.bro +++ b/scripts/base/frameworks/notice/main.bro @@ -182,17 +182,26 @@ export { global policy: hook(n: Notice::Info); ## Local system sendmail program. + ## + ## Note that this is overridden by the BroControl SendMail option. const sendmail = "/usr/sbin/sendmail" &redef; ## Email address to send notices with the ## :bro:enum:`Notice::ACTION_EMAIL` action or to send bulk alarm logs ## on rotation with :bro:enum:`Notice::ACTION_ALARM`. + ## + ## Note that this is overridden by the BroControl MailTo option. const mail_dest = "" &redef; ## Address that emails will be from. + ## + ## Note that this is overridden by the BroControl MailFrom option. 
const mail_from = "Big Brother " &redef; ## Reply-to address used in outbound email. const reply_to = "" &redef; ## Text string prefixed to the subject of all emails sent out. + ## + ## Note that this is overridden by the BroControl MailSubjectPrefix + ## option. const mail_subject_prefix = "[Bro]" &redef; ## The maximum amount of time a plugin can delay email from being sent. const max_email_delay = 15secs &redef; diff --git a/scripts/base/utils/site.bro b/scripts/base/utils/site.bro index 9edefe6a8e..f90da4cdf2 100644 --- a/scripts/base/utils/site.bro +++ b/scripts/base/utils/site.bro @@ -17,7 +17,8 @@ export { [::1]/128, } &redef; - ## Networks that are considered "local". + ## Networks that are considered "local". Note that BroControl sets + ## this automatically. const local_nets: set[subnet] &redef; ## This is used for retrieving the subnet when using multiple entries in From 09381504539b8cac3298135cab684260594c9090 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 23 Oct 2013 11:22:30 -0500 Subject: [PATCH 15/47] Fix minor formatting problem in NEWS. --- NEWS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/NEWS b/NEWS index 41b29358ba..7ac96be47f 100644 --- a/NEWS +++ b/NEWS @@ -21,17 +21,20 @@ New Functionality efficiently, now): - HTTP: + * Identify MIME type of messages. * Extract messages to disk. * Compute MD5 for messages. - SMTP: + * Identify MIME type of messages. * Extract messages to disk. * Compute MD5 for messages. * Provide access to start of entity data. - FTP data transfers: + * Identify MIME types of data. * Record to disk. From 34aece4ddd1009b808082c08b34733da83dd25d0 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 23 Oct 2013 11:30:59 -0500 Subject: [PATCH 16/47] Add NEWS about incompatible local.bro changes, addresses BIT-1047. --- NEWS | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/NEWS b/NEWS index 7ac96be47f..6e6a299f15 100644 --- a/NEWS +++ b/NEWS @@ -246,6 +246,19 @@ most submodules. Changed Functionality --------------------- +- Previous versions of ``$prefix/share/bro/site/local.bro`` (where + "$prefix" indicates the installation prefix of Bro), aren't compatible + with Bro 2.2. This file won't be overwritten when installing over a + previous Bro installation to prevent clobbering users' modifications, + but an example of the new version is located in + ``$prefix/share/bro/site/local.bro.example``. So if no modification + has been done to the previous local.bro, just copy the new example + version over it, else merge in the differences. For reference, + a common error message when attempting to use an outdated local.bro + looks like:: + + fatal error in /usr/local/bro/share/bro/policy/frameworks/software/vulnerable.bro, line 41: BroType::AsRecordType (table/record) (set[record { min:record { major:count; minor:count; minor2:count; minor3:count; addl:string; }; max:record { major:count; minor:count; minor2:count; minor3:count; addl:string; }; }]) + - The interface to extracting content from application-layer protocols (including HTTP, SMTP, FTP) has changed significantly due to the introduction of the new file analysis framework (see above). From 7e95755ce59ee04b2f16e49c81fa5cb8185fef3f Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 23 Oct 2013 11:37:23 -0500 Subject: [PATCH 17/47] Fix record coercion tolerance of optional fields. There were cases where coercing a record value with an uninitialized field could cause a null pointer dereference even though the field can validly be unset since it has &optional. 
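
In script-land terms, the problem shows up when a record constructor or
coercion leaves an ``&optional`` field unset. A reduced sketch of the pattern
(the type and field names are illustrative; the regression test added below
exercises the same shape via the Software framework types):

.. code:: bro

    type Version: record {
        major: count;
        minor: count &optional;
    };

    type VersionRange: record {
        # May validly stay unset.
        min: Version &optional;
        max: Version;
    };

    event bro_init()
        {
        # Coercing this value must tolerate the missing $min instead of
        # dereferencing a null field value.
        local r = VersionRange($max = Version($major = 1, $minor = 7));
        print r;
        }
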
--- NEWS | 18 +++++++++++++ src/Val.cc | 12 ++++++--- .../Baseline/language.named-record-ctors/out | 7 +++++ testing/btest/language/named-record-ctors.bro | 27 ++++++++++++++++++- 4 files changed, 60 insertions(+), 4 deletions(-) diff --git a/NEWS b/NEWS index 6e6a299f15..5a97c2e4a3 100644 --- a/NEWS +++ b/NEWS @@ -259,6 +259,24 @@ Changed Functionality fatal error in /usr/local/bro/share/bro/policy/frameworks/software/vulnerable.bro, line 41: BroType::AsRecordType (table/record) (set[record { min:record { major:count; minor:count; minor2:count; minor3:count; addl:string; }; max:record { major:count; minor:count; minor2:count; minor3:count; addl:string; }; }]) +- The type of ``Software::vulnerable_versions`` changed to allow + more flexibility and range specifications. An example usage: + + .. code:: bro + + const java_1_6_vuln = Software::VulnerableVersionRange( + $max = Software::Version($major = 1, $minor = 6, $minor2 = 0, $minor3 = 44) + ); + + const java_1_7_vuln = Software::VulnerableVersionRange( + $min = Software::Version($major = 1, $minor = 7), + $max = Software::Version($major = 1, $minor = 7, $minor2 = 0, $minor3 = 20) + ); + + redef Software::vulnerable_versions += { + ["Java"] = set(java_1_6_vuln, java_1_7_vuln) + }; + - The interface to extracting content from application-layer protocols (including HTTP, SMTP, FTP) has changed significantly due to the introduction of the new file analysis framework (see above). diff --git a/src/Val.cc b/src/Val.cc index 450f3c1653..dbd4863c67 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -2720,16 +2720,22 @@ RecordVal* RecordVal::CoerceTo(const RecordType* t, Val* aggr, bool allow_orphan break; } + Val* v = Lookup(i); + + if ( ! v ) + // Check for allowable optional fields is outside the loop, below. + continue; + if ( ar_t->FieldType(t_i)->Tag() == TYPE_RECORD - && ! same_type(ar_t->FieldType(t_i), Lookup(i)->Type()) ) + && ! 
same_type(ar_t->FieldType(t_i), v->Type()) ) { - Expr* rhs = new ConstExpr(Lookup(i)->Ref()); + Expr* rhs = new ConstExpr(v->Ref()); Expr* e = new RecordCoerceExpr(rhs, ar_t->FieldType(t_i)->AsRecordType()); ar->Assign(t_i, e->Eval(0)); continue; } - ar->Assign(t_i, Lookup(i)->Ref()); + ar->Assign(t_i, v->Ref()); } for ( i = 0; i < ar_t->NumFields(); ++i ) diff --git a/testing/btest/Baseline/language.named-record-ctors/out b/testing/btest/Baseline/language.named-record-ctors/out index 39b2ed7c0b..89a7025012 100644 --- a/testing/btest/Baseline/language.named-record-ctors/out +++ b/testing/btest/Baseline/language.named-record-ctors/out @@ -1,2 +1,9 @@ [min=, max=2] [min=7, max=42] +[aaa=1, bbb=test, ccc=, ddd=default] +{ +[Java] = { +[min=, max=[major=1, minor=6, minor2=0, minor3=44, addl=]], +[min=[major=1, minor=7, minor2=, minor3=, addl=], max=[major=1, minor=7, minor2=0, minor3=20, addl=]] +} +} diff --git a/testing/btest/language/named-record-ctors.bro b/testing/btest/language/named-record-ctors.bro index 7f04b9d4b0..d0a6fc70e5 100644 --- a/testing/btest/language/named-record-ctors.bro +++ b/testing/btest/language/named-record-ctors.bro @@ -1,4 +1,4 @@ -# @TEST-EXEC: bro -b %INPUT >out +# @TEST-EXEC: bro -b frameworks/software/vulnerable %INPUT >out # @TEST-EXEC: btest-diff out type MyRec: record { @@ -6,7 +6,32 @@ type MyRec: record { max: count; }; +type Bar: record { + aaa: count; + bbb: string &optional; + ccc: string &optional; + ddd: string &default="default"; +}; + +const java_1_6_vuln = Software::VulnerableVersionRange( + $max = Software::Version($major = 1, $minor = 6, $minor2 = 0, $minor3 = 44) +); + +const java_1_7_vuln = Software::VulnerableVersionRange( + $min = Software::Version($major = 1, $minor = 7), + $max = Software::Version($major = 1, $minor = 7, $minor2 = 0, $minor3 = 20) +); + +redef Software::vulnerable_versions += { + ["Java"] = set(java_1_6_vuln, java_1_7_vuln) +}; + local myrec: MyRec = MyRec($max=2); print myrec; myrec = MyRec($min=7, $max=42); print myrec; + +local data = Bar($aaa=1, $bbb="test"); +print data; + +print Software::vulnerable_versions; From 8a6e7e8036c78d2b445e89791fb346c1fa2917aa Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 23 Oct 2013 16:04:03 -0500 Subject: [PATCH 18/47] Add NEWS about new features of broctl and upgrade info --- NEWS | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/NEWS b/NEWS index 5a97c2e4a3..f4ec88ad83 100644 --- a/NEWS +++ b/NEWS @@ -236,6 +236,35 @@ New Functionality To use CPU pinning, a new per-node option ``pin_cpus`` can be specified in node.cfg if the OS is either Linux or FreeBSD. +- BroControl now returns useful exit codes. Most BroControl commands + return 0 if everything was OK, and 1 otherwise. However, there are + a few exceptions. The "status" and "top" commands return 0 if all Bro + nodes are running, and 1 if not all nodes are running. The "cron" + command always returns 0 (but it still sends email if there were any + problems). Any command provided by a plugin always returns 0. + +- BroControl now has an option "env_vars" to set Bro environment variables. + The value of this option is a comma-separated list of environment variable + assignments (e.g., "VAR1=value, VAR2=another"). The "env_vars" option + can apply to all Bro nodes (by setting it in broctl.cfg), or can be + node-specific (by setting it in node.cfg). Environment variables in + node.cfg have priority over any specified in broctl.cfg. 
+ +- BroControl now supports load balancing with PF_RING while sniffing + multiple interfaces. Rather than assigning the same PF_RING cluster ID + to all workers on a host, cluster ID assignment is now based on which + interface a worker is sniffing (i.e., all workers on a host that sniff + the same interface will share a cluster ID). This is handled by + BroControl automatically. + +- BroControl has several new options: MailConnectionSummary (for + disabling the sending of connection summary report emails), + MailAlarmsInterval (for specifying a different interval to send alarm + summary emails), CompressCmd (if archived log files will be compressed, + this specifies the command that will be used to compress them), + CompressExtension (if archived log files will be compressed, this + specifies the file extension to use). + - BroControl comes with its own test-suite now. ``make test`` in ``aux/broctl`` will run it. @@ -362,6 +391,14 @@ Changed Functionality - We removed the BitTorrent DPD signatures pending further updates to that analyzer. +- In previous versions of BroControl, running "broctl cron" would create + a file ``$prefix/logs/stats/www`` (where "$prefix" indicates the + installation prefix of Bro). Now, it is created as a directory. + Therefore, if you perform an upgrade install and you're using BroControl, + then you may see an email (generated by "broctl cron") containing an + error message: "error running update-stats". To fix this problem, + either remove that file (it is not needed) or rename it. + Bro 2.1 ======= From 72a4a9041685b07338cda50f45cf482825d36143 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Wed, 23 Oct 2013 16:36:14 -0500 Subject: [PATCH 19/47] Add more script package README files The text from these README files appears on the "Bro Script Packages" page after building the documentation. --- scripts/base/files/extract/README | 1 + scripts/base/files/hash/README | 1 + scripts/base/files/unified2/README | 1 + scripts/base/frameworks/logging/postprocessors/README | 1 + scripts/base/frameworks/reporter/README | 2 ++ scripts/base/frameworks/signatures/README | 4 ++++ scripts/base/frameworks/sumstats/plugins/README | 1 + scripts/policy/frameworks/intel/seen/README | 1 + scripts/policy/misc/app-stats/plugins/README | 1 + scripts/policy/tuning/README | 1 + 10 files changed, 14 insertions(+) create mode 100644 scripts/base/files/extract/README create mode 100644 scripts/base/files/hash/README create mode 100644 scripts/base/files/unified2/README create mode 100644 scripts/base/frameworks/logging/postprocessors/README create mode 100644 scripts/base/frameworks/reporter/README create mode 100644 scripts/base/frameworks/signatures/README create mode 100644 scripts/base/frameworks/sumstats/plugins/README create mode 100644 scripts/policy/frameworks/intel/seen/README create mode 100644 scripts/policy/misc/app-stats/plugins/README create mode 100644 scripts/policy/tuning/README diff --git a/scripts/base/files/extract/README b/scripts/base/files/extract/README new file mode 100644 index 0000000000..1a2116a9e0 --- /dev/null +++ b/scripts/base/files/extract/README @@ -0,0 +1 @@ +Support for extracing files with the file analysis framework. diff --git a/scripts/base/files/hash/README b/scripts/base/files/hash/README new file mode 100644 index 0000000000..855a30effe --- /dev/null +++ b/scripts/base/files/hash/README @@ -0,0 +1 @@ +Support for file hashes with the file analysis framework. 
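
As context for the extract and hash packages described by these READMEs,
attaching their analyzers from script-land typically looks like the following
sketch (the output filename scheme is illustrative only):

.. code:: bro

    event file_new(f: fa_file)
        {
        Files::add_analyzer(f, Files::ANALYZER_MD5);
        Files::add_analyzer(f, Files::ANALYZER_EXTRACT,
                            [$extract_filename=fmt("extract-%s", f$id)]);
        }
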
diff --git a/scripts/base/files/unified2/README b/scripts/base/files/unified2/README new file mode 100644 index 0000000000..08b5014db6 --- /dev/null +++ b/scripts/base/files/unified2/README @@ -0,0 +1 @@ +Support for Unified2 files in the file analysis framework. diff --git a/scripts/base/frameworks/logging/postprocessors/README b/scripts/base/frameworks/logging/postprocessors/README new file mode 100644 index 0000000000..33fb4e5462 --- /dev/null +++ b/scripts/base/frameworks/logging/postprocessors/README @@ -0,0 +1 @@ +Support for postprocessors in the logging framework. diff --git a/scripts/base/frameworks/reporter/README b/scripts/base/frameworks/reporter/README new file mode 100644 index 0000000000..0a0bd9334f --- /dev/null +++ b/scripts/base/frameworks/reporter/README @@ -0,0 +1,2 @@ +This framework is intended to create an output and filtering path for +internally generated messages/warnings/errors. diff --git a/scripts/base/frameworks/signatures/README b/scripts/base/frameworks/signatures/README new file mode 100644 index 0000000000..fd45cd3a19 --- /dev/null +++ b/scripts/base/frameworks/signatures/README @@ -0,0 +1,4 @@ +The signature framework provides for doing low-level pattern matching. While +signatures are not Bro's preferred detection tool, they sometimes come in +handy and are closer to what many people are familiar with from using +other NIDS. diff --git a/scripts/base/frameworks/sumstats/plugins/README b/scripts/base/frameworks/sumstats/plugins/README new file mode 100644 index 0000000000..4c4f36a623 --- /dev/null +++ b/scripts/base/frameworks/sumstats/plugins/README @@ -0,0 +1 @@ +Plugins for the summary statistics framework. diff --git a/scripts/policy/frameworks/intel/seen/README b/scripts/policy/frameworks/intel/seen/README new file mode 100644 index 0000000000..e06a869125 --- /dev/null +++ b/scripts/policy/frameworks/intel/seen/README @@ -0,0 +1 @@ +Scripts that send data to the intelligence framework. diff --git a/scripts/policy/misc/app-stats/plugins/README b/scripts/policy/misc/app-stats/plugins/README new file mode 100644 index 0000000000..cb2e04d8ba --- /dev/null +++ b/scripts/policy/misc/app-stats/plugins/README @@ -0,0 +1 @@ +Plugins for AppStats. diff --git a/scripts/policy/tuning/README b/scripts/policy/tuning/README new file mode 100644 index 0000000000..9f04f71108 --- /dev/null +++ b/scripts/policy/tuning/README @@ -0,0 +1 @@ +Miscellaneous tuning parameters. From eab886fb84e1a6e531470ec3872361b9791c8033 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Wed, 23 Oct 2013 16:51:55 -0500 Subject: [PATCH 20/47] Change test of identify_data BIF to ignore charset. It may vary with libmagic version. 
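
For reference, the BIF under test returns either a textual description or a
MIME type depending on its second argument; with some libmagic versions the
MIME form carries a ``; charset=...`` suffix, which is why the test now strips
it. A minimal usage sketch (the output strings in the comments are indicative
only):

.. code:: bro

    event bro_init()
        {
        local sample = "plain text, no line terminators";
        print identify_data(sample, F);  # e.g. "ASCII text, ..."
        print identify_data(sample, T);  # e.g. "text/plain" (charset suffix
                                         # depends on the libmagic version)
        }
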
--- testing/btest/Baseline/bifs.identify_data/out | 6 +++--- testing/btest/bifs/identify_data.bro | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/testing/btest/Baseline/bifs.identify_data/out b/testing/btest/Baseline/bifs.identify_data/out index 1cadefbf6e..9645f524b3 100644 --- a/testing/btest/Baseline/bifs.identify_data/out +++ b/testing/btest/Baseline/bifs.identify_data/out @@ -1,4 +1,4 @@ ASCII text, with no line terminators -text/plain; charset=us-ascii -PNG image -image/png; charset=binary +text/plain +PNG image data +image/png diff --git a/testing/btest/bifs/identify_data.bro b/testing/btest/bifs/identify_data.bro index 68cac55c61..836a5a428f 100644 --- a/testing/btest/bifs/identify_data.bro +++ b/testing/btest/bifs/identify_data.bro @@ -1,5 +1,5 @@ -# -# @TEST-EXEC: bro -b %INPUT | sed 's/PNG image data/PNG image/g' >out +# Text encodings may vary with libmagic version so don't test that part. +# @TEST-EXEC: bro -b %INPUT | sed 's/; charset=.*//g' >out # @TEST-EXEC: btest-diff out event bro_init() From 0b5c1a1f2883df3d8ac365e6cff5f9cfa94b2312 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Thu, 24 Oct 2013 10:23:17 -0500 Subject: [PATCH 21/47] Add gawk to list of optional packages BSD and debian-based Linux do not include gawk by default. Noticed that a test was failing on these platforms due to the use of a bro-cut option that requires gawk. --- doc/install/install.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/install/install.rst b/doc/install/install.rst index 86e92c23c6..a4bd51d29e 100644 --- a/doc/install/install.rst +++ b/doc/install/install.rst @@ -97,6 +97,7 @@ build time: * LibGeoIP (for geo-locating IP addresses) * sendmail (enables Bro and BroControl to send mail) + * gawk (enables all features of bro-cut) * gperftools (tcmalloc is used to improve memory and CPU usage) * ipsumdump (for trace-summary; http://www.cs.ucla.edu/~kohler/ipsumdump) * Ruby executable, library, and headers (for Broccoli Ruby bindings) From af6e44589f276caffc5c2f6393d61a0193fbbf82 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 24 Oct 2013 14:55:37 -0400 Subject: [PATCH 22/47] Hack to make sure that the starting BPF filter is logged on clusters. 
--- .../base/frameworks/packet-filter/__load__.bro | 5 +++++ .../base/frameworks/packet-filter/cluster.bro | 14 ++++++++++++++ scripts/base/frameworks/packet-filter/main.bro | 1 + .../Baseline/core.print-bpf-filters/output | 18 +++++++++--------- 4 files changed, 29 insertions(+), 9 deletions(-) create mode 100644 scripts/base/frameworks/packet-filter/cluster.bro diff --git a/scripts/base/frameworks/packet-filter/__load__.bro b/scripts/base/frameworks/packet-filter/__load__.bro index 011885e8b7..40e9d323b9 100644 --- a/scripts/base/frameworks/packet-filter/__load__.bro +++ b/scripts/base/frameworks/packet-filter/__load__.bro @@ -1,3 +1,8 @@ @load ./utils @load ./main @load ./netstats + +@load base/frameworks/cluster +@if ( Cluster::is_enabled() ) +@load ./cluster +@endif diff --git a/scripts/base/frameworks/packet-filter/cluster.bro b/scripts/base/frameworks/packet-filter/cluster.bro new file mode 100644 index 0000000000..34f0600d18 --- /dev/null +++ b/scripts/base/frameworks/packet-filter/cluster.bro @@ -0,0 +1,14 @@ + +module PacketFilter; + +event remote_connection_handshake_done(p: event_peer) &priority=3 + { + if ( Cluster::local_node_type() == Cluster::WORKER && + p$descr in Cluster::nodes && + Cluster::nodes[p$descr]$node_type == Cluster::MANAGER ) + { + # This ensures that a packet filter is installed and logged + # after the manager connects to us. + install(); + } + } diff --git a/scripts/base/frameworks/packet-filter/main.bro b/scripts/base/frameworks/packet-filter/main.bro index 4a6e9c43b5..8b1739acb4 100644 --- a/scripts/base/frameworks/packet-filter/main.bro +++ b/scripts/base/frameworks/packet-filter/main.bro @@ -294,6 +294,7 @@ function install(): bool # Do an audit log for the packet filter. local info: Info; info$ts = network_time(); + info$node = peer_description; # If network_time() is 0.0 we're at init time so use the wall clock. if ( info$ts == 0.0 ) { diff --git a/testing/btest/Baseline/core.print-bpf-filters/output b/testing/btest/Baseline/core.print-bpf-filters/output index 1342aa5869..768e6762f3 100644 --- a/testing/btest/Baseline/core.print-bpf-filters/output +++ b/testing/btest/Baseline/core.print-bpf-filters/output @@ -3,28 +3,28 @@ #empty_field (empty) #unset_field - #path packet_filter -#open 2013-08-12-18-24-49 +#open 2013-10-24-18-53-49 #fields ts node filter init success #types time string string bool bool -1376331889.617206 - ip or not ip T T -#close 2013-08-12-18-24-49 +1382640829.338079 bro ip or not ip T T +#close 2013-10-24-18-53-49 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path packet_filter -#open 2013-08-12-18-24-49 +#open 2013-10-24-18-53-49 #fields ts node filter init success #types time string string bool bool -1376331889.904944 - port 42 T T -#close 2013-08-12-18-24-49 +1382640829.495639 bro port 42 T T +#close 2013-10-24-18-53-49 #separator \x09 #set_separator , #empty_field (empty) #unset_field - #path packet_filter -#open 2013-08-12-18-24-50 +#open 2013-10-24-18-53-49 #fields ts node filter init success #types time string string bool bool -1376331890.192875 - (vlan) and (ip or not ip) T T -#close 2013-08-12-18-24-50 +1382640829.653368 bro (vlan) and (ip or not ip) T T +#close 2013-10-24-18-53-49 From 1cfb3a38e0ee2932b6c97ca0387399b5507399a2 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 24 Oct 2013 15:21:03 -0400 Subject: [PATCH 23/47] Add UDP support to the checksum offload detection script. 
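
When this detection fires on live traffic, the usual remedies are to disable
NIC checksum offloading on the capture interface or to tell Bro to ignore
checksums entirely. The latter corresponds to running ``bro -C`` or, in a site
script:

.. code:: bro

    # Accept packets with bad checksums instead of discarding them
    # (equivalent to the -C command-line flag).
    redef ignore_checksums = T;
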
--- scripts/base/misc/find-checksum-offloading.bro | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/scripts/base/misc/find-checksum-offloading.bro b/scripts/base/misc/find-checksum-offloading.bro index 15c1d6661d..d7e6577827 100644 --- a/scripts/base/misc/find-checksum-offloading.bro +++ b/scripts/base/misc/find-checksum-offloading.bro @@ -16,6 +16,7 @@ export { # Keep track of how many bad checksums have been seen. global bad_ip_checksums = 0; global bad_tcp_checksums = 0; +global bad_udp_checksums = 0; # Track to see if this script is done so that messages aren't created multiple times. global done = F; @@ -28,7 +29,11 @@ event ChecksumOffloading::check() local pkts_recvd = net_stats()$pkts_recvd; local bad_ip_checksum_pct = (pkts_recvd != 0) ? (bad_ip_checksums*1.0 / pkts_recvd*1.0) : 0; local bad_tcp_checksum_pct = (pkts_recvd != 0) ? (bad_tcp_checksums*1.0 / pkts_recvd*1.0) : 0; - if ( bad_ip_checksum_pct > 0.05 || bad_tcp_checksum_pct > 0.05 ) + local bad_udp_checksum_pct = (pkts_recvd != 0) ? (bad_udp_checksums*1.0 / pkts_recvd*1.0) : 0; + + if ( bad_ip_checksum_pct > 0.05 || + bad_tcp_checksum_pct > 0.05 || + bad_udp_checksum_pct > 0.05 ) { local packet_src = reading_traces() ? "trace file likely has" : "interface is likely receiving"; local bad_checksum_msg = (bad_ip_checksum_pct > 0.0) ? "IP" : ""; @@ -38,6 +43,13 @@ event ChecksumOffloading::check() bad_checksum_msg += " and "; bad_checksum_msg += "TCP"; } + if ( bad_udp_checksum_pct > 0.0 ) + { + if ( |bad_checksum_msg| > 0 ) + bad_checksum_msg += " and "; + bad_checksum_msg += "UDP"; + } + local message = fmt("Your %s invalid %s checksums, most likely from NIC checksum offloading.", packet_src, bad_checksum_msg); Reporter::warning(message); done = T; @@ -65,6 +77,8 @@ event conn_weird(name: string, c: connection, addl: string) { if ( name == "bad_TCP_checksum" ) ++bad_tcp_checksums; + else if ( name == "bad_UDP_checksum" ) + ++bad_udp_checksums; } event bro_done() From 381adb2a1ea1fdba9a29394e8d505a71cfe49768 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 24 Oct 2013 12:26:26 -0700 Subject: [PATCH 24/47] temporary osx mavericks libc++ issue workaround. When using clang3.3 on mavericks with libc++, after the first std::getline-call encountered eof on an ifstream, all following std::getline calls on the same ifstream will also get eof, even if ifstream::clear has been called and the file has been appended in the meantime. Seeking to the current position after a clear before trying to read the line fixes this behavior. This is just a temporary workaround... --- src/input/readers/Ascii.cc | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 0b16c9d468..12d677742b 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -13,6 +13,19 @@ #include #include +// libc++ on Mavericks does not allow further getlines on a filehandle +// after the first eof was encountered, even after a clear. Enable +// workaround. 
+ +#ifdef __clang__ +# ifdef __APPLE__ +# include +# ifdef __MAC_10_9 +# define MAVERICKS_WORKAROUND +# endif +# endif +#endif + using namespace input::reader; using threading::Value; using threading::Field; @@ -279,6 +292,9 @@ bool Ascii::DoUpdate() } string line; +#ifdef MAVERICKS_WORKAROUND + file->seekg(file->tellg()); +#endif while ( GetLine(line ) ) { // split on tabs From ba93d0df08ba64a246e8b935399f93ac34db463d Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Thu, 24 Oct 2013 16:28:10 -0400 Subject: [PATCH 25/47] Intel framework notes added to NEWS --- NEWS | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/NEWS b/NEWS index 5a97c2e4a3..7b1034bd89 100644 --- a/NEWS +++ b/NEWS @@ -10,6 +10,28 @@ Bro 2.2 Beta New Functionality ----------------- +- A completely overhauled intelligence framework for consuming + external intelligence data. It provides an abstracted mechanism + for feeding data into the framework to be matched against the + data available. It also provides a function named ``Intel::match`` + which makes any hits on intelligence data available to the + scripting language. + + Using input framework, the intel framework can load data from + text files. It can also update and add data if changes are + made to the file being monitored. Files to monitor for + intelligence can be provided by redef-ing the + ``Intel::read_files`` variable. + + The intel framework is cluster-ready. On a cluster, the + manager is the only node that needs to load in data from disk, + the cluster support will distribute the data across a cluster + automatically. + + Scripts are provided at ``policy/frameworks/intel/seen`` that + provide a broad set of sources of data to feed into the intel + framwork to be matched. + - A new file analysis framework moves most of the processing of file content from script-land into the core, where it belongs. See ``doc/file-analysis.rst``, or the online documentation, for more @@ -40,7 +62,7 @@ New Functionality - IRC DCC transfers: Record to disk. - - Support for analyzing data transfered via HTTP range requests. + - Support for analyzing data transferred via HTTP range requests. - A binary input reader interfaces the input framework with the file analysis, allowing to inject files on disk into Bro's From c640e2919372e032f7915645ead39a79d6121725 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 24 Oct 2013 13:34:17 -0700 Subject: [PATCH 26/47] rework libc++ patch to use AvailabilityMacros.h instead of Availability.h AvailabilityMacros.h was introduced in 10.2, Availability is only available in 10.5+ - this increases backwards compatibility. --- src/input/readers/Ascii.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 12d677742b..3229d75ed6 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -19,9 +19,11 @@ #ifdef __clang__ # ifdef __APPLE__ -# include -# ifdef __MAC_10_9 -# define MAVERICKS_WORKAROUND +# include +# ifdef __MAC_OS_X_VERSION_MAX_ALLOWED +# if __MAC_OS_X_VERSION_MAX_ALLOWED == 1090 +# define MAVERICKS_WORKAROUND +# endif # endif # endif #endif From 8ce3865bd20127f4174f9a347e5ffdd9c156e10d Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 24 Oct 2013 14:31:23 -0700 Subject: [PATCH 27/47] Updating baselines for packet filter changes. 
--- doc/scripts/DocSourcesList.cmake | 1 + .../coverage.bare-load-baseline/canonified_loaded_scripts.log | 4 ++-- .../canonified_loaded_scripts.log | 4 ++-- testing/btest/Baseline/coverage.init-default/missing_loads | 1 + 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake index 0b67ab1995..fdf5990659 100644 --- a/doc/scripts/DocSourcesList.cmake +++ b/doc/scripts/DocSourcesList.cmake @@ -121,6 +121,7 @@ rest_target(${psd} base/frameworks/notice/extend-email/hostnames.bro) rest_target(${psd} base/frameworks/notice/main.bro) rest_target(${psd} base/frameworks/notice/non-cluster.bro) rest_target(${psd} base/frameworks/notice/weird.bro) +rest_target(${psd} base/frameworks/packet-filter/cluster.bro) rest_target(${psd} base/frameworks/packet-filter/main.bro) rest_target(${psd} base/frameworks/packet-filter/netstats.bro) rest_target(${psd} base/frameworks/packet-filter/utils.bro) diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index 5ee8158ddf..04c6546678 100644 --- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2013-08-31-17-46-55 +#open 2013-10-24-21-30-36 #fields name #types string scripts/base/init-bare.bro @@ -100,4 +100,4 @@ scripts/base/init-bare.bro build/scripts/base/bif/top-k.bif.bro scripts/policy/misc/loaded-scripts.bro scripts/base/utils/paths.bro -#close 2013-08-31-17-46-55 +#close 2013-10-24-21-30-36 diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index a11933c79a..618e212f0d 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2013-08-31-17-46-56 +#open 2013-10-24-21-30-37 #fields name #types string scripts/base/init-bare.bro @@ -220,4 +220,4 @@ scripts/base/init-default.bro scripts/base/files/unified2/main.bro scripts/base/misc/find-checksum-offloading.bro scripts/policy/misc/loaded-scripts.bro -#close 2013-08-31-17-46-56 +#close 2013-10-24-21-30-37 diff --git a/testing/btest/Baseline/coverage.init-default/missing_loads b/testing/btest/Baseline/coverage.init-default/missing_loads index 23cae7b694..b5fbf644d5 100644 --- a/testing/btest/Baseline/coverage.init-default/missing_loads +++ b/testing/btest/Baseline/coverage.init-default/missing_loads @@ -4,4 +4,5 @@ -./frameworks/cluster/setup-connections.bro -./frameworks/intel/cluster.bro -./frameworks/notice/cluster.bro +-./frameworks/packet-filter/cluster.bro -./frameworks/sumstats/cluster.bro From 9a58a28da00f4fcb221d17ba8fb350a3f78faa84 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 24 Oct 2013 15:46:16 -0700 Subject: [PATCH 28/47] change temporary workaround to permanent fix. According to Howard Hinnant, in use cases like ours you need to call file->sync() to synchronize the internal ifstream buffer with the file. 
See http://stackoverflow.com/questions/19558376/clang-3-3-xcode-libc-stdgetline-does-not-read-data-after-calling-ifstream --- src/input/readers/Ascii.cc | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/Ascii.cc index 3229d75ed6..d5b0d94dce 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/Ascii.cc @@ -13,21 +13,6 @@ #include #include -// libc++ on Mavericks does not allow further getlines on a filehandle -// after the first eof was encountered, even after a clear. Enable -// workaround. - -#ifdef __clang__ -# ifdef __APPLE__ -# include -# ifdef __MAC_OS_X_VERSION_MAX_ALLOWED -# if __MAC_OS_X_VERSION_MAX_ALLOWED == 1090 -# define MAVERICKS_WORKAROUND -# endif -# endif -# endif -#endif - using namespace input::reader; using threading::Value; using threading::Field; @@ -294,9 +279,7 @@ bool Ascii::DoUpdate() } string line; -#ifdef MAVERICKS_WORKAROUND - file->seekg(file->tellg()); -#endif + file->sync(); while ( GetLine(line ) ) { // split on tabs From 42c4a51da3788fcb8c03c4706559b763690a0d0c Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 24 Oct 2013 16:52:26 -0700 Subject: [PATCH 29/47] Updating submodule(s). [nomail] --- CHANGES | 4 ++++ VERSION | 2 +- aux/binpac | 2 +- aux/bro-aux | 2 +- aux/broccoli | 2 +- aux/broctl | 2 +- aux/btest | 2 +- 7 files changed, 10 insertions(+), 6 deletions(-) diff --git a/CHANGES b/CHANGES index 615b98df4c..10bc187666 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,8 @@ +2.2-beta-151 | 2013-10-24 16:52:26 -0700 + + * Updating submodule(s). + 2.2-beta-150 | 2013-10-24 16:32:14 -0700 * Change temporary ASCII reader workaround for getline() on diff --git a/VERSION b/VERSION index 3b69c06d40..2b5702fffa 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2-beta-150 +2.2-beta-151 diff --git a/aux/binpac b/aux/binpac index 923994715b..0f20a50afa 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 923994715b34bf3292e402bbe00c00ff77556490 +Subproject commit 0f20a50afacb68154b4035b6da63164d154093e4 diff --git a/aux/bro-aux b/aux/bro-aux index 1496e0319f..d17f99107c 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit 1496e0319f6fa12bb39362ab0947c82e1d6c669b +Subproject commit d17f99107cc778627a0829f0ae416073bb1e20bb diff --git a/aux/broccoli b/aux/broccoli index e57ec85a89..5cc63348a4 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit e57ec85a898a077cb3376462cac1f047e9aeaee7 +Subproject commit 5cc63348a4c3e54adaf59e5a85bec055025c6c1f diff --git a/aux/broctl b/aux/broctl index e8eda204f4..e3c82d67d3 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit e8eda204f418c78cc35102db04602ad2ea94aff8 +Subproject commit e3c82d67d3835e7a56d577b91abe99c396bbe989 diff --git a/aux/btest b/aux/btest index 056c666cd8..cfc8fe7ddf 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 056c666cd8534ba3ba88731d985dde3e29206800 +Subproject commit cfc8fe7ddf5ba3a9f957d1d5a98e9cfe1e9692ac From c980d1055e1e17da4867e3fab1ee10f604b242b0 Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Thu, 24 Oct 2013 18:16:49 -0700 Subject: [PATCH 30/47] Fix for input readers occasionally dead-locking. Bernhard and I tracked it down we believe: the thread queue could deadlock in certain cases. As a fix we tuned the heuristic for telling if a queue might have input to occasionaly err on the safe side by flagging "yes", so that processing will proceed. 
It's a bit unfortunate to apply this fix last minute before the release as it could potentially impact performance if the heuristic fails to often. We believe the chosen parmaterization should be fine ... --- CHANGES | 4 ++++ VERSION | 2 +- src/threading/Queue.h | 10 ++++++---- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/CHANGES b/CHANGES index 10bc187666..1ae192f293 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,8 @@ +2.2-beta-152 | 2013-10-24 18:16:49 -0700 + + * Fix for input readers occasionally dead-locking. (Robin Sommer) + 2.2-beta-151 | 2013-10-24 16:52:26 -0700 * Updating submodule(s). diff --git a/VERSION b/VERSION index 2b5702fffa..26d1beb6fe 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2-beta-151 +2.2-beta-152 diff --git a/src/threading/Queue.h b/src/threading/Queue.h index 792fb63f9c..c4f2bfab00 100644 --- a/src/threading/Queue.h +++ b/src/threading/Queue.h @@ -61,11 +61,13 @@ public: bool Ready(); /** - * Returns true if the next Get() operation might succeed. - * This function may occasionally return a value not - * indicating the actual state, but won't do so very often. + * Returns true if the next Get() operation might succeed. This + * function may occasionally return a value not indicating the actual + * state, but won't do so very often. Occasionally we also return a + * true unconditionally to avoid a deadlock when both pointers happen + * to be equal even though there's stuff queued. */ - bool MaybeReady() { return ( ( read_ptr - write_ptr) != 0 ); } + bool MaybeReady() { return (read_ptr != write_ptr) || (random() % 10000 == 0); } /** Wake up the reader if it's currently blocked for input. This is primarily to give it a chance to check termination quickly. From 2ac0d77f06c58bd1a1daf21b01eb338f4cb88ccb Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Thu, 24 Oct 2013 23:39:16 -0700 Subject: [PATCH 31/47] alternative deadlock issue fix. This fix also fixes the deadlock issue without putting any new strain into the main packet processing path. Instead of occasionally returning true in MaybeReady sometime, we occasionally process threads if time_mgr time is not running. If time_mgr time is running, we have heartbeat messages that will trigger processing in any case -- processing always checks the exact state of the Queues. This fix probably also means that we can remove the communication loads from all input framework tests and run them all simultaneously. --- src/threading/Manager.cc | 6 +++++- src/threading/Queue.h | 9 +++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 1b6cb551e2..3bb8dbff3b 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -82,7 +82,11 @@ double Manager::NextTimestamp(double* network_time) { MsgThread* t = *i; - if ( (*i)->MightHaveOut() && ! t->Killed() ) + if ( ( (*i)->MightHaveOut() && ! t->Killed() ) // there might be something in the queue + // Workaround: when running without network source, and without any communication, + // timer_manager is always 1. Hence the previous if will never trigger heartbeats + // In this case, we still have to check process our threads from time to time. + || ( timer_mgr->Time() == 1.0 && random() % 10000 == 0 ) ) return timer_mgr->Time(); } diff --git a/src/threading/Queue.h b/src/threading/Queue.h index c4f2bfab00..5c1ddfd28e 100644 --- a/src/threading/Queue.h +++ b/src/threading/Queue.h @@ -63,11 +63,12 @@ public: /** * Returns true if the next Get() operation might succeed. 
This * function may occasionally return a value not indicating the actual - * state, but won't do so very often. Occasionally we also return a - * true unconditionally to avoid a deadlock when both pointers happen - * to be equal even though there's stuff queued. + * state, but won't do so very often. Note that this means that it can + * consistently return false even if there is something in the Queue. + * You have to check real queue status from time to time to be sure that + * it is empty. */ - bool MaybeReady() { return (read_ptr != write_ptr) || (random() % 10000 == 0); } + bool MaybeReady() { return (read_ptr != write_ptr); } /** Wake up the reader if it's currently blocked for input. This is primarily to give it a chance to check termination quickly. From 0fdbdff3c4af12172e5de7bb1a9c419ae877737f Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 25 Oct 2013 09:57:12 -0700 Subject: [PATCH 32/47] Wrong example file was included - reported by Michael Auger @LM4K --- doc/scripting/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/scripting/index.rst b/doc/scripting/index.rst index 749fae6457..c6b479a7af 100644 --- a/doc/scripting/index.rst +++ b/doc/scripting/index.rst @@ -214,7 +214,7 @@ take a look at a simple script, stored as ``connection_record_01.bro``, that will output the connection record for a single connection. -.. btest-include:: ${DOC_ROOT}/scripting/connection_record_02.bro +.. btest-include:: ${DOC_ROOT}/scripting/connection_record_01.bro Again, we start with ``@load``, this time importing the :doc:`/scripts/base/protocols/conn/index` scripts which supply the tracking From a09d8e94e0a643cc61c8c4cf5ff95771531f1dcb Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 25 Oct 2013 10:21:33 -0700 Subject: [PATCH 33/47] make the documentation of the SQLite reader/writer a bit nicer. --- scripts/base/frameworks/input/readers/sqlite.bro | 10 ++++++++-- scripts/base/frameworks/logging/writers/sqlite.bro | 10 +++++++++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/scripts/base/frameworks/input/readers/sqlite.bro b/scripts/base/frameworks/input/readers/sqlite.bro index 2cd025e7d4..3ed2a88461 100644 --- a/scripts/base/frameworks/input/readers/sqlite.bro +++ b/scripts/base/frameworks/input/readers/sqlite.bro @@ -1,6 +1,12 @@ -##! Interface for the SQLite input reader. +##! Interface for the SQLite input reader. Redefinable options are available +##! to tweak the input format of the SQLite reader. ##! -##! The defaults are set to match Bro's ASCII output. +##! See :doc:`/frameworks/logging-input-sqlite` for an introduction on how to +##! use the SQLite reader. +##! +##! When using the SQLite reader, you have to specify the SQL query that returns +##! the desired data by setting ``query`` in the ``config`` table. See the +##! introduction mentioned above for an example. module InputSQLite; diff --git a/scripts/base/frameworks/logging/writers/sqlite.bro b/scripts/base/frameworks/logging/writers/sqlite.bro index 5df5e356c8..883c67d760 100644 --- a/scripts/base/frameworks/logging/writers/sqlite.bro +++ b/scripts/base/frameworks/logging/writers/sqlite.bro @@ -1,5 +1,13 @@ -##! Interface for the SQLite log writer. Redefinable options are available +##! Interface for the SQLite log writer. Redefinable options are available ##! to tweak the output format of the SQLite reader. +##! +##! See :doc:`/frameworks/logging-input-sqlite` for an introduction on how to +##! use the SQLite log writer. +##! +##! 
The SQL writer currently supports one writer-specific filter option via +##! ``config``: setting ``tablename`` sets the name of the table that is used +##! or created in the SQLite database. An example for this is given in the +##! introduction mentioned above. module LogSQLite; From 32d7c96cd426f11190cb971f7fa1250570c6e68d Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 25 Oct 2013 15:11:20 -0500 Subject: [PATCH 34/47] Update test and baseline for a recent doc test fix --- .../output | 3 +-- ...st => include-doc_scripting_connection_record_01_bro.btest} | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) rename testing/btest/Baseline/{doc.sphinx.include-doc_scripting_connection_record_02_bro@2 => doc.sphinx.include-doc_scripting_connection_record_01_bro}/output (75%) rename testing/btest/doc/sphinx/{include-doc_scripting_connection_record_02_bro@2.btest => include-doc_scripting_connection_record_01_bro.btest} (75%) diff --git a/testing/btest/Baseline/doc.sphinx.include-doc_scripting_connection_record_02_bro@2/output b/testing/btest/Baseline/doc.sphinx.include-doc_scripting_connection_record_01_bro/output similarity index 75% rename from testing/btest/Baseline/doc.sphinx.include-doc_scripting_connection_record_02_bro@2/output rename to testing/btest/Baseline/doc.sphinx.include-doc_scripting_connection_record_01_bro/output index e4552b8580..34303a12ad 100644 --- a/testing/btest/Baseline/doc.sphinx.include-doc_scripting_connection_record_02_bro@2/output +++ b/testing/btest/Baseline/doc.sphinx.include-doc_scripting_connection_record_01_bro/output @@ -1,9 +1,8 @@ # @TEST-EXEC: cat %INPUT >output && btest-diff output -connection_record_02.bro +connection_record_01.bro @load base/protocols/conn -@load base/protocols/dns event connection_state_remove(c: connection) { diff --git a/testing/btest/doc/sphinx/include-doc_scripting_connection_record_02_bro@2.btest b/testing/btest/doc/sphinx/include-doc_scripting_connection_record_01_bro.btest similarity index 75% rename from testing/btest/doc/sphinx/include-doc_scripting_connection_record_02_bro@2.btest rename to testing/btest/doc/sphinx/include-doc_scripting_connection_record_01_bro.btest index e4552b8580..34303a12ad 100644 --- a/testing/btest/doc/sphinx/include-doc_scripting_connection_record_02_bro@2.btest +++ b/testing/btest/doc/sphinx/include-doc_scripting_connection_record_01_bro.btest @@ -1,9 +1,8 @@ # @TEST-EXEC: cat %INPUT >output && btest-diff output -connection_record_02.bro +connection_record_01.bro @load base/protocols/conn -@load base/protocols/dns event connection_state_remove(c: connection) { From c299a71b83bfd158bfc16dec36145adfa7f43513 Mon Sep 17 00:00:00 2001 From: Daniel Thayer Date: Fri, 25 Oct 2013 15:14:52 -0500 Subject: [PATCH 35/47] Add curl to list of optional dependencies The curl utility is used by the active-http.bro script, but this isn't mentioned anywhere. 
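For context, the curl dependency comes in through the Active HTTP utility
module, which shells out to curl via the Exec module. A rough usage sketch
follows; the record fields used here ($url on the request, $code on the
response) are recalled from base/utils/active-http.bro and should be treated
as assumptions rather than a definitive API description:

    @load base/utils/active-http

    event bro_init()
        {
        # Issue a request and print the HTTP status once curl returns.
        when ( local resp = ActiveHTTP::request([$url="http://127.0.0.1/"]) )
            {
            print fmt("active-http returned status %d", resp$code);
            }
        }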
--- doc/install/install.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/install/install.rst b/doc/install/install.rst index a4bd51d29e..fdbb7e9d8b 100644 --- a/doc/install/install.rst +++ b/doc/install/install.rst @@ -98,6 +98,7 @@ build time: * LibGeoIP (for geo-locating IP addresses) * sendmail (enables Bro and BroControl to send mail) * gawk (enables all features of bro-cut) + * curl (used by one of the Bro scripts) * gperftools (tcmalloc is used to improve memory and CPU usage) * ipsumdump (for trace-summary; http://www.cs.ucla.edu/~kohler/ipsumdump) * Ruby executable, library, and headers (for Broccoli Ruby bindings) From be5b0105b0c7f1eef15f8ebace1efe6e3c7b921d Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Fri, 25 Oct 2013 15:05:08 -0700 Subject: [PATCH 36/47] Updating README with download/git information. The git instructions were hard to find and this way they will show up on github. --- CHANGES | 4 ++++ README | 10 ++++++++++ VERSION | 2 +- 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 505cbeeefa..f56bd8411d 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,8 @@ +2.2-beta-158 | 2013-10-25 15:05:08 -0700 + + * Updating README with download/git information. (Robin Sommer) + 2.2-beta-157 | 2013-10-25 11:11:17 -0700 * Extend the documentation of the SQLite reader/writer framework. diff --git a/README b/README index 734246c914..f841ddd3d8 100644 --- a/README +++ b/README @@ -8,11 +8,21 @@ and pointers for getting started. NEWS contains release notes for the current version, and CHANGES has the complete history of changes. Please see COPYING for licensing information. +You can download source and binary releases on: + + http://www.bro.org/download + +To get the current development version, clone our master git +repository: + + git clone --recursive git://git.bro.org/bro + For more documentation, research publications, and community contact information, please see Bro's home page: http://www.bro.org + On behalf of the Bro Development Team, Vern Paxson & Robin Sommer, diff --git a/VERSION b/VERSION index d60e7aef29..8cdf44379f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2-beta-157 +2.2-beta-158 From 4b0ee2e7ca904d150a687435c2e1b0fdbc241572 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 25 Oct 2013 18:01:46 -0700 Subject: [PATCH 37/47] Fix the dir module. Internally, Dir kept track of the files in directory by storing all inode numbers in a set. However, when a file is deleted and a new file is created in a directory, the old file may get the same inode number as the old one. In this case, bro did not notice the new file. The patch simply changes the indexing of files - now files are indexed by inode and creation time. This should fix the scripts.base.utils.dir test failures. 
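The user-facing interface is unchanged by this fix: scripts still register a
callback through Dir::monitor and receive the path of each newly observed
file. A small sketch for reference; the argument order is inferred from the
Dir::monitor_ev handler touched below and should be double-checked against
base/utils/dir.bro:

    @load base/utils/dir

    event bro_init()
        {
        # Poll the directory every 30 seconds and report new files.
        Dir::monitor("/tmp/incoming",
                     function(fname: string) { print fmt("new file: %s", fname); },
                     30sec);
        }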
--- scripts/base/utils/dir.bro | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/base/utils/dir.bro b/scripts/base/utils/dir.bro index e64af43b15..30d6fed832 100644 --- a/scripts/base/utils/dir.bro +++ b/scripts/base/utils/dir.bro @@ -28,7 +28,8 @@ event Dir::monitor_ev(dir: string, last_files: set[string], callback: function(fname: string), poll_interval: interval) { - when ( local result = Exec::run([$cmd=fmt("ls -i -1 \"%s/\"", str_shell_escape(dir))]) ) + # the command lists all file in the directory in the form [inode]-[ctime] [filename] + when ( local result = Exec::run([$cmd=fmt("find \"%s\" -depth 1 -exec stat -f \"%%i-%%c %%N\" {} \\;", str_shell_escape(dir))]) ) { if ( result$exit_code != 0 ) { From f90fcdf15207c4a979b9c38e338b5e3a73a86fb5 Mon Sep 17 00:00:00 2001 From: Bernhard Amann Date: Fri, 25 Oct 2013 18:27:57 -0700 Subject: [PATCH 38/47] Revert "Fix the dir module." This reverts commit 4b0ee2e7ca904d150a687435c2e1b0fdbc241572. Sorry, bad idea. --- scripts/base/utils/dir.bro | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/base/utils/dir.bro b/scripts/base/utils/dir.bro index 30d6fed832..e64af43b15 100644 --- a/scripts/base/utils/dir.bro +++ b/scripts/base/utils/dir.bro @@ -28,8 +28,7 @@ event Dir::monitor_ev(dir: string, last_files: set[string], callback: function(fname: string), poll_interval: interval) { - # the command lists all file in the directory in the form [inode]-[ctime] [filename] - when ( local result = Exec::run([$cmd=fmt("find \"%s\" -depth 1 -exec stat -f \"%%i-%%c %%N\" {} \\;", str_shell_escape(dir))]) ) + when ( local result = Exec::run([$cmd=fmt("ls -i -1 \"%s/\"", str_shell_escape(dir))]) ) { if ( result$exit_code != 0 ) { From 64812daa50086deb32a353e8af087f8b2ceb019a Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sat, 26 Oct 2013 19:15:43 -0700 Subject: [PATCH 39/47] Next version of the threading queue deadlock fix. We now just use the read/write counters, as suggested by Gilbert. --- src/threading/Manager.cc | 12 +----------- src/threading/Queue.h | 2 +- 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 9e0616f0d2..1b6cb551e2 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -82,17 +82,7 @@ double Manager::NextTimestamp(double* network_time) { MsgThread* t = *i; - // We check here if there's something ready to read from the - // queue. Normally the queue will tell us that reliably via - // MightHaveOut() because we keep sending heartbeats that - // will ensure that the method will eventually return true. - // However, when running without network source and without - // any communication, the timer_manager's time will always - // remain at 1.0, which means that heartbeats will never be - // triggered. In that case, we make sure to still process our - // threads from time to time. - if ( ((*i)->MightHaveOut() && ! t->Killed()) - || (timer_mgr->Time() == 1.0 && random() % 10000 == 0) ) + if ( (*i)->MightHaveOut() && ! t->Killed() ) return timer_mgr->Time(); } diff --git a/src/threading/Queue.h b/src/threading/Queue.h index b2b3fdaa03..6d21bfd998 100644 --- a/src/threading/Queue.h +++ b/src/threading/Queue.h @@ -69,7 +69,7 @@ public: * it is empty. In other words, this method helps to avoid locking the queue * frequently, but doesn't allow you to forgo it completely. 
*/ - bool MaybeReady() { return (read_ptr != write_ptr); } + bool MaybeReady() { return (num_reads != num_writes); } /** Wake up the reader if it's currently blocked for input. This is primarily to give it a chance to check termination quickly. From eff96bef370ff680c4f116d56f5bc30a67f1cdfa Mon Sep 17 00:00:00 2001 From: Robin Sommer Date: Sat, 26 Oct 2013 19:20:09 -0700 Subject: [PATCH 40/47] Updating submodule(s). [nomail] --- aux/broctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aux/broctl b/aux/broctl index e3c82d67d3..cea34f6de7 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit e3c82d67d3835e7a56d577b91abe99c396bbe989 +Subproject commit cea34f6de7fc3b6f01921593797e5f0f197b67a7 From 31c7c1a6737ddf8bc9b6537459f617ea57c0b799 Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Mon, 28 Oct 2013 08:09:16 -0400 Subject: [PATCH 41/47] Change percent_lost in capture-loss from a string to a double. --- scripts/policy/misc/capture-loss.bro | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/policy/misc/capture-loss.bro b/scripts/policy/misc/capture-loss.bro index fd578ebf25..089412020a 100644 --- a/scripts/policy/misc/capture-loss.bro +++ b/scripts/policy/misc/capture-loss.bro @@ -34,7 +34,7 @@ export { ## Total number of ACKs seen in the previous measurement interval. acks: count &log; ## Percentage of ACKs seen where the data being ACKed wasn't seen. - percent_lost: string &log; + percent_lost: double &log; }; ## The interval at which capture loss reports are created. @@ -64,7 +64,7 @@ event CaptureLoss::take_measurement(last_ts: time, last_acks: count, last_gaps: $ts_delta=now-last_ts, $peer=peer_description, $acks=acks, $gaps=gaps, - $percent_lost=fmt("%.3f%%", pct_lost)]; + $percent_lost=pct_lost]; if ( pct_lost >= too_much_loss*100 ) NOTICE([$note=Too_Much_Loss, From b255aedc26ec09d0eca7c83b94d2b4c7996676e8 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Mon, 28 Oct 2013 13:24:24 -0500 Subject: [PATCH 42/47] Fix race condition in unit test. Removing an input stream immediately after it's created causes a race to read the entire file before the reader gets the signal to stop. --- .../btest/scripts/base/frameworks/file-analysis/input/basic.bro | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro b/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro index 053341c840..bd0da0753c 100644 --- a/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro @@ -23,7 +23,6 @@ event bro_init() local source: string = "../input.log"; Input::add_analysis([$source=source, $reader=Input::READER_BINARY, $mode=Input::MANUAL, $name=source]); - Input::remove(source); } event file_state_remove(f: fa_file) &priority=-10 From e1d2f6d82fac00e5c832a9bb9f3bd533f9e01e4c Mon Sep 17 00:00:00 2001 From: Vlad Grigorescu Date: Mon, 28 Oct 2013 08:09:16 -0400 Subject: [PATCH 43/47] Change percent_lost in capture-loss from a string to a double. --- CHANGES | 9 +++++++++ VERSION | 2 +- scripts/policy/misc/capture-loss.bro | 4 ++-- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/CHANGES b/CHANGES index ab60704542..6ed56e3f30 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,13 @@ +2.2-beta-167 | 2013-10-29 06:02:38 -0700 + + * Change percent_lost in capture-loss from a string to a double. + (Vlad Grigorescu) + + * New version of the threading queue deadlock fix. 
(Robin Sommer) + + * Updating README with download/git information. (Robin Sommer) + 2.2-beta-161 | 2013-10-25 15:48:15 -0700 * Add curl to list of optional dependencies. It's used by the diff --git a/VERSION b/VERSION index dc2a0b123a..09220343ac 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.2-beta-161 +2.2-beta-167 diff --git a/scripts/policy/misc/capture-loss.bro b/scripts/policy/misc/capture-loss.bro index fd578ebf25..089412020a 100644 --- a/scripts/policy/misc/capture-loss.bro +++ b/scripts/policy/misc/capture-loss.bro @@ -34,7 +34,7 @@ export { ## Total number of ACKs seen in the previous measurement interval. acks: count &log; ## Percentage of ACKs seen where the data being ACKed wasn't seen. - percent_lost: string &log; + percent_lost: double &log; }; ## The interval at which capture loss reports are created. @@ -64,7 +64,7 @@ event CaptureLoss::take_measurement(last_ts: time, last_acks: count, last_gaps: $ts_delta=now-last_ts, $peer=peer_description, $acks=acks, $gaps=gaps, - $percent_lost=fmt("%.3f%%", pct_lost)]; + $percent_lost=pct_lost]; if ( pct_lost >= too_much_loss*100 ) NOTICE([$note=Too_Much_Loss, From 26be082951bea4c93b33c3ed3ed617c804b20832 Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Tue, 29 Oct 2013 11:09:55 -0400 Subject: [PATCH 44/47] Return the Dir module to file name tracking instead of inode tracking. --- scripts/base/utils/dir.bro | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/scripts/base/utils/dir.bro b/scripts/base/utils/dir.bro index e64af43b15..3a2da7ffdd 100644 --- a/scripts/base/utils/dir.bro +++ b/scripts/base/utils/dir.bro @@ -28,7 +28,7 @@ event Dir::monitor_ev(dir: string, last_files: set[string], callback: function(fname: string), poll_interval: interval) { - when ( local result = Exec::run([$cmd=fmt("ls -i -1 \"%s/\"", str_shell_escape(dir))]) ) + when ( local result = Exec::run([$cmd=fmt("ls -1 \"%s/\"", str_shell_escape(dir))]) ) { if ( result$exit_code != 0 ) { @@ -44,10 +44,9 @@ event Dir::monitor_ev(dir: string, last_files: set[string], for ( i in files ) { - local parts = split1(files[i], / /); - if ( parts[1] !in last_files ) - callback(build_path_compressed(dir, parts[2])); - add current_files[parts[1]]; + if ( files[i] !in last_files ) + callback(build_path_compressed(dir, files[i])); + add current_files[files[i]]; } schedule poll_interval From b2d6ccfb195c0a7f9f92ab2b3a1d693483fa08de Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 29 Oct 2013 10:45:11 -0500 Subject: [PATCH 45/47] Revert "Fix race condition in unit test." This reverts commit b255aedc26ec09d0eca7c83b94d2b4c7996676e8. The test should work as it was -- actually seems to be a race in the thread termination code. 
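Should the race ever need to be worked around on the script side instead, a
hypothetical alternative (not what this revert does) would be to defer the
removal until the reader reports completion, assuming the binary reader
raises Input::end_of_data for analysis streams:

    event Input::end_of_data(name: string, source: string)
        {
        # Tear down only the stream the test creates.
        if ( source == "../input.log" )
            Input::remove(name);
        }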
--- .../btest/scripts/base/frameworks/file-analysis/input/basic.bro | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro b/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro index bd0da0753c..053341c840 100644 --- a/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro +++ b/testing/btest/scripts/base/frameworks/file-analysis/input/basic.bro @@ -23,6 +23,7 @@ event bro_init() local source: string = "../input.log"; Input::add_analysis([$source=source, $reader=Input::READER_BINARY, $mode=Input::MANUAL, $name=source]); + Input::remove(source); } event file_state_remove(f: fa_file) &priority=-10 From 7c7967c1ab6379369575c60825437f701dece5e2 Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 29 Oct 2013 11:04:34 -0500 Subject: [PATCH 46/47] Don't build broccoli ruby bindings by default, use --enable-ruby to do so. --- configure | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/configure b/configure index 1990d78569..2f14f95f9a 100755 --- a/configure +++ b/configure @@ -32,12 +32,12 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --enable-perftools force use of Google perftools on non-Linux systems (automatically on when perftools is present on Linux) --enable-perftools-debug use Google's perftools for debugging + --enable-ruby build ruby bindings for broccoli --disable-broccoli don't build or install the Broccoli library --disable-broctl don't install Broctl --disable-auxtools don't build or install auxiliary tools --disable-perftools don't try to build with Google Perftools --disable-python don't try to build python bindings for broccoli - --disable-ruby don't try to build ruby bindings for broccoli --disable-dataseries don't use the optional DataSeries log writer --disable-elasticsearch don't use the optional ElasticSearch log writer @@ -113,6 +113,7 @@ append_cache_entry INSTALL_BROCTL BOOL true append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING append_cache_entry ENABLE_MOBILE_IPV6 BOOL false append_cache_entry DISABLE_PERFTOOLS BOOL false +append_cache_entry DISABLE_RUBY_BINDINGS BOOL true # parse arguments while [ $# -ne 0 ]; do @@ -174,8 +175,8 @@ while [ $# -ne 0 ]; do --disable-python) append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true ;; - --disable-ruby) - append_cache_entry DISABLE_RUBY_BINDINGS BOOL true + --enable-ruby) + append_cache_entry DISABLE_RUBY_BINDINGS BOOL false ;; --disable-dataseries) append_cache_entry DISABLE_DATASERIES BOOL true From 22d35d2c8cb8da0ee897503e2f82f8efeccc84ae Mon Sep 17 00:00:00 2001 From: Jon Siwek Date: Tue, 29 Oct 2013 14:40:07 -0500 Subject: [PATCH 47/47] Fix thread processing/termination conditions. A thread that is done/killed should signify that the thread manager has some processing to do -- it needs to process any messages in its out queue, join the thread, and delete it. Otherwise the thread manager may reach a state where it makes no progress in processing the last remaining done/killed thread. --- src/threading/Manager.cc | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 1b6cb551e2..4491cd42b5 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -82,7 +82,10 @@ double Manager::NextTimestamp(double* network_time) { MsgThread* t = *i; - if ( (*i)->MightHaveOut() && ! 
t->Killed() ) + if ( t->MightHaveOut() || t->Killed() ) + // Even if the thread doesn't have output, it may be killed/done, + // which should also signify that processing is needed. The + // "processing" in that case is joining the thread and deleting it. return timer_mgr->Time(); } @@ -149,10 +152,8 @@ void Manager::Process() { BasicThread* t = *i; - if ( ! t->Killed() ) - continue; - - to_delete.push_back(t); + if ( t->Killed() ) + to_delete.push_back(t); } for ( all_thread_list::iterator i = to_delete.begin(); i != to_delete.end(); i++ )