Merge remote-tracking branch 'origin/master' into topic/johanna/ssl-resumption
Commit ba3b35a612
327 changed files with 8457 additions and 5926 deletions
@@ -9,3 +9,8 @@ signature file-tar {
    file-magic /([[:print:]\x00]){100}(([[:digit:]\x00\x20]){8}){3}/
    file-mime "application/x-tar", 150
}

signature file-swf {
    file-magic /(F|C|Z)WS/
    file-mime "application/x-shockwave-flash", 60
}
@@ -2769,19 +2769,6 @@ signature file-magic-auto408 {
    file-magic /(.{512})(\xec\xa5\xc1)/
}

# >0 string,=FWS (len=3), ["Macromedia Flash data,"], swap_endian=0
# >>3 byte&,x, ["version %d"], swap_endian=0
signature file-magic-auto409 {
    file-mime "application/x-shockwave-flash", 1
    file-magic /(FWS)(.{1})/
}

# >0 string,=CWS (len=3), ["Macromedia Flash data (compressed),"], swap_endian=0
signature file-magic-auto410 {
    file-mime "application/x-shockwave-flash", 60
    file-magic /(CWS)/
}

# >0 regex/20,=^\.[A-Za-z0-9][A-Za-z0-9][ \t] (len=29), ["troff or preprocessor input text"], swap_endian=0
signature file-magic-auto411 {
    file-mime "text/troff", 59
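For context, a minimal sketch (not part of this diff) of how a script can react once one of these file-magic signatures tags a file; it assumes the file_sniff event and fa_metadata record available in this line of development:

event file_sniff(f: fa_file, meta: fa_metadata)
    {
    # meta$mime_type is only set when a signature match identified the file.
    if ( meta?$mime_type && meta$mime_type == "application/x-shockwave-flash" )
        print fmt("SWF file detected: %s", f$id);
    }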
@@ -264,10 +264,10 @@ export {
    ## Returns: The set of MIME types.
    global registered_mime_types: function(tag: Analyzer::Tag) : set[string];

    ## Returns a table of all MIME-type-to-analyzer mappings currently registered.
    ##
    ## Returns: A table mapping each analyzer to the set of MIME types registered for
    ## it.
    ## Returns: A table mapping each analyzer to the set of MIME types
    ## registered for it.
    global all_registered_mime_types: function() : table[Analyzer::Tag] of set[string];

    ## Event that can be handled to access the Info record as it is sent on
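As an aside, a minimal sketch (not part of this diff) of using this API to dump the current mappings; it assumes other scripts have already registered their MIME types by the time the handler runs:

event bro_init() &priority=-10
    {
    # Negative priority so analyzer registrations happen first.
    local mapping = Analyzer::all_registered_mime_types();
    for ( tag in mapping )
        print fmt("%s -> %s", Analyzer::name(tag), mapping[tag]);
    }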
@@ -4,6 +4,17 @@
module Input;

export {
    type Event: enum {
        EVENT_NEW = 0,
        EVENT_CHANGED = 1,
        EVENT_REMOVED = 2,
    };

    type Mode: enum {
        MANUAL = 0,
        REREAD = 1,
        STREAM = 2
    };

    ## The default input reader used. Defaults to `READER_ASCII`.
    const default_reader = READER_ASCII &redef;
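For reference, a minimal sketch (not part of this diff) of how these Mode values are used with the Input framework; the file name and record fields are hypothetical:

type Idx: record {
    ip: addr;
};

type Val: record {
    reason: string;
};

global blocklist: table[addr] of Val = table();

event bro_init()
    {
    # REREAD re-loads the (hypothetical) file whenever it changes on disk.
    Input::add_table([$source="blocklist.txt", $name="blocklist",
                      $idx=Idx, $val=Val, $destination=blocklist,
                      $mode=Input::REREAD]);
    }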
@@ -81,6 +81,9 @@ export {
        ## Where the data was discovered.
        where: Where &log;

        ## The name of the node where the match was discovered.
        node: string &optional &log;

        ## If the data was discovered within a connection, the
        ## connection record should go here to give context to the data.
        conn: connection &optional;
@@ -240,6 +243,11 @@ function Intel::seen(s: Seen)
        s$indicator_type = Intel::ADDR;
        }

    if ( ! s?$node )
        {
        s$node = peer_description;
        }

    if ( have_full_data )
        {
        local items = get_items(s);
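A minimal sketch (not part of this diff) of feeding an observation into the framework: indicator_type and the new node field are filled in by Intel::seen() when omitted, as the hunk above shows. It assumes the Conn::IN_RESP location constant from the intel "seen" scripts is loaded:

event connection_established(c: connection)
    {
    # indicator_type is derived from $host; node defaults to peer_description.
    Intel::seen([$host=c$id$resp_h,
                 $conn=c,
                 $where=Conn::IN_RESP]);
    }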
@@ -1,7 +1,5 @@
@load ./main
@load ./postprocessors
@load ./writers/ascii
@load ./writers/dataseries
@load ./writers/sqlite
@load ./writers/elasticsearch
@load ./writers/none
@@ -5,9 +5,15 @@

module Log;

# Log::ID and Log::Writer are defined in types.bif due to circular dependencies.

export {
    ## Type that defines an ID unique to each log stream. Scripts creating new log
    ## streams need to redef this enum to add their own specific log ID. The log ID
    ## implicitly determines the default name of the generated log file.
    type Log::ID: enum {
        ## Dummy place-holder.
        UNKNOWN
    };

    ## If true, local logging is by default enabled for all filters.
    const enable_local_logging = T &redef;
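For context, a minimal sketch (not part of this diff) of how a script extends Log::ID and creates its own stream; the module name and record fields are hypothetical, and the resulting log file name (typically "example.log" here) follows from the ID as described above:

module Example;

export {
    redef enum Log::ID += { LOG };

    type Info: record {
        ts:  time   &log;
        msg: string &log;
    };
}

event bro_init()
    {
    # The default path of the log is derived from Example::LOG.
    Log::create_stream(Example::LOG, [$columns=Info]);
    }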
@@ -27,13 +33,13 @@ export {
    const set_separator = "," &redef;

    ## String to use for empty fields. This should be different from
    ## *unset_field* to make the output unambiguous.
    ## Can be overwritten by individual writers.
    const empty_field = "(empty)" &redef;

    ## String to use for an unset &optional field.
    ## Can be overwritten by individual writers.
    const unset_field = "-" &redef;

    ## Type defining the content of a logging stream.
    type Stream: record {
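Since all of these constants are marked &redef, a site can override them globally; a small sketch (values are hypothetical):

redef Log::set_separator = "|";
redef Log::empty_field = "EMPTY";
redef Log::unset_field = "NULL";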
@@ -1,60 +0,0 @@
##! Interface for the DataSeries log writer.

module LogDataSeries;

export {
    ## Compression to use with the DS output file. Options are:
    ##
    ## 'none' -- No compression.
    ## 'lzf' -- LZF compression (very quick, but leads to larger output files).
    ## 'lzo' -- LZO compression (very fast decompression times).
    ## 'zlib' -- GZIP compression (slower than LZF, but also produces smaller output).
    ## 'bz2' -- BZIP2 compression (slower than GZIP, but also produces smaller output).
    const compression = "zlib" &redef;

    ## The extent buffer size.
    ## Larger values here lead to better compression and more efficient writes,
    ## but also increase the lag between the time events are received and
    ## the time they are actually written to disk.
    const extent_size = 65536 &redef;

    ## Should we dump the XML schema we use for this DS file to disk?
    ## If yes, the XML schema shares the name of the logfile, but has
    ## an XML ending.
    const dump_schema = F &redef;

    ## How many threads should DataSeries spawn to perform compression?
    ## Note that this dictates the number of threads per log stream. If
    ## you're using a lot of streams, you may want to keep this number
    ## relatively small.
    ##
    ## Default value is 1, which will spawn one thread / stream.
    ##
    ## Maximum is 128, minimum is 1.
    const num_threads = 1 &redef;

    ## Should time be stored as an integer or a double?
    ## Storing time as a double leads to possible precision issues and
    ## can (significantly) increase the size of the resulting DS log.
    ## That said, timestamps stored in double form are consistent
    ## with the rest of Bro, including the standard ASCII log. Hence, we
    ## use them by default.
    const use_integer_for_time = F &redef;
}

# Default function to postprocess a rotated DataSeries log file. It moves the
# rotated file to a new name that includes a timestamp with the opening time,
# and then runs the writer's default postprocessor command on it.
function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
    {
    # Move file to name including both opening and closing time.
    local dst = fmt("%s.%s.ds", info$path,
                    strftime(Log::default_rotation_date_format, info$open));

    system(fmt("/bin/mv %s %s", info$fname, dst));

    # Run default postprocessor.
    return Log::run_rotation_postprocessor_cmd(info, dst);
    }

redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func };
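The diff removes this file entirely. For reference only, while the DataSeries writer was still in the tree its &redef constants were meant to be tuned like any other writer option; a hypothetical sketch:

# Only meaningful while the DataSeries writer was still part of the tree:
redef LogDataSeries::compression = "lzo";
redef LogDataSeries::extent_size = 131072;
redef LogDataSeries::num_threads = 2;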
@@ -1,48 +0,0 @@
##! Log writer for sending logs to an ElasticSearch server.
##!
##! Note: This module is in testing and is not yet considered stable!
##!
##! There is one known memory issue. If your elasticsearch server is
##! running slowly and taking too long to return from bulk insert
##! requests, the message queue to the writer thread will continue
##! growing larger and larger giving the appearance of a memory leak.

module LogElasticSearch;

export {
    ## Name of the ES cluster.
    const cluster_name = "elasticsearch" &redef;

    ## ES server.
    const server_host = "127.0.0.1" &redef;

    ## ES port.
    const server_port = 9200 &redef;

    ## Name of the ES index.
    const index_prefix = "bro" &redef;

    ## The ES type prefix comes before the name of the related log.
    ## e.g. prefix = "bro\_" would create types of bro_dns, bro_software, etc.
    const type_prefix = "" &redef;

    ## The time before an ElasticSearch transfer will timeout. Note that
    ## the fractional part of the timeout will be ignored. In particular,
    ## time specifications less than a second result in a timeout value of
    ## 0, which means "no timeout."
    const transfer_timeout = 2secs;

    ## The batch size is the number of messages that will be queued up before
    ## they are sent to be bulk indexed.
    const max_batch_size = 1000 &redef;

    ## The maximum amount of wall-clock time that is allowed to pass without
    ## finishing a bulk log send. This represents the maximum delay you
    ## would like to have with your logs before they are sent to ElasticSearch.
    const max_batch_interval = 1min &redef;

    ## The maximum byte size for a buffered JSON string to send to the bulk
    ## insert API.
    const max_byte_size = 1024 * 1024 &redef;
}
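This file is likewise removed by the diff. For reference only, while the ElasticSearch writer existed, a deployment would have pointed it at its server through these &redef constants; a hypothetical sketch:

# Only meaningful while the ElasticSearch writer was still part of the tree:
redef LogElasticSearch::server_host = "10.0.0.5";
redef LogElasticSearch::server_port = 9200;
redef LogElasticSearch::index_prefix = "bro";
redef LogElasticSearch::max_batch_size = 5000;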
@@ -2485,8 +2485,7 @@ type http_message_stat: record {
    header_length: count;
};

## Maximum number of HTTP entity data delivered to events. The amount of data
## can be limited for better performance, zero disables truncation.
## Maximum number of HTTP entity data delivered to events.
##
## .. bro:see:: http_entity_data skip_http_entity_data skip_http_data
global http_entity_data_delivery_size = 1500 &redef;
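This constant caps the chunk size passed to the http_entity_data event. A minimal sketch (not part of this diff) of raising the limit and consuming the chunks:

redef http_entity_data_delivery_size = 4096;

event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
    {
    # Each chunk delivered here is at most http_entity_data_delivery_size bytes.
    print fmt("%s delivered %d bytes of entity data", c$uid, length);
    }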
@@ -3364,9 +3363,6 @@ const global_hash_seed: string = "" &redef;
## The maximum is currently 128 bits.
const bits_per_uid: count = 96 &redef;

# Load BiFs defined by plugins.
@load base/bif/plugins

# Load these frameworks here because they use fairly deep integration with
# BiFs and script-land defined types.
@load base/frameworks/logging

@@ -3375,3 +3371,7 @@ const bits_per_uid: count = 96 &redef;
@load base/frameworks/files

@load base/bif

# Load BiFs defined by plugins.
@load base/bif/plugins
@@ -30,6 +30,7 @@ export {
    const HELLO_REQUEST = 0;
    const CLIENT_HELLO = 1;
    const SERVER_HELLO = 2;
    const HELLO_VERIFY_REQUEST = 3; # RFC 6347
    const SESSION_TICKET = 4; # RFC 5077
    const CERTIFICATE = 11;
    const SERVER_KEY_EXCHANGE = 12;
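A minimal sketch (not part of this diff) of how these handshake-type constants can be used from script-land; it assumes the ssl_handshake_message event with this signature is available in this development line:

event ssl_handshake_message(c: connection, is_orig: bool, msg_type: count, length: count)
    {
    # SESSION_TICKET (4) is the NewSessionTicket handshake message from RFC 5077.
    if ( msg_type == SSL::SESSION_TICKET )
        print fmt("session ticket handshake message in %s", c$uid);
    }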
@@ -40,6 +41,7 @@ export {
    const FINISHED = 20;
    const CERTIFICATE_URL = 21; # RFC 3546
    const CERTIFICATE_STATUS = 22; # RFC 3546
    const SUPPLEMENTAL_DATA = 23; # RFC 4680

    ## Mapping between numeric codes and human readable strings for alert
    ## levels.
@@ -112,7 +114,8 @@ export {
        [19] = "client_certificate_type",
        [20] = "server_certificate_type",
        [21] = "padding", # temporary till 2015-03-12
        [22] = "encrypt_then_mac", # temporary till 2015-06-05
        [22] = "encrypt_then_mac",
        [23] = "extended_master_secret", # temporary till 2015-09-26
        [35] = "SessionTicket TLS",
        [40] = "extended_random",
        [13172] = "next_protocol_negotiation",
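A minimal sketch (not part of this diff) of translating a numeric extension code through this table; it assumes the table is exported as SSL::extensions and that the ssl_extension event is available:

event ssl_extension(c: connection, is_orig: bool, code: count, val: string)
    {
    # Fall back to "unknown" for codes not present in the mapping table.
    local name = code in SSL::extensions ? SSL::extensions[code] : "unknown";
    print fmt("%s sent TLS extension %d (%s)", is_orig ? "client" : "server", code, name);
    }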