Mirror of https://github.com/zeek/zeek.git, synced 2025-10-02 22:58:20 +00:00
Merge remote-tracking branch 'origin/master' into topic/bernhard/input-threads
Conflicts:
	src/CMakeLists.txt
	testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log
	testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log
This commit is contained in: commit 3b82d69eb3
167 changed files with 3528 additions and 1066 deletions
@@ -105,5 +105,8 @@ event protocol_violation(c: connection, atype: count, aid: count,
                        reason: string) &priority=-5
	{
	if ( c?$dpd )
		{
		Log::write(DPD::LOG, c$dpd);
		delete c$dpd;
		}
	}

@@ -1,3 +1,4 @@
@load ./main
@load ./postprocessors
@load ./writers/ascii
@load ./writers/dataseries

@@ -332,7 +332,7 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool
function default_path_func(id: ID, path: string, rec: any) : string
	{
	local id_str = fmt("%s", id);

	local parts = split1(id_str, /::/);
	if ( |parts| == 2 )
		{
@@ -340,7 +340,7 @@ function default_path_func(id: ID, path: string, rec: any) : string
		# or a filter path explicitly set by the user, so continue using it.
		if ( path != "" )
			return path;

		# Example: Notice::LOG -> "notice"
		if ( parts[2] == "LOG" )
			{
@@ -356,11 +356,11 @@ function default_path_func(id: ID, path: string, rec: any) : string
			output = cat(output, sub_bytes(module_parts[4],1,1), "_", sub_bytes(module_parts[4], 2, |module_parts[4]|));
			return to_lower(output);
			}

		# Example: Notice::POLICY_LOG -> "notice_policy"
		if ( /_LOG$/ in parts[2] )
			parts[2] = sub(parts[2], /_LOG$/, "");

		return cat(to_lower(parts[1]),"_",to_lower(parts[2]));
		}
	else
@@ -376,13 +376,16 @@ function run_rotation_postprocessor_cmd(info: RotationInfo, npath: string) : bool
	if ( pp_cmd == "" )
		return T;

	# Turn, e.g., Log::WRITER_ASCII into "ascii".
	local writer = subst_string(to_lower(fmt("%s", info$writer)), "log::writer_", "");

	# The date format is hard-coded here to provide a standardized
	# script interface.
	system(fmt("%s %s %s %s %s %d",
	system(fmt("%s %s %s %s %s %d %s",
		pp_cmd, npath, info$path,
		strftime("%y-%m-%d_%H.%M.%S", info$open),
		strftime("%y-%m-%d_%H.%M.%S", info$close),
		info$terminating));
		info$terminating, writer));

	return T;
	}
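For context, a hedged sketch of how a site typically hooks into this function: the command it runs comes from a &redef option, assumed here to be Log::default_rotation_postprocessor_cmd as in the Bro 2.x base scripts, and after this change that command receives the writer name (e.g. "ascii" or "dataseries") as an extra final argument.

# Hypothetical site configuration; the option name and script path are
# assumptions, and the argument order mirrors the system() call above:
#   <cmd> <rotated file> <base path> <open time> <close time> <terminating> <writer>
redef Log::default_rotation_postprocessor_cmd = "/usr/local/share/bro/archive-log";
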
@@ -407,7 +410,7 @@ function add_filter(id: ID, filter: Filter) : bool
	# definition.
	if ( ! filter?$path_func )
		filter$path_func = default_path_func;

	filters[id, filter$name] = filter;
	return __add_filter(id, filter);
	}

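A hedged usage sketch for the path_func hook shown above (not part of this commit): a custom function can stand in for default_path_func on a per-filter basis. Conn::LOG and the Log::Filter fields $name, $path and $path_func are assumed from the Bro 2.x base scripts.

function daily_path(id: Log::ID, path: string, rec: any) : string
	{
	# Keep the path already chosen for the filter and append the current
	# day, e.g. "conn-2012-04-16".
	return cat(path, "-", strftime("%Y-%m-%d", network_time()));
	}

event bro_init()
	{
	Log::add_filter(Conn::LOG, [$name="daily", $path="conn", $path_func=daily_path]);
	}
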
scripts/base/frameworks/logging/writers/dataseries.bro (new file, 60 lines)
@@ -0,0 +1,60 @@
##! Interface for the DataSeries log writer.

module LogDataSeries;

export {
	## Compression to use with the DS output file. Options are:
	##
	## 'none' -- No compression.
	## 'lzf' -- LZF compression. Very quick, but leads to larger output files.
	## 'lzo' -- LZO compression. Very fast decompression times.
	## 'gz' -- GZIP compression. Slower than LZF, but also produces smaller output.
	## 'bz2' -- BZIP2 compression. Slower than GZIP, but also produces smaller output.
	const compression = "lzo" &redef;

	## The extent buffer size.
	## Larger values here lead to better compression and more efficient writes, but
	## also increase the lag between the time events are received and the time they
	## are actually written to disk.
	const extent_size = 65536 &redef;

	## Should we dump the XML schema we use for this DS file to disk?
	## If yes, the XML schema shares the name of the logfile, but has
	## an XML ending.
	const dump_schema = F &redef;

	## How many threads should DataSeries spawn to perform compression?
	## Note that this dictates the number of threads per log stream. If
	## you're using a lot of streams, you may want to keep this number
	## relatively small.
	##
	## Default value is 1, which will spawn one thread / stream.
	##
	## Maximum is 128, minimum is 1.
	const num_threads = 1 &redef;

	## Should time be stored as an integer or a double?
	## Storing time as a double leads to possible precision issues and
	## can (significantly) increase the size of the resulting DS log.
	## That said, timestamps stored in double form are consistent
	## with the rest of Bro, including the standard ASCII log. Hence, we
	## use them by default.
	const use_integer_for_time = F &redef;
}

# Default function to postprocess a rotated DataSeries log file. It moves the
# rotated file to a new name that includes a timestamp with the opening time, and
# then runs the writer's default postprocessor command on it.
function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
	{
	# Move file to name including both opening and closing time.
	local dst = fmt("%s.%s.ds", info$path,
			strftime(Log::default_rotation_date_format, info$open));

	system(fmt("/bin/mv %s %s", info$fname, dst));

	# Run default postprocessor.
	return Log::run_rotation_postprocessor_cmd(info, dst);
	}

redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func };

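Since every tuning constant above is declared &redef, a site can override it from local policy, and routing an existing stream to the new writer goes through a log filter. The snippet below is a hedged sketch: the $writer field of Log::Filter, Conn::LOG, and bro_init are assumed from the Bro 2.x base scripts rather than taken from this diff.

# Hypothetical site-local tuning for the DataSeries writer.
redef LogDataSeries::compression = "gz";     # smaller files, slower writes
redef LogDataSeries::extent_size = 131072;   # larger extents, better compression
redef LogDataSeries::dump_schema = T;        # also dump the XML schema next to the log

# Send an existing stream to the writer through an additional filter.
event bro_init()
	{
	Log::add_filter(Conn::LOG, [$name="conn-ds", $writer=Log::WRITER_DATASERIES]);
	}
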
@@ -23,7 +23,10 @@ redef Cluster::worker2manager_events += /Notice::cluster_notice/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# The notice policy is completely handled by the manager and shouldn't be
# done by workers or proxies to save time for packet processing.
redef policy = {};
event bro_init() &priority=-11
	{
	Notice::policy = table();
	}

event Notice::begin_suppression(n: Notice::Info)
	{