diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst index 6eef223a90..67f95ecf3b 100644 --- a/doc/logging-dataseries.rst +++ b/doc/logging-dataseries.rst @@ -24,8 +24,8 @@ distributed on `HP Labs' web site to use recent developments of both packages with Bro, which you can download from github like this:: - git clone http://github.com/eric-anderson/Lintel - git clone http://github.com/eric-anderson/DataSeries + git clone http://github.com/dataseries/Lintel + git clone http://github.com/dataseries/DataSeries To then build and install the two into ````, do:: @@ -109,8 +109,13 @@ TODO Warning, while packing field not_valid_after of record 11, error was > 10%: (1346460000 / 1000000 = 1346.46, round() = 1346) + See Eric's mail. + * For testing our script-level options: - Can we get the extentsize from a ``.ds`` file? - Can we get the compressio level from a ``.ds`` file? + See Eric's mail. + +* Do we have a leak? diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc index aacef01f80..a3d193be97 100644 --- a/src/logging/writers/DataSeries.cc +++ b/src/logging/writers/DataSeries.cc @@ -329,13 +329,7 @@ bool DataSeries::DoInit(string path, int num_fields, const threading::Field* con else Warning(Fmt("%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'gz', 'bz2', 'none', 'any'. Defaulting to 'any'", ds_compression.c_str())); - const ExtentType& type = log_types.registerTypeR(schema); - - // Note: This is a bit dicey as it depends on the implementation of - // registerTypeR(), but its what the DataSeries guys recommended - // given that we function we originally used has been deprecated. 
- log_type = &type; - + log_type = log_types.registerTypePtr(schema); log_series.setType(*log_type); return OpenLog(path); diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h index ab2bcec88c..0d9ab67e95 100644 --- a/src/logging/writers/DataSeries.h +++ b/src/logging/writers/DataSeries.h @@ -100,7 +100,7 @@ private: // Internal DataSeries structures we need to keep track of. vector schema_list; ExtentTypeLibrary log_types; - const ExtentType *log_type; + ExtentType::Ptr log_type; ExtentSeries log_series; ExtentMap extents; int compress_type; diff --git a/testing/btest/core/leaks/dataseries-rotate.bro b/testing/btest/core/leaks/dataseries-rotate.bro new file mode 100644 index 0000000000..188de9717b --- /dev/null +++ b/testing/btest/core/leaks/dataseries-rotate.bro @@ -0,0 +1,34 @@ +# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r %DIR/../rotation.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES + +module Test; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) + type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. + } &log; +} + +redef Log::default_rotation_interval = 1hr; +redef Log::default_rotation_postprocessor_cmd = "echo"; + +event bro_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); +} + +event new_connection(c: connection) + { + Log::write(Test::LOG, [$t=network_time(), $id=c$id]); + } diff --git a/testing/btest/core/leaks/dataseries.bro b/testing/btest/core/leaks/dataseries.bro new file mode 100644 index 0000000000..886ee54dd9 --- /dev/null +++ b/testing/btest/core/leaks/dataseries.bro @@ -0,0 +1,9 @@ +# Needs perftools support. 
+# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES