Working on the logging API exposed to scripts.

- Moving all functions into the Log::* namespace, using the recent
  bifcl updates. Moved logging-specific stuff to logging.bif.

- Log::create_stream() now takes a record Log::Stream as its second
  argument, which specifies columns and (optionally) the event.

- All the internal BiFs are now called "Log::__<something>", with
  script-level wrappers "Log::<something>". That first allows adding
  additional code at the script level, and second makes things more
  comprehensible, as now all relevant functionality is collected (and
  later documented) in policy/logging.bro.

- New function Log::flush(id), which does the obvious assuming the
  writer supports it.

- add_default_filter() is now called implicitly with every
  create_stream(). Seems that we usually want that functionality, and
  when not, remove_default_filter() gets rid of it.

- The namespace of a stream's ID is now used as the default "path"
  (e.g., if the namespace is SSH, the default log file is "ssh.log").

- Updated policy/test-logging.bro as well as the btest tests according
  to these changes.
This commit is contained in:
Robin Sommer 2011-02-27 15:05:47 -08:00
parent e3b2748fa6
commit ab15437339
26 changed files with 337 additions and 451 deletions

View file

@ -4,15 +4,6 @@ List of the things not implemented yet:
- Rotation support.
- Not sure if the logging does the right thing with &optional and
&default values. Needs testing.
- Put script function/constants into namespace.
- Some type checks are still missing.
- Cleanup code.
- Spawning writers in separate threads (not clear if we want that initially).
- Check the new event-value code.
Questions:
* Is giving the record column twice to create_stream() too
redundant? (once directly, and once via the event)::
global ssh_log: event(rec: Log);
log_create_stream(LOG_SSH, SSH::Log, ssh_log);

View file

@ -273,53 +273,7 @@ type entropy_test_result: record {
serial_correlation: double;
};
type Log_Writer: enum { # TODO: Move these into bif and use from C++ as well.
WRITER_DEFAULT, # See default_writer below.
WRITER_ASCII,
};
# Each stream gets a unique ID. This type will be extended by
# other scripts.
type Log_ID: enum {
Unknown
};
# The default writer to use if a filter does not specify
# anything else.
const Log_default_writer = WRITER_ASCII &redef;
# A filter defining what to log.
type log_filter: record {
# A name to reference this filter.
name: string;
# A predicate returning True if the filter wants a log entry
# to be recorded. If not given, an implicit True is assumed
# for all entries. The predicate receives one parameter:
# an instance of the log's record type with the fields to be
# logged.
pred: function(rec: any): bool &optional;
# A path for outputting everything matching this
# filter. The path is either a string, or a function
# called with a single ``ID`` argument and returning a string.
#
# The specific interpretation of the string is left to the
# Writer, but if it's refering to a file, it's assumed that no
# extension is given; the writer will add whatever is
# appropiate.
path: string &optional;
path_func: function(id: Log_ID, path: string): string &optional;
# A subset of column names to record. If not given, all
# columns are recorded.
include: set[string] &optional;
exclude: set[string] &optional;
# The writer to use.
writer: Log_Writer &default=Log_default_writer;
};
@load logging # sic! Not logging.bif.
# Prototypes of Bro built-in functions.
@load strings.bif.bro
@ -1482,4 +1436,3 @@ const dump_selected_source_packets = F &redef;
# NOTE: DO NOT SET THIS TO TRUE WHEN ANONYMIZING A TRACE!
# (TODO: this variable should be disabled when using '-A' option)
const dump_original_packets_if_not_rewriting = F &redef;

View file

@ -1,10 +1,110 @@
module Log;
function Log_add_default_filter(id: Log_ID)
# Log::ID and Log::Writer are defined in bro.init due to circular dependencies.
export {
# A stream defining the logging.
type Stream: record {
# A record type defining the log's columns.
columns: any;
# An optional event raised for each log entry. It must receive
# a single argument of type $columns.
ev: any &optional;
};
# A filter customizes logging.
type Filter: record {
# A name to reference this filter.
name: string;
# A predicate returning True if the filter wants a log entry
# to be recorded. If not given, an implicit True is assumed
# for all entries. The predicate receives one parameter:
# an instance of the log's record type with the fields to be
# logged.
pred: function(rec: any): bool &optional;
# A path for outputting everything matching this
# filter. The path is either a string, or a function
# called with a single ``ID`` argument and returning a string.
#
# The specific interpretation of the string is left to the
# Writer, but if it's referring to a file, it's assumed that no
# extension is given; the writer will add whatever is
# appropriate.
path: string &optional;
path_func: function(id: ID, path: string): string &optional;
# A subset of column names to record. If not given, all
# columns are recorded.
include: set[string] &optional;
exclude: set[string] &optional;
# The writer to use.
writer: Writer &optional;
};
global create_stream: function(id: Log::ID, stream: Log::Stream) : bool;
global add_filter: function(id: Log::ID, filter: Log::Filter) : bool;
global remove_filter: function(id: Log::ID, name: string) : bool;
global write: function(id: Log::ID, columns: any) : bool;
global set_buf: function(id: Log::ID, buffered: bool): bool;
global flush: function(id: Log::ID): bool;
global add_default_filter: function(id: ID) : bool;
global remove_default_filter: function(id: ID) : bool;
}
@load logging.bif # Needs Log::Filter and Log::Stream defined.
module Log;
export {
# The default writer to use if a filter does not specify
# anything else.
const default_writer = Log::WRITER_ASCII &redef;
}
function create_stream(id: Log::ID, stream: Log::Stream) : bool
{
log_add_filter(id, [$name="default"]);
if ( ! Log::__create_stream(id, stream) )
return F;
return add_default_filter(id);
}
function Log_remove_default_filter(id: Log_ID): bool
function add_filter(id: Log::ID, filter: Log::Filter) : bool
{
log_remove_filter(id, "default");
return Log::__add_filter(id, filter);
}
# Removes the filter with the given name from the stream's filter set.
# Thin script-level wrapper around the internal BiF Log::__remove_filter.
function remove_filter(id: Log::ID, name: string) : bool
{
return Log::__remove_filter(id, name);
}
# Writes one log entry, given as a record of the stream's column type,
# to the stream. Thin wrapper around the internal BiF Log::__write.
function write(id: Log::ID, columns: any) : bool
{
return Log::__write(id, columns);
}
# Enables or disables output buffering for the stream's writers.
# Thin wrapper around the internal BiF Log::__set_buf.
function set_buf(id: Log::ID, buffered: bool): bool
{
return Log::__set_buf(id, buffered);
}
# Flushes any currently buffered output for the stream, assuming the
# writers support it. Thin wrapper around the internal BiF Log::__flush.
function flush(id: Log::ID): bool
{
return Log::__flush(id);
}
# Installs the implicit filter named "default", which logs everything
# using the stream's defaults.
function add_default_filter(id: ID) : bool
{
return add_filter(id, [$name="default"]);
}
# Removes the implicitly installed filter named "default"; use this when
# the default logging behavior is not wanted.
function remove_default_filter(id: ID) : bool
{
return remove_filter(id, "default");
}

View file

@ -1,10 +1,8 @@
module SSH;
@load logging
export {
# Create a new ID for our log stream
redef enum Log_ID += { LOG_SSH };
redef enum Log::ID += { SSH };
# Define a record with all the columns the log file can have.
# (I'm using a subset of fields from ssh-ext for demonstration.)
@ -16,7 +14,7 @@ export {
};
}
global ssh_log: event(rec: Log);
global log_ssh: event(rec: Log);
function fail(rec: Log): bool
{
@ -27,47 +25,26 @@ event bro_init()
{
# Create the stream.
# First argument is the ID for the stream.
# Second argument is the log record type.
# Third argument is the log event, which must receive a single argument of type arg2.
log_create_stream(LOG_SSH, SSH::Log, ssh_log);
# Second argument is a record of type Log::Stream.
Log::create_stream(SSH, [$columns=Log, $ev=log_ssh]);
# Add a default filter that simply logs everything to "ssh.log" using the default writer.
Log_add_default_filter(LOG_SSH);
log_add_filter(LOG_SSH, [$name="f1", $path="ssh.success", $pred=function(rec: Log): bool { return rec$status == "success"; }]);
log_add_filter(LOG_SSH, [$name="f2", $path="ssh.failure", $pred=fail]);
# Printing headers for the filters doesn't work yet either and needs to
# be considered in the final design. (based on the "select" set).
#Log::add_filter("ssh", [$name="successful logins",
# #$pred(rec: Log) = { print rec$status; return T; },
# $path="ssh-logins",
# #$select=set("t"),
# $writer=Log::WRITER_CSV]);
Log::add_filter(SSH, [$name="f1", $path="ssh.success", $pred=function(rec: Log): bool { return rec$status == "success"; }]);
Log::add_filter(SSH, [$name="f2", $path="ssh.failure", $pred=fail]);
local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp];
local r: Log = [$t=network_time(), $id=cid, $status="success"];
# Log something.
log_write(LOG_SSH, r);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="UK"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="success", $country="BR"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX"]);
Log::write(SSH, r);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="UK"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="success", $country="BR"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX"]);
}
event ssh_log(rec: Log)
event log_ssh(rec: Log)
{
print fmt("Ran the log handler from the same module. Extracting time: %0.6f", rec$t);
print rec;
}
#
#
#module WHATEVER;
#
#event SSH::log(rec: SSH::Log)
# {
# print fmt("Ran the SSH::log handler from a different module. Extracting time: %0.6f", rec$t);
# }

View file

@ -127,6 +127,7 @@ endmacro(GET_BIF_OUTPUT_FILES)
set(BIF_SRCS
bro.bif
logging.bif
event.bif
const.bif
types.bif

View file

@ -508,6 +508,7 @@ void builtin_run_time(const char* msg, BroObj* arg)
}
#include "bro.bif.func_h"
#include "logging.bif.func_h"
#include "common-rw.bif.func_h"
#include "finger-rw.bif.func_h"
@ -519,6 +520,7 @@ void builtin_run_time(const char* msg, BroObj* arg)
#include "dns-rw.bif.func_h"
#include "bro.bif.func_def"
#include "logging.bif.func_def"
#include "strings.bif.func_def"
void init_builtin_funcs()
@ -530,6 +532,7 @@ void init_builtin_funcs()
gap_info = internal_type("gap_info")->AsRecordType();
#include "bro.bif.func_init"
#include "logging.bif.func_init"
#include "common-rw.bif.func_init"
#include "finger-rw.bif.func_init"

View file

@ -76,9 +76,9 @@ LogMgr::~LogMgr()
delete *s;
}
LogMgr::Stream* LogMgr::FindStream(EnumVal* stream_id)
LogMgr::Stream* LogMgr::FindStream(EnumVal* id)
{
unsigned int idx = stream_id->AsEnum();
unsigned int idx = id->AsEnum();
if ( idx >= streams.size() || ! streams[idx] )
{
@ -111,14 +111,52 @@ void LogMgr::RemoveDisabledWriters(Stream* stream)
}
}
bool LogMgr::CreateStream(EnumVal* stream_id, RecordType* columns, EventHandlerPtr handler)
bool LogMgr::CreateStream(EnumVal* id, RecordVal* sval)
{
// TODO: Should check that the record has only supported types.
RecordType* rtype = sval->Type()->AsRecordType();
unsigned int idx = stream_id->AsEnum();
if ( ! same_type(rtype, BifType::Record::Log::Stream, 0) )
{
run_time("sval argument not of right type");
return false;
}
RecordType* columns = sval->Lookup(rtype->FieldOffset("columns"))->AsType()->AsTypeType()->Type()->AsRecordType();
Val* event_val = sval->Lookup(rtype->FieldOffset("ev"));
Func* event = event_val ? event_val->AsFunc() : 0;
if ( event )
{
// Make sure the event is prototyped as expected.
FuncType* etype = event->FType()->AsFuncType();
if ( ! etype->IsEvent() )
{
run_time("stream event is a function, not an event");
return false;
}
const type_list* args = etype->ArgTypes()->Types();
if ( args->length() != 1 )
{
run_time("stream event must take a single argument");
return false;
}
if ( ! same_type((*args)[0], columns) )
{
run_time("stream event's argument type does not match column record type");
return new Val(0, TYPE_BOOL);
}
}
// Make sure the vector has entries for all streams up to the one
// given.
unsigned int idx = id->AsEnum();
while ( idx >= streams.size() )
streams.push_back(0);
@ -126,14 +164,14 @@ bool LogMgr::CreateStream(EnumVal* stream_id, RecordType* columns, EventHandlerP
// We already know this one, delete the previous definition.
delete streams[idx];
// Create new stream and record the type for the columns.
// Create new stream.
streams[idx] = new Stream;
streams[idx]->name = stream_id->Type()->AsEnumType()->Lookup(idx);
streams[idx]->name = id->Type()->AsEnumType()->Lookup(idx);
streams[idx]->event = event ? event_registry->Lookup(event->GetID()->Name()) : 0;
streams[idx]->columns = columns;
streams[idx]->event = handler;
columns->Ref();
DBG_LOG(DBG_LOGGING, "Created new logging stream '%s', raising event %s", streams[idx]->name.c_str(), streams[idx]->event->Name());
DBG_LOG(DBG_LOGGING, "Created new logging stream '%s', raising event %s", streams[idx]->name.c_str(), event ? streams[idx]->event->Name() : "<none>");
return true;
}
@ -207,17 +245,17 @@ bool LogMgr::TraverseRecord(Filter* filter, RecordType* rt, TableVal* include, T
return true;
}
bool LogMgr::AddFilter(EnumVal* stream_id, RecordVal* fval)
bool LogMgr::AddFilter(EnumVal* id, RecordVal* fval)
{
RecordType* rtype = fval->Type()->AsRecordType();
if ( ! same_type(rtype, log_filter, 0) )
if ( ! same_type(rtype, BifType::Record::Log::Filter, 0) )
{
run_time("filter argument not of right type");
return false;
}
Stream* stream = FindStream(stream_id);
Stream* stream = FindStream(id);
if ( ! stream )
return false;
@ -227,17 +265,9 @@ bool LogMgr::AddFilter(EnumVal* stream_id, RecordVal* fval)
Val* writer_val = fval->Lookup(idx);
if ( ! writer_val )
{
// Use default.
// FIXME: Shouldn't Lookup() already take care if this?
const Attr* def_attr = log_filter->FieldDecl(idx)->FindAttr(ATTR_DEFAULT);
if ( ! def_attr )
internal_error("log_filter missing &default for writer attribute");
writer = BifConst::Log::default_writer->AsEnum();
writer_val = def_attr->AttrExpr()->Eval(0);
writer = writer_val->AsEnum();
Unref(writer_val);
}
else
writer = writer_val->AsEnum();
@ -269,7 +299,6 @@ bool LogMgr::AddFilter(EnumVal* stream_id, RecordVal* fval)
// Create a new Filter instance.
Val* event = fval->Lookup(rtype->FieldOffset("ev"));
Val* pred = fval->Lookup(rtype->FieldOffset("pred"));
Val* path_func = fval->Lookup(rtype->FieldOffset("path_func"));
@ -302,27 +331,19 @@ bool LogMgr::AddFilter(EnumVal* stream_id, RecordVal* fval)
else
{
// If no path is given, use the Stream ID as the default.
const char* n = stream->name.c_str();
char* lower = new char[strlen(n) + 1];
for ( char* s = lower; *n; ++n, ++s )
{
if ( strncmp(n, "::", 2) == 0 )
{
// Remove the scope operator. TODO: We need ab better way to
// generate the default here, but let's wait until we have
// everything in the right namespace.
*s = '_';
++n;
}
// If no path is given, use the Stream ID's namespace as the default
// if it has one, and the ID itself otherwise.
const char* s = stream->name.c_str();
const char* e = strstr(s, "::");
else
*s = tolower(*n);
}
if ( ! e )
e = s + strlen(s);
filter->path = string(lower);
filter->path_val = new StringVal(lower);
free(lower);
string path(s, e);
std::transform(path.begin(), path.end(), path.begin(), ::tolower);
filter->path = path;
filter->path_val = new StringVal(path.c_str());
}
stream->filters.push_back(filter);
@ -344,9 +365,9 @@ bool LogMgr::AddFilter(EnumVal* stream_id, RecordVal* fval)
return true;
}
bool LogMgr::RemoveFilter(EnumVal* stream_id, StringVal* filter)
bool LogMgr::RemoveFilter(EnumVal* id, StringVal* filter)
{
Stream* stream = FindStream(stream_id);
Stream* stream = FindStream(id);
if ( ! stream )
return false;
@ -369,11 +390,11 @@ bool LogMgr::RemoveFilter(EnumVal* stream_id, StringVal* filter)
return true;
}
bool LogMgr::Write(EnumVal* stream_id, RecordVal* columns)
bool LogMgr::Write(EnumVal* id, RecordVal* columns)
{
bool error = false;
Stream* stream = FindStream(stream_id);
Stream* stream = FindStream(id);
if ( ! stream )
return false;
@ -415,7 +436,7 @@ bool LogMgr::Write(EnumVal* stream_id, RecordVal* columns)
if ( filter->path_func )
{
val_list vl(2);
vl.append(stream_id->Ref());
vl.append(id->Ref());
vl.append(filter->path_val->Ref());
Val* v = filter->path_func->Call(&vl);
@ -562,9 +583,9 @@ LogVal** LogMgr::RecordToFilterVals(Filter* filter, RecordVal* columns)
return vals;
}
bool LogMgr::SetBuf(EnumVal* stream_id, bool enabled)
bool LogMgr::SetBuf(EnumVal* id, bool enabled)
{
Stream* stream = FindStream(stream_id);
Stream* stream = FindStream(id);
if ( ! stream )
return false;
@ -579,6 +600,23 @@ bool LogMgr::SetBuf(EnumVal* stream_id, bool enabled)
return true;
}
// Flushes all writers attached to the given stream.
// Returns false if no stream with that ID exists.
bool LogMgr::Flush(EnumVal* id)
	{
	Stream* stream = FindStream(id);

	if ( ! stream )
		return false;

	// Ask every writer of every filter on this stream to flush its output.
	list<Filter*>::iterator fi;
	for ( fi = stream->filters.begin(); fi != stream->filters.end(); ++fi )
		{
		Filter::WriterMap::iterator wi;
		for ( wi = (*fi)->writers.begin(); wi != (*fi)->writers.end(); ++wi )
			wi->second->Flush();
		}

	// Writers that failed while flushing are cleaned up here.
	RemoveDisabledWriters(stream);

	return true;
	}
void LogMgr::Error(LogWriter* writer, const char* msg)
{
run_time(fmt("error with writer for %s: %s", writer->Path().c_str(), msg));

View file

@ -55,11 +55,12 @@ public:
// These correspond to the BiFs visible on the scripting layer. The
// actual BiFs just forward here.
bool CreateStream(EnumVal* stream_id, RecordType* columns, EventHandlerPtr handler);
bool AddFilter(EnumVal* stream_id, RecordVal* filter);
bool RemoveFilter(EnumVal* stream_id, StringVal* filter);
bool Write(EnumVal* stream_id, RecordVal* columns);
bool SetBuf(EnumVal* stream_id, bool enabled); // Changes the state for all writers for that stream.
bool CreateStream(EnumVal* id, RecordVal* stream);
bool AddFilter(EnumVal* id, RecordVal* filter);
bool RemoveFilter(EnumVal* id, StringVal* filter);
bool Write(EnumVal* id, RecordVal* columns);
bool SetBuf(EnumVal* id, bool enabled); // Changes the state for all writers for that stream.
bool Flush(EnumVal* id); // Flushes all writers for the stream.
protected:
friend class LogWriter;
@ -75,7 +76,7 @@ private:
bool TraverseRecord(Filter* filter, RecordType* rt, TableVal* include, TableVal* exclude, string path, list<int> indices);
LogVal** RecordToFilterVals(Filter* filter, RecordVal* columns);
Stream* FindStream(EnumVal* stream_id);
Stream* FindStream(EnumVal* id);
void RemoveDisabledWriters(Stream* stream);
vector<Stream *> streams; // Indexed by stream enum.

View file

@ -60,6 +60,17 @@ bool LogWriter::SetBuf(bool enabled)
return true;
}
// Forwards a flush request to the writer implementation. If the
// implementation reports failure, the writer is marked disabled and
// must not be used further.
bool LogWriter::Flush()
	{
	bool ok = DoFlush();

	if ( ! ok )
		disabled = true;

	return ok;
	}
void LogWriter::Finish()
{
DoFinish();

View file

@ -31,9 +31,12 @@ public:
// occured, in which case the writer must not be used further.
bool Write(LogVal** vals);
// Sets the buffering status for the writer, if the writer supports
// Sets the buffering status for the writer, if the writer supports it.
bool SetBuf(bool enabled);
// Flushes any currently buffered output, if the writer supports it.
bool Flush();
// Finished writing to this logger. Will not be called if an error has
// been indicated earlier. After calling this, no more writing must be
// performed.
@ -65,6 +68,12 @@ protected:
// semantics.
virtual bool DoSetBuf(bool enabled) = 0;
// Called to flush any currently buffered output.
//
// A writer may ignore flush requests if it doesn't fit with its
// semantics.
virtual bool DoFlush() = 0;
// Called when a log output is to be rotated. Most directly, this only
// applies to writers outputting files, though a writer may also trigger
// other regular actions if that fits a similar model.

View file

@ -52,6 +52,12 @@ write_error:
return false;
}
// Flushes buffered output to the log file.
//
// Returns false if the flush fails (e.g., disk full); the original
// ignored fflush()'s return value, silently swallowing I/O errors.
// Reporting the failure lets the framework disable the writer, which
// is consistent with how write errors are handled.
bool LogWriterAscii::DoFlush()
	{
	// fflush() returns non-zero (EOF) on error.
	return fflush(file) == 0;
	}
void LogWriterAscii::DoFinish()
{
}

View file

@ -19,6 +19,7 @@ protected:
virtual bool DoWrite(int num_fields, LogField** fields, LogVal** vals);
virtual bool DoSetBuf(bool enabled);
virtual bool DoRotate(string rotated_path);
virtual bool DoFlush();
virtual void DoFinish();
private:

View file

@ -260,11 +260,10 @@ int record_all_packets;
RecordType* script_id;
TableType* id_table;
RecordType* log_filter;
#include "const.bif.netvar_def"
#include "types.bif.netvar_def"
#include "event.bif.netvar_def"
#include "logging.bif.netvar_def"
void init_event_handlers()
{
@ -320,6 +319,7 @@ void init_net_var()
{
#include "const.bif.netvar_init"
#include "types.bif.netvar_init"
#include "logging.bif.netvar_init"
conn_id = internal_type("conn_id")->AsRecordType();
endpoint = internal_type("endpoint")->AsRecordType();
@ -568,6 +568,4 @@ void init_net_var()
script_id = internal_type("script_id")->AsRecordType();
id_table = internal_type("id_table")->AsTableType();
log_filter = internal_type("log_filter")->AsRecordType();
}

View file

@ -264,8 +264,6 @@ extern int record_all_packets;
extern RecordType* script_id;
extern TableType* id_table;
extern RecordType* log_filter;
// Initializes globals that don't pertain to network/event analysis.
extern void init_general_global_var();
@ -275,5 +273,6 @@ extern void init_net_var();
#include "const.bif.netvar_h"
#include "types.bif.netvar_h"
#include "event.bif.netvar_h"
#include "logging.bif.netvar_h"
#endif

View file

@ -1115,7 +1115,6 @@ void EnumType::AddName(const string& module_name, const char* name, bool is_expo
void EnumType::AddName(const string& module_name, const char* name, bro_int_t val, bool is_export)
{
/* explicit value specified */
error_t rv;
if ( counter > 0 )
{
error("cannot mix explicit enumerator assignment and implicit auto-increment");

View file

@ -235,7 +235,7 @@ public:
return &val.subnet_val;
}
const BroType* AsType() const
BroType* AsType() const
{
CHECK_TAG(type->Tag(), TYPE_TYPE, "Val::Type", type_name)
return type;

View file

@ -360,200 +360,6 @@ function cat%(...%): string
return new StringVal(s);
%}
function logging_log%(index: string, rec: any%): any
%{
// TODO: Verify that rec is a record
// TODO: Coerce to the correct record type to fill in defaults.
RecordVal *recval = rec->AsRecordVal();
// Lookup the stream
TableVal *streams = opt_internal_table("Logging::streams");
VectorVal *columns;
RecordVal *stream_record;
if ( streams )
{
stream_record = streams->Lookup(index)->AsRecordVal();
if ( stream_record )
{
int columns_field = stream_record->Type()->AsRecordType()->FieldOffset("columns");
columns = stream_record->Lookup(columns_field)->AsVectorVal();
}
}
else
{
printf("Logging framework is dead (Logging::streams not found).\n");
return false;
}
// Lookup all filters for stream
TableVal *filters = opt_internal_table("Logging::filters");
TableVal *stream_filters;
if ( filters )
{
Val *lookup_v = filters->Lookup(index);
if ( lookup_v )
stream_filters = lookup_v->AsTableVal();
}
else
{
printf("Logging framework is dead (Logging::filters not found).\n");
return false;
}
// Generate the event for the log stream
// This happens regardless of all filters.
int name_field = stream_record->Type()->AsRecordType()->FieldOffset("name");
StringVal *log_type = stream_record->AsRecordVal()->Lookup(name_field)->AsStringVal();
string ID_module = extract_module_name(log_type->CheckString());
// The log event that is generated by default is MODULE_NAME::log
string log_event_name = make_full_var_name(ID_module.c_str(), "log");
EventHandlerPtr ev_ptr = internal_handler(log_event_name.c_str());
if ( ev_ptr )
{
val_list* vl = new val_list;
vl->append(recval->Ref());
mgr.QueueEvent(ev_ptr, vl, SOURCE_LOCAL);
}
// Iterate over the stream_filters
ListVal* filter_recs = stream_filters->ConvertToList(TYPE_ANY);
for ( int i = 0; i < filter_recs->Length(); ++i )
{
RecordVal* rv = filter_recs->Index(i)->AsListVal()->Index(0)->AsRecordVal();
int pred_field_index = rv->Type()->AsRecordType()->FieldOffset("pred");
Val *pred_func = rv->Lookup(pred_field_index);
if ( pred_func )
{
val_list args(1);
args.append(recval->Ref());
int pred_val = pred_func->AsFunc()->Call(&args)->AsBool();
//printf("predicate val: %d\n", pred_val);
// If the predicate returns false, don't send this record on to a writer.
if ( !pred_val )
continue;
}
// Format and print the line
// (send line onward to the filter's WRITER in the future)
// Get a path name for this filter
int path_field = rv->Type()->AsRecordType()->FieldOffset("path");
Val *path_val = rv->Lookup(path_field);
StringVal *path;
// If no path is given, use the Stream ID as the path
if ( path_val )
path = path_val->AsStringVal();
else
path = index;
// Get the file with the "path" name found above for this filter.
// Open a new file is one does not exist yet.
TableVal *logging_files = opt_internal_table("Logging::files");
Val *ff = logging_files->Lookup(path);
if ( !ff )
{
logging_files->Assign(path, new Val(new BroFile(path->CheckString(), "w")));
ff = logging_files->Lookup(path);
}
BroFile *f = ff->AsFile();
if ( ! f->IsOpen() )
continue;
// Actually format the log line and print it to the file found above.
ODesc d;
RecordType *rt = rec->Type()->AsRecordType();
for ( unsigned i = 1; i <= columns->Size(); ++i )
{
const char *field_name = columns->Lookup(i)->AsStringVal()->CheckString();
int field = rt->FieldOffset(field_name);
if ( field >= 0 ) // or if there is a default value
{
rec->AsRecordVal()->Lookup(field)->Describe(&d);
d.Add("\t");
}
}
f->Write(d.Description(), 0);
f->Write("\n",0);
}
Unref(filter_recs);
return 0;
%}
%%{
#include "LogMgr.h"
%%}
function log_create_stream%(id: Log_ID, columns: any, ev: any%) : bool
%{
if ( columns->Type()->Tag() != TYPE_TYPE )
{
run_time("log columns must be a type");
return new Val(0, TYPE_BOOL);
}
if ( columns->Type()->AsTypeType()->Type()->Tag() != TYPE_RECORD )
{
run_time("log columns must be a record type");
return new Val(0, TYPE_BOOL);
}
if ( ev->Type()->Tag() != TYPE_FUNC || ! ev->Type()->AsFuncType()->IsEvent() )
{
run_time("event argument is not of event type");
return new Val(0, TYPE_BOOL);
}
FuncType* et = ev->Type()->AsFuncType();
type_list* args = et->ArgTypes()->Types();
if ( args->length() != 1 )
{
run_time("event must take a single argument");
return new Val(0, TYPE_BOOL);
}
if ( ! same_type((*args)[0], columns->Type()->AsTypeType()->Type()) )
{
run_time("event argument type does not match log record type");
return new Val(0, TYPE_BOOL);
}
EventHandler* handler = event_registry->Lookup(ev->AsFunc()->GetID()->Name());
assert(handler);
bool result = log_mgr->CreateStream(id->AsEnumVal(), columns->Type()->AsTypeType()->Type()->AsRecordType(), handler);
return new Val(result, TYPE_BOOL);
%}
function log_add_filter%(id: Log_ID, filter: log_filter%) : bool
%{
bool result = log_mgr->AddFilter(id->AsEnumVal(), filter->AsRecordVal());
return new Val(result, TYPE_BOOL);
%}
function log_remove_filter%(id: Log_ID, name: string%) : bool
%{
bool result = log_mgr->RemoveFilter(id->AsEnumVal(), name);
return new Val(result, TYPE_BOOL);
%}
function log_write%(id: Log_ID, columns: any%) : bool
%{
bool result = log_mgr->Write(id->AsEnumVal(), columns->AsRecordVal());
return new Val(result, TYPE_BOOL);
%}
function log_set_buf%(id: Log_ID, buffered: bool%): bool
%{
bool result = log_mgr->SetBuf(id->AsEnumVal(), buffered);
return new Val(result, TYPE_BOOL);
%}
function record_type_to_vector%(rt: string%): string_vec
%{
VectorVal* result =

View file

@ -1,23 +1,48 @@
enum log_writer %{
WRITER_DEFAULT,
WRITER_TSV,
WRITER_SYSLOG,
}%
module Log;
void log_create%(id: Logging::Stream, columns: any%)
%%{
#include "LogMgr.h"
#include "NetVar.h"
%%}
const Log::default_writer: Writer;
type Filter: record;
type Stream: record;
function Log::__create_stream%(id: Log::ID, stream: Log::Stream%) : bool
%{
// Forward to the log manager, which validates the Stream record
// (column type and optional event prototype).
bool result = log_mgr->CreateStream(id->AsEnumVal(), stream->AsRecordVal());
return new Val(result, TYPE_BOOL);
%}
void log_add_filter%(id: Logging::Stream, filter: any%)
function Log::__add_filter%(id: Log::ID, filter: Log::Filter%) : bool
%{
// Forward to the log manager, which type-checks the Filter record.
bool result = log_mgr->AddFilter(id->AsEnumVal(), filter->AsRecordVal());
return new Val(result, TYPE_BOOL);
%}
void log_remove_filter%(id: Logging::Stream, name: string%)
function Log::__remove_filter%(id: Log::ID, name: string%) : bool
%{
// Remove the named filter from the stream via the log manager.
bool result = log_mgr->RemoveFilter(id->AsEnumVal(), name);
return new Val(result, TYPE_BOOL);
%}
void log_write%(id: Logging::Stream, columns: any%)
function Log::__write%(id: Log::ID, columns: any%) : bool
%{
// Hand one log entry (a record of the stream's column type) to the
// log manager for dispatch to the stream's filters/writers.
bool result = log_mgr->Write(id->AsEnumVal(), columns->AsRecordVal());
return new Val(result, TYPE_BOOL);
%}
function Log::__set_buf%(id: Log::ID, buffered: bool%): bool
%{
// Change buffering state for all writers of the stream.
bool result = log_mgr->SetBuf(id->AsEnumVal(), buffered);
return new Val(result, TYPE_BOOL);
%}
function Log::__flush%(id: Log::ID%): bool
%{
// Flush all writers of the stream, if they support it.
bool result = log_mgr->Flush(id->AsEnumVal());
return new Val(result, TYPE_BOOL);
%}

View file

@ -50,3 +50,14 @@ enum rpc_status %{
RPC_AUTH_ERROR,
RPC_UNKNOWN_ERROR,
%}
module Log;
enum Writer %{
WRITER_DEFAULT,
WRITER_ASCII,
%}
enum ID %{
Unknown,
%}

View file

@ -8,7 +8,7 @@ module SSH;
export {
# Create a new ID for our log stream
redef enum Log_ID += { LOG_SSH };
redef enum Log::ID += { SSH };
# Define a record with all the columns the log file can have.
# (I'm using a subset of fields from ssh-ext for demonstration.)
@ -24,13 +24,12 @@ global ssh_log: event(rec: Log);
event bro_init()
{
log_create_stream(LOG_SSH, SSH::Log, ssh_log);
Log_add_default_filter(LOG_SSH);
Log::create_stream(SSH, [$columns=Log, $ev=ssh_log]);
local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp];
local r: Log = [$t=network_time(), $id=cid, $status="success"];
log_write(LOG_SSH, r);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
Log::write(SSH, r);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
}

View file

@ -9,7 +9,7 @@ module SSH;
export {
# Create a new ID for our log stream
redef enum Log_ID += { LOG_SSH };
redef enum Log::ID += { SSH };
# Define a record with all the columns the log file can have.
# (I'm using a subset of fields from ssh-ext for demonstration.)
@ -21,11 +21,9 @@ export {
};
}
global ssh_log: event(rec: Log);
global c = -1;
function path_func(id: Log_ID, path: string) : string
function path_func(id: Log::ID, path: string) : string
{
c = (c + 1) % 3;
@ -34,18 +32,19 @@ function path_func(id: Log_ID, path: string) : string
event bro_init()
{
log_create_stream(LOG_SSH, SSH::Log, ssh_log);
Log::create_stream(SSH, [$columns=Log]);
Log::remove_default_filter(SSH);
log_add_filter(LOG_SSH, [$name="dyn", $path="static-prefix", $path_func=path_func]);
Log::add_filter(SSH, [$name="dyn", $path="static-prefix", $path_func=path_func]);
log_set_buf(LOG_SSH, F);
Log::set_buf(SSH, F);
local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp];
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="success"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="UK"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="success", $country="BR"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX2"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX3"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="success"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="UK"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="success", $country="BR"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX2"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX3"]);
}

View file

@ -9,7 +9,7 @@ module SSH;
export {
# Create a new ID for our log stream
redef enum Log_ID += { LOG_SSH };
redef enum Log::ID += { SSH };
# Define a record with all the columns the log file can have.
# (I'm using a subset of fields from ssh-ext for demonstration.)
@ -21,8 +21,6 @@ export {
};
}
global ssh_log: event(rec: Log);
function fail(rec: Log): bool
{
return rec$status != "success";
@ -30,14 +28,14 @@ function fail(rec: Log): bool
event bro_init()
{
log_create_stream(LOG_SSH, SSH::Log, ssh_log);
log_add_filter(LOG_SSH, [$name="f1", $path="ssh.success", $pred=function(rec: Log): bool { return rec$status == "success"; }]);
log_add_filter(LOG_SSH, [$name="f2", $path="ssh.failure", $pred=fail]);
Log::create_stream(SSH, [$columns=Log]);
Log::remove_default_filter(SSH);
Log::add_filter(SSH, [$name="f1", $path="ssh.success", $pred=function(rec: Log): bool { return rec$status == "success"; }]);
Log::add_filter(SSH, [$name="f2", $path="ssh.failure", $pred=fail]);
local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp];
local r: Log = [$t=network_time(), $id=cid, $status="success"];
log_write(LOG_SSH, r);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
Log::write(SSH, r);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
}

View file

@ -1,15 +1,13 @@
#
# @TEST-EXEC: bro -B logging %INPUT
# @TEST-EXEC: btest-diff ssh_log_ssh.log
# @TEST-EXEC: btest-diff ssh.log
# @TEST-EXEC: btest-diff ssh.failure.log
module SSH;
@load logging
export {
# Create a new ID for our log stream
redef enum Log_ID += { LOG_SSH };
redef enum Log::ID += { SSH };
# Define a record with all the columns the log file can have.
# (I'm using a subset of fields from ssh-ext for demonstration.)
@ -21,26 +19,23 @@ export {
};
}
global ssh_log: event(rec: Log);
event bro_init()
{
log_create_stream(LOG_SSH, SSH::Log,ssh_log);
Log_add_default_filter(LOG_SSH);
log_add_filter(LOG_SSH, [$name="f1", $path="ssh.failure", $pred=function(rec: Log): bool { return rec$status == "failure"; }]);
Log::create_stream(SSH, [$columns=Log]);
Log::add_filter(SSH, [$name="f1", $path="ssh.failure", $pred=function(rec: Log): bool { return rec$status == "failure"; }]);
local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp];
# Log something.
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="UK"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="UK"]);
log_remove_filter(LOG_SSH, "f1");
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="BR"]);
Log::remove_filter(SSH, "f1");
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="BR"]);
log_remove_filter(LOG_SSH, "default");
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX"]);
Log::remove_filter(SSH, "default");
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX"]);
log_remove_filter(LOG_SSH, "doesn-not-exist");
Log::remove_filter(SSH, "doesn-not-exist");
}

View file

@ -1,17 +1,12 @@
#
# @TEST-EXEC: bro %INPUT
# @TEST-EXEC: btest-diff ssh_log_ssh.log
# @TEST-EXEC: btest-diff ssh.log
module SSH;
@load logging
export {
# Create a new ID for our log stream
redef enum Log_ID += { LOG_SSH };
redef enum Log::ID += { SSH };
# Define a record with all the columns the log file can have.
# (I'm using a subset of fields from ssh-ext for demonstration.)
type Log: record {
t: time;
id: conn_id; # Will be rolled out into individual columns.
@ -20,46 +15,17 @@ export {
};
}
global ssh_log: event(rec: Log);
event bro_init()
{
# Create the stream.
# First argument is the ID for the stream.
# Second argument is the log record type.
log_create_stream(LOG_SSH, SSH::Log,ssh_log);
# Add a default filter that simply logs everything to "ssh.log" using the default writer.
Log_add_default_filter(LOG_SSH);
# Printing headers for the filters doesn't work yet either and needs to
# be considered in the final design. (based on the "select" set).
#Log::add_filter("ssh", [$name="successful logins",
# #$pred(rec: Log) = { print rec$status; return T; },
# $path="ssh-logins",
# #$select=set("t"),
# $writer=Log::WRITER_CSV]);
Log::create_stream(SSH, [$columns=Log]);
local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp];
# Log something.
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="success"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="UK"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="success", $country="BR"]);
log_write(LOG_SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="success"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="UK"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="success", $country="BR"]);
Log::write(SSH, [$t=network_time(), $id=cid, $status="failure", $country="MX"]);
}
#event log(rec: Log)
# {
# print fmt("Ran the log handler from the same module. Extracting time: %0.6f", rec$t);
# }
#
#
#module WHATEVER;
#
#event SSH::log(rec: SSH::Log)
# {
# print fmt("Ran the SSH::log handler from a different module. Extracting time: %0.6f", rec$t);
# }