Merge remote-tracking branch 'origin/master' into topic/johanna/openflow

Johanna Amann 2015-04-20 11:27:24 -07:00
commit 1fb7f5121e
61 changed files with 970 additions and 443 deletions

@ -1 +1 @@
Subproject commit f2e34d731ed29bb993fbb065846faa342a8c824f
Subproject commit 6a429e79bbaf0fcc11eff5f639bfb9d1f62be6f2

View file

@ -76,11 +76,28 @@ void Attr::DescribeReST(ODesc* d) const
d->Add("`");
}
else
else if ( expr->Tag() == EXPR_CONST )
{
d->Add("``");
expr->Describe(d);
d-> Add("``");
d->Add("``");
}
else
{
d->Add("``");
Val* v = expr->Eval(0);
ODesc dd;
v->Describe(&dd);
Unref(v);
string s = dd.Description();
for ( size_t i = 0; i < s.size(); ++i )
if ( s[i] == '\n' )
s[i] = ' ';
d->Add(s);
d->Add("``");
}
}
}
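
The new EXPR_CONST and fallback branches above render an evaluated default value as a one-line ReST inline literal by flattening embedded newlines. A minimal standalone sketch of that step, with a hypothetical helper name rather than the Attr code itself:

#include <cstdio>
#include <string>

// Render a possibly multi-line value description as one ReST inline
// literal, e.g. ``{ 1, 2 }``, by replacing newlines with spaces.
static std::string rest_literal(std::string desc)
    {
    for ( size_t i = 0; i < desc.size(); ++i )
        if ( desc[i] == '\n' )
            desc[i] = ' ';

    return "``" + desc + "``";
    }

int main()
    {
    printf("%s\n", rest_literal("{\n1,\n2\n}").c_str());  // prints ``{ 1, 2 }``
    return 0;
    }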

View file

@ -137,20 +137,6 @@ bool ChunkedIOFd::Write(Chunk* chunk)
chunk->len, fmt_bytes(chunk->data, min((uint32)20, chunk->len)));
#endif
// Reject if our queue of pending chunks is way too large. Otherwise,
// memory could fill up if the other side doesn't read.
if ( stats.pending > MAX_BUFFERED_CHUNKS )
{
DBG_LOG(DBG_CHUNKEDIO, "write queue full");
#ifdef DEBUG_COMMUNICATION
AddToBuffer("<false:write-queue-full>", false);
#endif
errno = ENOSPC;
return false;
}
#ifdef DEBUG_COMMUNICATION
AddToBuffer(chunk, false);
#endif
@ -627,7 +613,7 @@ bool ChunkedIOFd::IsIdle()
bool ChunkedIOFd::IsFillingUp()
{
return stats.pending > MAX_BUFFERED_CHUNKS_SOFT;
return stats.pending > chunked_io_buffer_soft_cap;
}
iosource::FD_Set ChunkedIOFd::ExtraReadFDs() const
@ -838,15 +824,6 @@ bool ChunkedIOSSL::Write(Chunk* chunk)
chunk->len, fmt_bytes(chunk->data, 20));
#endif
// Reject if our queue of pending chunks is way too large. Otherwise,
// memory could fill up if the other side doesn't read.
if ( stats.pending > MAX_BUFFERED_CHUNKS )
{
DBG_LOG(DBG_CHUNKEDIO, "write queue full");
errno = ENOSPC;
return false;
}
// Queue it.
++stats.pending;
Queue* q = new Queue;
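
With the hard MAX_BUFFERED_CHUNKS rejection removed, writers only signal back-pressure once a runtime-configurable soft cap is exceeded. A condensed sketch of that shape, with illustrative names rather than the ChunkedIO classes:

#include <cstdint>
#include <deque>
#include <string>

// Soft cap analogous to chunked_io_buffer_soft_cap: set at startup
// from configuration rather than compiled in.
static uint64_t write_queue_soft_cap = 800000;

class WriteQueue {
public:
    // Writes are always accepted; memory pressure is reported
    // separately so the caller can throttle its producers.
    void Write(std::string chunk)
        { pending.push_back(std::move(chunk)); }

    bool IsFillingUp() const
        { return pending.size() > write_queue_soft_cap; }

private:
    std::deque<std::string> pending;
};

int main()
    {
    WriteQueue q;
    q.Write("chunk");
    return q.IsFillingUp() ? 1 : 0;
    }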

View file

@ -221,13 +221,6 @@ private:
// than BUFFER_SIZE.
static const uint32 FLAG_PARTIAL = 0x80000000;
// We report that we're filling up when there are more than this number
// of pending chunks.
static const uint32 MAX_BUFFERED_CHUNKS_SOFT = 400000;
// Maximum number of chunks we store in memory before rejecting writes.
static const uint32 MAX_BUFFERED_CHUNKS = 500000;
char* read_buffer;
uint32 read_len;
uint32 read_pos;
@ -275,8 +268,6 @@ public:
virtual void Stats(char* buffer, int length);
private:
// Maximum number of chunks we store in memory before rejecting writes.
static const uint32 MAX_BUFFERED_CHUNKS = 500000;
// Only returns true if all data has been read. If not, call
// it again with the same parameters as long as error is not

View file

@ -7,8 +7,6 @@
#include "EquivClass.h"
#include "DFA.h"
int dfa_state_cache_size = 10000;
unsigned int DFA_State::transition_counter = 0;
DFA_State::DFA_State(int arg_state_num, const EquivClass* ec,
@ -292,9 +290,8 @@ unsigned int DFA_State::Size()
+ (centry ? padded_sizeof(CacheEntry) : 0);
}
DFA_State_Cache::DFA_State_Cache(int arg_maxsize)
DFA_State_Cache::DFA_State_Cache()
{
maxsize = arg_maxsize;
hits = misses = 0;
}
@ -402,7 +399,7 @@ DFA_Machine::DFA_Machine(NFA_Machine* n, EquivClass* arg_ec)
ec = arg_ec;
dfa_state_cache = new DFA_State_Cache(dfa_state_cache_size);
dfa_state_cache = new DFA_State_Cache();
NFA_state_list* ns = new NFA_state_list;
ns->append(n->FirstState());
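
Dropping the -D option leaves the DFA state cache unbounded, tracking only hit and miss counts. A generic sketch of such an unbounded memo cache with statistics; the types are hypothetical, not DFA_State_Cache itself:

#include <cstdio>
#include <unordered_map>

// Unbounded cache: entries are never evicted, only hit/miss
// statistics are kept.
template <typename Key, typename Value>
class MemoCache {
public:
    Value* Lookup(const Key& k)
        {
        auto it = entries.find(k);
        if ( it == entries.end() )
            {
            ++misses;
            return nullptr;
            }
        ++hits;
        return &it->second;
        }

    Value* Insert(const Key& k, Value v)
        { return &entries.emplace(k, std::move(v)).first->second; }

    void Stats() const
        { printf("%zu entries, %lu hits, %lu misses\n", entries.size(), hits, misses); }

private:
    std::unordered_map<Key, Value> entries;
    unsigned long hits = 0, misses = 0;
};

int main()
    {
    MemoCache<int, int> cache;
    if ( ! cache.Lookup(42) )   // miss
        cache.Insert(42, 7);
    cache.Lookup(42);           // hit
    cache.Stats();              // 1 entries, 1 hits, 1 misses
    return 0;
    }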

View file

@ -15,8 +15,6 @@ class DFA_State;
#include "NFA.h"
extern int dfa_state_cache_size;
class DFA_Machine;
class DFA_State;
struct CacheEntry;
@ -78,7 +76,7 @@ struct CacheEntry {
class DFA_State_Cache {
public:
DFA_State_Cache(int maxsize);
DFA_State_Cache();
~DFA_State_Cache();
// If the caller stores the handle, it has to call Ref() on it.
@ -105,8 +103,6 @@ public:
void GetStats(Stats* s);
private:
int maxsize;
int hits; // Statistics
int misses;

View file

@ -2599,6 +2599,39 @@ bool AssignExpr::TypeCheck(attr_list* attrs)
if ( ! same_type(op1->Type(), op2->Type()) )
{
if ( bt1 == TYPE_TABLE && bt2 == TYPE_TABLE )
{
if ( op2->Tag() == EXPR_SET_CONSTRUCTOR )
{
// Some elements in constructor list must not match, see if
// we can create a new constructor now that the expected type
// of LHS is known and let it do coercions where possible.
SetConstructorExpr* sce = dynamic_cast<SetConstructorExpr*>(op2);
ListExpr* ctor_list = dynamic_cast<ListExpr*>(sce->Op());
attr_list* attr_copy = 0;
if ( sce->Attrs() )
{
attr_list* a = sce->Attrs()->Attrs();
attrs = new attr_list;
loop_over_list(*a, i)
attrs->append((*a)[i]);
}
int errors_before = reporter->Errors();
op2 = new SetConstructorExpr(ctor_list, attr_copy, op1->Type());
int errors_after = reporter->Errors();
if ( errors_after > errors_before )
{
ExprError("type clash in assignment");
return false;
}
return true;
}
}
ExprError("type clash in assignment");
return false;
}
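
The set-constructor coercion above detects failure by comparing the reporter's error count before and after rebuilding the constructor. A tiny sketch of that error-count-delta pattern, using a hypothetical Reporter rather than Bro's:

#include <cstdio>

// Minimal error sink: operations report problems here instead of
// throwing, and callers compare counts to see whether one occurred.
struct Reporter {
    int errors = 0;
    void Error(const char* msg) { ++errors; fprintf(stderr, "error: %s\n", msg); }
    int Errors() const { return errors; }
};

static Reporter reporter;

// Pretend coercion: only even values "coerce" cleanly.
static int coerce(int v)
    {
    if ( v % 2 != 0 )
        reporter.Error("type clash");
    return v;
    }

int main()
    {
    int errors_before = reporter.Errors();
    int result = coerce(3);              // reports "type clash"
    int errors_after = reporter.Errors();

    if ( errors_after > errors_before )
        {
        fprintf(stderr, "coercion failed, keeping original expression\n");
        return 1;
        }

    printf("coerced to %d\n", result);
    return 0;
    }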

View file

@ -178,6 +178,7 @@ RecordType* peer;
int forward_remote_state_changes;
int forward_remote_events;
int remote_check_sync_consistency;
bro_uint_t chunked_io_buffer_soft_cap;
StringVal* ssl_ca_certificate;
StringVal* ssl_private_key;
@ -276,6 +277,7 @@ void init_general_global_var()
forward_remote_events = opt_internal_int("forward_remote_events");
remote_check_sync_consistency =
opt_internal_int("remote_check_sync_consistency");
chunked_io_buffer_soft_cap = opt_internal_unsigned("chunked_io_buffer_soft_cap");
ssl_ca_certificate = internal_val("ssl_ca_certificate")->AsStringVal();
ssl_private_key = internal_val("ssl_private_key")->AsStringVal();
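
chunked_io_buffer_soft_cap follows the usual pattern of mirroring a script-level option into a C++ global once at startup. A stripped-down sketch of that pattern; the option table and init function here are hypothetical stand-ins, not Bro's:

#include <cstdint>
#include <map>
#include <string>

// Stand-in for the script-level option table.
static std::map<std::string, uint64_t> options = {
    { "chunked_io_buffer_soft_cap", 800000 },
};

// Global mirrored from the option table once at initialization.
uint64_t chunked_io_buffer_soft_cap;

static uint64_t opt_unsigned(const std::string& name, uint64_t dflt = 0)
    {
    auto it = options.find(name);
    return it != options.end() ? it->second : dflt;
    }

void init_options()
    {
    chunked_io_buffer_soft_cap = opt_unsigned("chunked_io_buffer_soft_cap");
    }

int main()
    {
    init_options();
    return chunked_io_buffer_soft_cap > 0 ? 0 : 1;
    }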

View file

@ -181,6 +181,7 @@ extern RecordType* peer;
extern int forward_remote_state_changes;
extern int forward_remote_events;
extern int remote_check_sync_consistency;
extern bro_uint_t chunked_io_buffer_soft_cap;
extern StringVal* ssl_ca_certificate;
extern StringVal* ssl_private_key;

View file

@ -20,9 +20,6 @@ int case_insensitive = 0;
extern int RE_parse(void);
extern void RE_set_input(const char* str);
// If true, the set-wise matching always returns false - for benchmarking.
extern int rule_bench;
Specific_RE_Matcher::Specific_RE_Matcher(match_type arg_mt, int arg_multiline)
: equiv_class(NUM_SYM)
{
@ -279,9 +276,6 @@ inline void RE_Match_State::AddMatches(const AcceptingSet& as,
bool RE_Match_State::Match(const u_char* bv, int n,
bool bol, bool eol, bool clear)
{
if ( rule_bench > 0 )
return false;
if ( current_pos == -1 )
{
// First call to Match().

View file

@ -542,6 +542,9 @@ RemoteSerializer::RemoteSerializer()
current_msgtype = 0;
current_args = 0;
source_peer = 0;
// Register as a "dont-count" source first, we may change that later.
iosource_mgr->Register(this, true);
}
RemoteSerializer::~RemoteSerializer()
@ -571,8 +574,6 @@ void RemoteSerializer::Enable()
Fork();
iosource_mgr->Register(this);
Log(LogInfo, fmt("communication started, parent pid is %d, child pid is %d", getpid(), child_pid));
initialized = 1;
}
@ -612,6 +613,9 @@ void RemoteSerializer::Fork()
if ( child_pid )
return;
// Register as a "does-count" source now.
iosource_mgr->Register(this, false);
// If we are re-forking, remove old entries
loop_over_list(peers, i)
RemovePeer(peers[i]);
@ -3460,7 +3464,8 @@ void SocketComm::Run()
int a = select(max_fd + 1, &fd_read, &fd_write, &fd_except, 0);
if ( selects % 100000 == 0 )
Log(fmt("selects=%ld canwrites=%ld", selects, canwrites));
Log(fmt("selects=%ld canwrites=%ld pending=%lu",
selects, canwrites, io->Stats()->pending));
if ( a < 0 )
// Ignore errors for now.

View file

@ -577,9 +577,6 @@ RuleFileMagicState* RuleMatcher::InitFileMagic() const
{
RuleFileMagicState* state = new RuleFileMagicState();
if ( rule_bench == 3 )
return state;
loop_over_list(root->psets[Rule::FILE_MAGIC], i)
{
RuleHdrTest::PatternSet* set = root->psets[Rule::FILE_MAGIC][i];
@ -630,9 +627,6 @@ RuleMatcher::MIME_Matches* RuleMatcher::Match(RuleFileMagicState* state,
return rval;
}
if ( rule_bench >= 2 )
return rval;
#ifdef DEBUG
if ( debug_logger.IsEnabled(DBG_RULES) )
{
@ -712,9 +706,6 @@ RuleEndpointState* RuleMatcher::InitEndpoint(analyzer::Analyzer* analyzer,
RuleEndpointState* state =
new RuleEndpointState(analyzer, from_orig, opposite, pia);
if ( rule_bench == 3 )
return state;
rule_hdr_test_list tests;
tests.append(root);
@ -837,9 +828,6 @@ void RuleMatcher::Match(RuleEndpointState* state, Rule::PatternType type,
// for 'accepted' (that depends on the average number of matching
// patterns).
if ( rule_bench >= 2 )
return;
bool newmatch = false;
#ifdef DEBUG
@ -956,9 +944,6 @@ void RuleMatcher::Match(RuleEndpointState* state, Rule::PatternType type,
void RuleMatcher::FinishEndpoint(RuleEndpointState* state)
{
if ( rule_bench == 3 )
return;
// Send EOL to payload matchers.
Match(state, Rule::PAYLOAD, (const u_char *) "", 0, false, true, false);
@ -1110,15 +1095,9 @@ void RuleMatcher::ExecRule(Rule* rule, RuleEndpointState* state, bool eos)
void RuleMatcher::ClearEndpointState(RuleEndpointState* state)
{
if ( rule_bench == 3 )
return;
state->payload_size = -1;
ExecPureRules(state, 1);
state->payload_size = -1;
state->matched_by_patterns.clear();
loop_over_list(state->matched_text, i)
delete state->matched_text[i];
state->matched_text.clear();
loop_over_list(state->matchers, j)
state->matchers[j]->state->Clear();
@ -1126,9 +1105,6 @@ void RuleMatcher::ClearEndpointState(RuleEndpointState* state)
void RuleMatcher::ClearFileMagicState(RuleFileMagicState* state) const
{
if ( rule_bench == 3 )
return;
loop_over_list(state->matchers, j)
state->matchers[j]->state->Clear();
}
@ -1496,8 +1472,12 @@ void RuleMatcherState::ClearMatchState(bool orig)
if ( ! rule_matcher )
return;
if ( orig_match_state )
rule_matcher->ClearEndpointState(orig_match_state);
if ( resp_match_state )
if ( orig )
{
if ( orig_match_state )
rule_matcher->ClearEndpointState(orig_match_state);
}
else if ( resp_match_state )
rule_matcher->ClearEndpointState(resp_match_state);
}

View file

@ -22,8 +22,6 @@
//#define MATCHER_PRINT_STATS
extern int rule_bench;
// Parser interface:
extern void rules_error(const char* msg);

View file

@ -31,9 +31,9 @@ void ConnSize_Analyzer::Init()
resp_pkts = 0;
orig_bytes_thresh = 0;
orig_pkts = 0;
orig_pkts_thresh = 0;
resp_bytes_thresh = 0;
resp_pkts = 0;
resp_pkts_thresh = 0;
}
void ConnSize_Analyzer::Done()
@ -93,7 +93,6 @@ void ConnSize_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig,
{
orig_bytes += ip->TotalLen();
orig_pkts ++;
}
else
{
@ -121,7 +120,7 @@ void ConnSize_Analyzer::SetThreshold(uint64 threshold, bool bytes, bool orig)
resp_pkts_thresh = threshold;
}
// check if threshold is already crossed
// Check if threshold is already crossed.
CheckSizes(orig);
}
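
SetThreshold() ends by checking sizes right away, so a threshold the connection has already passed fires immediately instead of waiting for the next packet. A minimal sketch of that idea with a hypothetical endpoint struct:

#include <cstdint>
#include <cstdio>

struct Endpoint {
    uint64_t bytes = 0;
    uint64_t bytes_thresh = 0;   // 0 means "no threshold set"
};

// Returns true if the new threshold is already crossed, so the caller
// can raise the event immediately instead of waiting for more traffic.
static bool set_bytes_threshold(Endpoint& ep, uint64_t threshold)
    {
    ep.bytes_thresh = threshold;
    return threshold != 0 && ep.bytes >= threshold;
    }

int main()
    {
    Endpoint orig;
    orig.bytes = 5000;

    if ( set_bytes_threshold(orig, 4096) )
        printf("threshold already crossed\n");

    return 0;
    }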

View file

@ -1,26 +1,26 @@
## Generated for a connection that crossed a set byte threshold. Note that this
## is a low level event that can fire several times for the same threshold - you
## should probably use ConnThreshold::bytes_threshold_crossed instead.
## is a low level event that should usually be avoided for user code. Use
## ConnThreshold::bytes_threshold_crossed instead.
##
## c: the connection
##
## threshold: the threshold that was set
##
## is_orig: True if the threshold was crossed by the originator of the connection
## is_orig: true if the threshold was crossed by the originator of the connection
##
## .. bro:see:: set_current_conn_packets_threshold set_current_conn_bytes_threshold conn_packets_threshold_crossed
## get_current_conn_bytes_threshold get_current_conn_packets_threshold
event conn_bytes_threshold_crossed%(c: connection, threshold: count, is_orig: bool%);
## Generated for a connection that crossed a set packet threshold. Note that this
## is a low level event that can fire several times for the same threshold - you
## should probably use ConnThreshold::packets_threshold_crossed instead.
## is a low level event that should usually be avoided for user code. Use
## ConnThreshold::bytes_threshold_crossed instead.
##
## c: the connection
##
## threshold: the threshold that was set
##
## is_orig: True if the threshold was crossed by the originator of the connection
## is_orig: true if the threshold was crossed by the originator of the connection
##
## .. bro:see:: set_current_conn_packets_threshold set_current_conn_bytes_threshold conn_bytes_threshold_crossed
## get_current_conn_bytes_threshold get_current_conn_packets_threshold

View file

@ -1,7 +1,7 @@
%%{
#include "analyzer/protocol/conn-size/ConnSize.h"
analyzer::Analyzer* GetConnsizeAnalyzer(Val* cid)
static analyzer::Analyzer* GetConnsizeAnalyzer(Val* cid)
{
Connection* c = sessions->FindConnection(cid);
if ( ! c )
@ -42,7 +42,7 @@ function set_current_conn_bytes_threshold%(cid: conn_id, threshold: count, is_or
return new Val(1, TYPE_BOOL);
%}
## Sets a threshold for connection packets, overwtiting any potential old thresholds.
## Sets a threshold for connection packets, overwtiting any potential old thresholds.
## Be aware that in nearly any case you will want to use the high level API
## instead (ConnThreshold::set_packets_threshold).
##

View file

@ -81,7 +81,7 @@ void PIA::PIA_Done()
}
void PIA::PIA_DeliverPacket(int len, const u_char* data, bool is_orig, uint64 seq,
const IP_Hdr* ip, int caplen)
const IP_Hdr* ip, int caplen, bool clear_state)
{
if ( pkt_buffer.state == SKIPPING )
return;
@ -108,6 +108,9 @@ void PIA::PIA_DeliverPacket(int len, const u_char* data, bool is_orig, uint64 se
// FIXME: I'm not sure why it does not work with eol=true...
DoMatch(data, len, is_orig, true, false, false, ip);
if ( clear_state )
RuleMatcherState::ClearMatchState(is_orig);
pkt_buffer.state = new_state;
current_packet.data = 0;

View file

@ -42,7 +42,7 @@ public:
protected:
void PIA_Done();
void PIA_DeliverPacket(int len, const u_char* data, bool is_orig,
uint64 seq, const IP_Hdr* ip, int caplen);
uint64 seq, const IP_Hdr* ip, int caplen, bool clear_state);
enum State { INIT, BUFFERING, MATCHING_ONLY, SKIPPING } state;
@ -109,7 +109,7 @@ protected:
uint64 seq, const IP_Hdr* ip, int caplen)
{
Analyzer::DeliverPacket(len, data, is_orig, seq, ip, caplen);
PIA_DeliverPacket(len, data, is_orig, seq, ip, caplen);
PIA_DeliverPacket(len, data, is_orig, seq, ip, caplen, true);
}
virtual void ActivateAnalyzer(analyzer::Tag tag, const Rule* rule);
@ -154,7 +154,7 @@ protected:
uint64 seq, const IP_Hdr* ip, int caplen)
{
Analyzer::DeliverPacket(len, data, is_orig, seq, ip, caplen);
PIA_DeliverPacket(len, data, is_orig, seq, ip, caplen);
PIA_DeliverPacket(len, data, is_orig, seq, ip, caplen, false);
}
virtual void DeliverStream(int len, const u_char* data, bool is_orig);

View file

@ -62,7 +62,7 @@ void RDP_Analyzer::DeliverStream(int len, const u_char* data, bool orig)
{
reporter->AnalyzerError(this,
"failed to add TCP child analyzer "
"to RPD analyzer: already exists");
"to RDP analyzer: already exists");
return;
}

View file

@ -2274,7 +2274,7 @@ function to_count%(str: string%): count
const char* s = str->CheckString();
char* end_s;
uint64 u = (uint64) strtoll(s, &end_s, 10);
uint64 u = (uint64) strtoull(s, &end_s, 10);
if ( s[0] == '\0' || end_s[0] != '\0' )
{
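
The switch to strtoull matters for inputs above 2^63 - 1: strtoll saturates at LLONG_MAX there, while strtoull covers the full unsigned 64-bit range. A small standalone demonstration (not the BiF code):

#include <cinttypes>
#include <cstdio>
#include <cstdlib>

int main()
    {
    const char* s = "18446744073709551615";  // UINT64_MAX
    char* end;

    uint64_t via_ll = (uint64_t) strtoll(s, &end, 10);   // saturates at LLONG_MAX
    uint64_t via_ull = strtoull(s, &end, 10);            // parses the full range

    printf("strtoll:  %" PRIu64 "\n", via_ll);   //  9223372036854775807
    printf("strtoull: %" PRIu64 "\n", via_ull);  // 18446744073709551615
    return 0;
    }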

View file

@ -26,8 +26,15 @@ int bro_broker::Manager::send_flags_unsolicited_idx;
bro_broker::Manager::~Manager()
{
vector<decltype(data_stores)::key_type> stores_to_close;
for ( auto& s : data_stores )
CloseStore(s.first.first, s.first.second);
stores_to_close.emplace_back(s.first);
for ( auto& s : stores_to_close )
// This doesn't loop directly over data_stores, because CloseStore
// modifies the map and invalidates iterators.
CloseStore(s.first, s.second);
}
static int require_field(RecordType* rt, const char* name)
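
The destructor change above is the usual defence against mutating a container while iterating it: snapshot the keys first, then close. A minimal sketch of the two-pass pattern with generic names, assuming the close routine erases its map entry:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

static std::map<std::string, int> stores = {
    { "a", 1 }, { "b", 2 },
};

// Erases the store's entry, which would invalidate an iterator
// currently pointing at it.
static void close_store(const std::string& name)
    {
    printf("closing %s\n", name.c_str());
    stores.erase(name);
    }

int main()
    {
    // Pass 1: snapshot the keys. Pass 2: mutate the map.
    std::vector<std::string> to_close;
    for ( const auto& s : stores )
        to_close.push_back(s.first);

    for ( const auto& name : to_close )
        close_store(name);

    return stores.empty() ? 0 : 1;
    }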

View file

@ -9,7 +9,7 @@
using namespace input;
Component::Component(const std::string& name, factory_callback arg_factory)
: plugin::Component(plugin::component::WRITER, name)
: plugin::Component(plugin::component::READER, name)
{
factory = arg_factory;

View file

@ -8,6 +8,7 @@
#include <fcntl.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include "Raw.h"
#include "Plugin.h"
@ -298,6 +299,17 @@ bool Raw::OpenInput()
Warning(Fmt("Init: cannot set close-on-exec for %s", fname.c_str()));
}
if ( offset )
{
int whence = (offset > 0) ? SEEK_SET : SEEK_END;
if ( fseek(file, offset, whence) < 0 )
{
char buf[256];
strerror_r(errno, buf, sizeof(buf));
Error(Fmt("Seek failed in init: %s", buf));
}
}
return true;
}
@ -377,6 +389,20 @@ bool Raw::DoInit(const ReaderInfo& info, int num_fields, const Field* const* fie
forcekill = true;
}
it = info.config.find("offset"); // we want to seek to a given offset inside the file
if ( it != info.config.end() && ! execute && (Info().mode == MODE_STREAM || Info().mode == MODE_MANUAL) )
{
string offset_s = it->second;
offset = strtoll(offset_s.c_str(), 0, 10);
if ( offset < 0 )
offset++; // we want -1 to be the end of the file
}
else if ( it != info.config.end() )
{
Error("Offset only is supported for MODE_STREAM and MODE_MANUAL; it is also not supported when executing a command");
return false;
}
if ( num_fields != want_fields )
{
Error(Fmt("Filter for raw reader contains wrong number of fields -- got %d, expected %d. "

View file

@ -65,6 +65,8 @@ private:
bool forcekill;
int64_t offset;
int pipes[6];
pid_t childpid;

View file

@ -24,6 +24,7 @@ Manager::~Manager()
for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i )
{
(*i)->src->Done();
delete (*i)->src;
delete *i;
}
@ -183,9 +184,24 @@ finished:
void Manager::Register(IOSource* src, bool dont_count)
{
// First see if we already have registered that source. If so, just
// adjust dont_count.
for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i )
{
if ( (*i)->src == src )
{
if ( (*i)->dont_count != dont_count )
// Adjust the global counter.
dont_counts += (dont_count ? 1 : -1);
return;
}
}
src->Init();
Source* s = new Source;
s->src = src;
s->dont_count = dont_count;
if ( dont_count )
++dont_counts;
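
Register() is now safe to call repeatedly for the same source; a second call only adjusts the dont-count bookkeeping. A condensed sketch of that idempotent registration with simplified types, not the actual iosource manager:

#include <vector>

struct Source { int fd; bool dont_count; };

class Manager {
public:
    // Re-registering an existing source only updates its dont_count
    // flag (and the global counter); it is not added twice.
    void Register(int fd, bool dont_count)
        {
        for ( auto& s : sources )
            if ( s.fd == fd )
                {
                if ( s.dont_count != dont_count )
                    {
                    dont_counts += dont_count ? 1 : -1;
                    s.dont_count = dont_count;
                    }
                return;
                }

        sources.push_back({ fd, dont_count });
        if ( dont_count )
            ++dont_counts;
        }

    // Sources that "don't count" never keep the loop alive on their own.
    bool HasCountingSources() const
        { return (int) sources.size() > dont_counts; }

private:
    std::vector<Source> sources;
    int dont_counts = 0;
};

int main()
    {
    Manager mgr;
    mgr.Register(3, /*dont_count=*/true);   // provisional registration
    mgr.Register(3, /*dont_count=*/false);  // later: make it count
    return mgr.HasCountingSources() ? 0 : 1;
    }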

View file

@ -29,7 +29,9 @@ public:
~Manager();
/**
* Registers an IOSource with the manager.
* Registers an IOSource with the manager. If the source is already
* registered, the method will update its *dont_count* value but not
* do anything else.
*
* @param src The source. The manager takes ownership.
*
@ -117,6 +119,7 @@ private:
FD_Set fd_read;
FD_Set fd_write;
FD_Set fd_except;
bool dont_count;
bool Ready(fd_set* read, fd_set* write, fd_set* except) const
{ return fd_read.Ready(read) || fd_write.Ready(write) ||

View file

@ -181,10 +181,9 @@ bool Ascii::InitFormatter()
Ascii::~Ascii()
{
if ( ! ascii_done )
{
fprintf(stderr, "internal error: finish missing\n");
abort();
}
// In case of errors aborting the logging altogether,
// DoFinish() may not have been called.
CloseFile(network_time);
delete formatter;
}

View file

@ -117,7 +117,6 @@ SampleLogger* sample_logger = 0;
int signal_val = 0;
int optimize = 0;
int do_notice_analysis = 0;
int rule_bench = 0;
extern char version[];
char* command_line_policy = 0;
vector<string> params;
@ -179,8 +178,6 @@ void usage()
fprintf(stderr, " -i|--iface <interface> | read from given interface\n");
fprintf(stderr, " -p|--prefix <prefix> | add given prefix to policy file resolution\n");
fprintf(stderr, " -r|--readfile <readfile> | read from given tcpdump file\n");
fprintf(stderr, " -y|--flowfile <file>[=<ident>] | read from given flow file\n");
fprintf(stderr, " -Y|--netflow <ip>:<prt>[=<id>] | read flow from socket\n");
fprintf(stderr, " -s|--rulefile <rulefile> | read rules from given file\n");
fprintf(stderr, " -t|--tracefile <tracefile> | activate execution tracing\n");
fprintf(stderr, " -w|--writefile <writefile> | write to given tcpdump file\n");
@ -191,11 +188,9 @@ void usage()
fprintf(stderr, " -B|--debug <dbgstreams> | Enable debugging output for selected streams ('-B help' for help)\n");
#endif
fprintf(stderr, " -C|--no-checksums | ignore checksums\n");
fprintf(stderr, " -D|--dfa-size <size> | DFA state cache size\n");
fprintf(stderr, " -F|--force-dns | force DNS\n");
fprintf(stderr, " -I|--print-id <ID name> | print out given ID\n");
fprintf(stderr, " -K|--md5-hashkey <hashkey> | set key for MD5-keyed hashing\n");
fprintf(stderr, " -L|--rule-benchmark | benchmark for rules\n");
fprintf(stderr, " -N|--print-plugins | print available plugins and exit (-NN for verbose)\n");
fprintf(stderr, " -O|--optimize | optimize policy script\n");
fprintf(stderr, " -P|--prime-dns | prime DNS\n");
@ -488,8 +483,6 @@ int main(int argc, char** argv)
{"broxygen", required_argument, 0, 'X'},
{"prefix", required_argument, 0, 'p'},
{"readfile", required_argument, 0, 'r'},
{"flowfile", required_argument, 0, 'y'},
{"netflow", required_argument, 0, 'Y'},
{"rulefile", required_argument, 0, 's'},
{"tracefile", required_argument, 0, 't'},
{"writefile", required_argument, 0, 'w'},
@ -497,13 +490,11 @@ int main(int argc, char** argv)
{"print-state", required_argument, 0, 'x'},
{"analyze", required_argument, 0, 'z'},
{"no-checksums", no_argument, 0, 'C'},
{"dfa-cache", required_argument, 0, 'D'},
{"force-dns", no_argument, 0, 'F'},
{"load-seeds", required_argument, 0, 'G'},
{"save-seeds", required_argument, 0, 'H'},
{"set-seed", required_argument, 0, 'J'},
{"md5-hashkey", required_argument, 0, 'K'},
{"rule-benchmark", no_argument, 0, 'L'},
{"print-plugins", no_argument, 0, 'N'},
{"optimize", no_argument, 0, 'O'},
{"prime-dns", no_argument, 0, 'P'},
@ -557,7 +548,7 @@ int main(int argc, char** argv)
opterr = 0;
char opts[256];
safe_strncpy(opts, "B:D:e:f:I:i:K:l:n:p:R:r:s:T:t:U:w:x:X:z:CFGLNOPSWabdghvZQ",
safe_strncpy(opts, "B:e:f:I:i:K:l:n:p:R:r:s:T:t:U:w:x:X:z:CFGLNOPSWabdghvZQ",
sizeof(opts));
#ifdef USE_PERFTOOLS_DEBUG
@ -631,10 +622,6 @@ int main(int argc, char** argv)
override_ignore_checksums = 1;
break;
case 'D':
dfa_state_cache_size = atoi(optarg);
break;
case 'E':
pseudo_realtime = 1.0;
if ( optarg )
@ -668,10 +655,6 @@ int main(int argc, char** argv)
hmac_key_set = 1;
break;
case 'L':
++rule_bench;
break;
case 'N':
++print_plugins;
break;