Mirror of https://github.com/zeek/zeek.git (synced 2025-10-02 14:48:21 +00:00)
Merge remote-tracking branch 'origin/topic/timw/coverity'

* origin/topic/timw/coverity:
  Mark FileAssembler destructor as default, remove implementation
  Remove a few protected or private no-op constructors
  Properly initialize Timer::type. Fixes Coverity 1431144.
  Properly initialize members of Reassembler. Fixes Coverity 1413348.
  Fix a number of Coverity issues in Dict

Commit 903191e568
10 changed files with 52 additions and 38 deletions
CHANGES: 18 changed lines

@@ -1,3 +1,21 @@
+3.3.0-dev.141 | 2020-08-20 14:56:15 -0700
+
+  * Mark FileAssembler destructor as default, remove implementation (Tim Wojtulewicz, Corelight)
+
+  * Remove a few protected or private no-op constructors (Tim Wojtulewicz, Corelight)
+
+  * Properly initialize Timer::type. Fixes Coverity 1431144. (Tim Wojtulewicz, Corelight)
+
+  * Properly initialize members of Reassembler. Fixes Coverity 1413348. (Tim Wojtulewicz, Corelight)
+
+  * Fix a number of Coverity issues in Dict (Tim Wojtulewicz, Corelight)
+
+    1431186: Asserting that an unsigned value is >= 0 is pointless
+    1431188/1431189/1431191: Side effect of using an assignment operator in a call to ASSERT()
+    1431193: Dereference after null check could lead to null being used
+    1431195: Use of rand() replaced with random()
+
 3.3.0-dev.135 | 2020-08-20 10:23:29 -0700
 
   * remove variables in netvar, use tabs in DNS.h and polish comments in dns events. (FlyingWithJerome)
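The ASSERT() items (1431188/1431189/1431191) are the classic NDEBUG pitfall: when the assignment lives inside the asserted expression, it disappears together with the assertion in builds that compile assertions out, so the flag being computed silently keeps its old value. A minimal standalone sketch of the problem and of the compute-then-assert pattern the Dict changes switch to (plain <cassert> here, standing in for Zeek's ASSERT/DUMPIF macros):

    #include <cassert>
    #include <cstdio>

    int main() {
        int n = 0;
        bool valid = false;

        // Problematic pattern: the assignment is a side effect of the asserted
        // expression.  With assertions compiled out (-DNDEBUG) the whole
        // expression vanishes and 'valid' keeps its old value.
        assert((valid = (n == 0)));
        printf("assert-with-assignment: valid=%d\n", valid);  // 0 under -DNDEBUG, 1 otherwise

        // Pattern used by the fix: compute first, then assert on the result.
        valid = false;
        valid = (n == 0);
        assert(valid);
        printf("separate assignment:    valid=%d\n", valid);  // 1 in every build mode
        return 0;
    }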
VERSION: 2 changed lines

@@ -1 +1 @@
-3.3.0-dev.135
+3.3.0-dev.141
@@ -401,7 +401,6 @@ public:
 	void Dispatch(double t, bool is_expire) override;
 
 protected:
-	ConnectionTimer() {}
 
 	void Init(Connection* conn, timer_func timer, bool do_expire);
 
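On the "no-op constructors" bullet: an empty protected or private default constructor initializes none of the built-in members, so an object created through it starts with indeterminate values, which is the kind of uninitialized-member finding Coverity raises. The hunk above simply deletes such a constructor. A hypothetical reproduction of the pattern (ExampleTimer is invented for illustration, not the actual ConnectionTimer declaration):

    #include <cstdio>

    class Connection;  // stand-in forward declaration for the sketch

    class ExampleTimer {
    public:
        explicit ExampleTimer(Connection* c) : conn(c), do_expire(true) { }

    protected:
        // A no-op default constructor like this initializes nothing, so an
        // object built through it has indeterminate 'conn' and 'do_expire'.
        // The commit removes such constructors where nothing needs them.
        ExampleTimer() { }

        Connection* conn;
        bool do_expire;
    };

    int main() {
        ExampleTimer t(nullptr);
        printf("constructed via the initializing constructor\n");
        return 0;
    }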
src/Dict.cc: 34 changed lines

@@ -283,7 +283,7 @@ int Dictionary::BucketByHash(zeek::detail::hash_t h, int log2_table_size) const
 	int m = 64 - log2_table_size;
 	hash <<= m;
 	hash >>= m;
-	ASSERT(hash>=0);
+
 	return hash;
 	}
 
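Coverity 1431186 covers the dropped ASSERT(hash>=0): the hash here is an unsigned 64-bit value, so the comparison is always true and the assertion can never fire. A small sketch of the same shift-masking step, with a range check that can actually fail instead (the function below mirrors the hunk above but is not the Zeek implementation):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Keep only the low log2_table_size bits of a 64-bit hash by shifting the
    // high bits out and back in, as Dictionary::BucketByHash() does.
    static uint64_t bucket_by_hash(uint64_t hash, int log2_table_size) {
        int m = 64 - log2_table_size;
        hash <<= m;
        hash >>= m;
        // ASSERT(hash >= 0) would be pointless: 'hash' is unsigned, so the
        // condition is tautologically true.  A meaningful check is that the
        // result fits in the table.
        assert(hash < (uint64_t(1) << log2_table_size));
        return hash;
    }

    int main() {
        printf("%llu\n", (unsigned long long)bucket_by_hash(0xDEADBEEFCAFEF00Dull, 10));
        return 0;
    }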
@@ -368,36 +368,44 @@ void Dictionary::AssertValid() const
 	{
 	bool valid = true;
 	int n = num_entries;
-	for ( int i = Capacity()-1; i >= 0; i-- )
-		if ( table && ! table[i].Empty() )
-			n--;
-	ASSERT((valid = (n==0)));
+
+	if ( table )
+		for ( int i = Capacity()-1; i >= 0; i-- )
+			if ( ! table[i].Empty() )
+				n--;
+
+	valid = (n == 0);
+	ASSERT(valid);
 	DUMPIF(! valid);
 
 	//entries must clustered together
 	for ( int i = 1; i < Capacity(); i++ )
 		{
-		if ( table[i].Empty() )
+		if ( ! table || table[i].Empty() )
 			continue;
 
 		if ( table[i-1].Empty() )
 			{
-			ASSERT((valid=(table[i].distance == 0)));
+			valid = (table[i].distance == 0);
+			ASSERT(valid);
 			DUMPIF(! valid);
 			}
 		else
 			{
-			ASSERT((valid=(table[i].bucket >= table[i-1].bucket)));
+			valid = (table[i].bucket >= table[i-1].bucket);
+			ASSERT(valid);
 			DUMPIF(! valid);
 
 			if ( table[i].bucket == table[i-1].bucket )
 				{
-				ASSERT((valid=(table[i].distance == table[i-1].distance+1)));
+				valid = (table[i].distance == table[i-1].distance+1);
+				ASSERT(valid);
 				DUMPIF(! valid);
 				}
 			else
 				{
-				ASSERT((valid=(table[i].distance <= table[i-1].distance)));
+				valid = (table[i].distance <= table[i-1].distance);
+				ASSERT(valid);
 				DUMPIF(! valid);
 				}
 			}
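Coverity 1431193 ("dereference after null check") is about the loop above: the counting pass now guards on 'table', but the clustering pass indexes table[i] on every iteration, so on the path where table is null it would be dereferenced; the fix adds '! table ||' to the continue condition. A standalone illustration of the pattern with a hypothetical Entry type, not the Zeek data structures:

    #include <cstdio>

    struct Entry { bool empty; bool Empty() const { return empty; } };

    static int count_nonempty(const Entry* table, int capacity) {
        int n = 0;

        // One pass guards against a null table ...
        if ( table )
            for ( int i = 0; i < capacity; i++ )
                if ( ! table[i].Empty() )
                    n++;

        // ... so a later pass must keep the same guard; indexing table[i]
        // while table can still be null is exactly what Coverity flags.
        for ( int i = 1; i < capacity; i++ ) {
            if ( ! table || table[i].Empty() )
                continue;
            // work on clustered, non-empty entries would go here
        }

        return n;
    }

    int main() {
        Entry t[3] = { {false}, {false}, {true} };
        printf("%d\n", count_nonempty(t, 3));        // 2
        printf("%d\n", count_nonempty(nullptr, 3));  // 0, no crash
        return 0;
    }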
@@ -446,7 +454,8 @@ void Dictionary::DumpKeys() const
 	DistanceStats(max_distance);
 	if ( binary )
 		{
-		sprintf(key_file, "%d.%d.%zu-%c.key", Length(), max_distance, MemoryAllocation()/Length(), rand()%26 + 'A');
+		char key = char(random() % 26) + 'A';
+		sprintf(key_file, "%d.%d.%zu-%c.key", Length(), max_distance, MemoryAllocation()/Length(), key);
 		std::ofstream f(key_file, std::ios::binary|std::ios::out|std::ios::trunc);
 		for ( int idx = 0; idx < Capacity(); idx++ )
 			if ( ! table[idx].Empty() )

@@ -458,7 +467,8 @@ void Dictionary::DumpKeys() const
 		}
 	else
 		{
-		sprintf(key_file, "%d.%d.%zu-%d.ckey",Length(), max_distance, MemoryAllocation()/Length(), rand()%26 + 'A');
+		char key = char(random() % 26) + 'A';
+		sprintf(key_file, "%d.%d.%zu-%d.ckey",Length(), max_distance, MemoryAllocation()/Length(), key);
 		std::ofstream f(key_file, std::ios::out|std::ios::trunc);
 		for ( int idx = 0; idx < Capacity(); idx++ )
 			if ( ! table[idx].Empty() )
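Coverity 1431195 flags the calls to rand(); DumpKeys() now draws the key-file suffix letter from random() and keeps it in a named char before handing it to sprintf(). A minimal sketch of the replacement pattern, assuming POSIX random()/srandom() are available (the buffer name and values below are made up for the example):

    #include <cstdio>
    #include <cstdlib>  // random(), srandom() (POSIX)
    #include <ctime>

    int main() {
        srandom((unsigned)time(nullptr));

        // Old pattern: rand()%26 + 'A' computed inline in the sprintf() call.
        // New pattern: draw from random() and keep the letter in a named char,
        // as DumpKeys() now does for the generated key-file name.
        char key = char(random() % 26) + 'A';

        char key_file[64];
        snprintf(key_file, sizeof(key_file), "%d.%d.%zu-%c.key", 100, 7, (size_t)4096, key);
        printf("%s\n", key_file);
        return 0;
    }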
@@ -292,7 +292,6 @@ public:
 	void SetMaxOldBlocks(uint32_t count)	{ max_old_blocks = count; }
 
 protected:
-	Reassembler() { }
 
 	friend class DataBlockList;
 

@@ -307,11 +306,11 @@ protected:
 	DataBlockList block_list;
 	DataBlockList old_block_list;
 
-	uint64_t last_reassem_seq;
-	uint64_t trim_seq;	// how far we've trimmed
-	uint32_t max_old_blocks;
+	uint64_t last_reassem_seq = 0;
+	uint64_t trim_seq = 0;	// how far we've trimmed
+	uint32_t max_old_blocks = 0;
 
-	ReassemblerType rtype;
+	ReassemblerType rtype = REASSEM_UNKNOWN;
 
 	static uint64_t total_size;
 	static uint64_t sizes[REASSEM_NUM];
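The Reassembler hunk above (and the Timer hunk that follows) uses in-class default member initializers, so the fields have well-defined values regardless of which constructor runs; that is the root cause behind Coverity 1413348 and 1431144. A brief sketch of the idiom with hypothetical names, not the Zeek declarations:

    #include <cstdint>
    #include <cstdio>

    enum ExampleType { EXAMPLE_UNKNOWN, EXAMPLE_FILE, EXAMPLE_TCP };

    class ExampleReassembler {
    public:
        ExampleReassembler() = default;  // members are still initialized below
        explicit ExampleReassembler(ExampleType t) : rtype(t) { }

        void Report() const {
            printf("seq=%llu type=%d\n", (unsigned long long)last_reassem_seq, rtype);
        }

    private:
        // Default member initializers guarantee defined values on every
        // constructor path, which is what the commit adds to Reassembler
        // (and, via TimerType type{}, to Timer).
        uint64_t last_reassem_seq = 0;
        uint32_t max_old_blocks = 0;
        ExampleType rtype = EXAMPLE_UNKNOWN;
    };

    int main() {
        ExampleReassembler a;
        ExampleReassembler b(EXAMPLE_TCP);
        a.Report();
        b.Report();
        return 0;
    }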
@@ -65,8 +65,8 @@ public:
 	void Describe(ODesc* d) const;
 
 protected:
 	Timer()	{}
-	TimerType type;
+	TimerType type{};
 	};
 
 class TimerMgr : public iosource::IOSource {
@@ -22,13 +22,12 @@ public:
 	void Dispatch(double t, bool is_expire) override;
 
 protected:
-	AnalyzerTimer()	: analyzer(), timer(), do_expire() {}
 
 	void Init(Analyzer* analyzer, analyzer_timer_func timer, int do_expire);
 
-	Analyzer* analyzer;
+	Analyzer* analyzer = nullptr;
 	analyzer_timer_func timer;
-	int do_expire;
+	int do_expire = 0;
 	};
 
 } // namespace zeek::analyzer
@@ -82,7 +82,6 @@ public:
 		{ return seq + length <= seq_to_skip; }
 
 private:
-	TCP_Reassembler() { }
 
 	void Undelivered(uint64_t up_to_seq) override;
 	void Gap(uint64_t seq, uint64_t len);
@@ -12,15 +12,6 @@ FileReassembler::FileReassembler(File *f, uint64_t starting_offset)
 	{
 	}
 
-FileReassembler::FileReassembler()
-	: zeek::Reassembler(), the_file(nullptr), flushing(false)
-	{
-	}
-
-FileReassembler::~FileReassembler()
-	{
-	}
-
 uint64_t FileReassembler::Flush()
 	{
 	if ( flushing )
@@ -15,7 +15,7 @@ class FileReassembler final : public zeek::Reassembler {
 public:
 
 	FileReassembler(File* f, uint64_t starting_offset);
-	~FileReassembler() override;
+	~FileReassembler() override = default;
 
 	void Done();
 
@@ -48,14 +48,13 @@ public:
 		{ return flushing; }
 
 protected:
-	FileReassembler();
 
 	void Undelivered(uint64_t up_to_seq) override;
 	void BlockInserted(zeek::DataBlockMap::const_iterator it) override;
 	void Overlap(const u_char* b1, const u_char* b2, uint64_t n) override;
 
-	File* the_file;
-	bool flushing;
+	File* the_file = nullptr;
+	bool flushing = false;
 	};
 
 } // namespace analyzer::*
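Finally, the FileReassembler change replaces a destructor that was declared in the header and given an empty body in the .cc file with '~FileReassembler() override = default;', keeping the explicit override while letting the compiler generate the destructor. A generic sketch of the pattern (Base/Derived are invented names, not the Zeek classes):

    #include <cstdio>

    class Base {
    public:
        virtual ~Base() = default;
        virtual void Done() { printf("Base::Done\n"); }
    };

    class Derived final : public Base {
    public:
        // Instead of declaring the destructor here and writing an empty body
        // in the .cc file, mark it defaulted: the override keyword is kept and
        // no out-of-line definition is needed.
        ~Derived() override = default;

        void Done() override { printf("Derived::Done\n"); }
    };

    int main() {
        Derived d;
        d.Done();
        return 0;
    }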