Mirror of https://github.com/zeek/zeek.git
Merge remote-tracking branch 'origin/topic/christian/rehash-comphash'
* origin/topic/christian/rehash-comphash:
  Add btests for new functionality
  Remove unused HashKey constructor and reorder for consistency
  Refactor CompHash class to use new HashKey buffering features
  Add debug string and ODesc support to HashKey class
  Refactor HashKey class to support read/write operations
  Add unit tests for memory helpers
  Add memory sizing/alignment helpers to util.cc/h
  Ensure table/set HashKey buffer reservation and writes happen in same order
commit 5c0669d9a4

18 changed files with 1404 additions and 940 deletions
CHANGES | 56 lines changed

@@ -1,3 +1,59 @@
+4.2.0-dev.189 | 2021-09-21 07:45:11 -0700
+
+  * Add btests for new functionality (Christian Kreibich, Corelight)
+
+    - Expand language.set to cover sets of sets
+    - Expand language.table to cover tables indexed with tables
+    - Add language.table-nested-set-ordering to capture the reproducer from GHI-1753
+
+  * Remove unused HashKey constructor and reorder for consistency (Christian Kreibich, Corelight)
+
+    One of the HashKey constructors was only used in the old CompHash code.
+    This also reorders some constructors and the destructor for readability.
+
+  * Refactor CompHash class to use new HashKey buffering features (Christian Kreibich, Corelight)
+
+    This preserves the previous hash key buffer layout (so the testsuite still
+    passes) and overall approach but gets rid of the codepath for writing singleton
+    serializations. This code path required a fourth switch block over all types
+    (besides reads, writes, and size computation) and was inconsistent with the one
+    for writing non-atomic types.
+
+  * Add debug string and ODesc support to HashKey class (Christian Kreibich, Corelight)
+
+    This allows tracing of hash key buffer reservations, reads, and writes via a new
+    debug stream, and supports printing a summary of a HashKey object via
+    Describe(). The latter comes in handy e.g. in TableVal::Describe() (where
+    including the hash key is now available but commented out).
+
+  * Refactor HashKey class to support read/write operations (Christian Kreibich, Corelight)
+
+    This preserves the optimization of storing values directly in the key_u member
+    union when feasible, and using a variable-size buffer otherwise. It also adds
+    bounds-checking for that buffer, moves size arguments to size_t, decouples
+    construction from hash computation, emulates the tagging feature found in
+    SerializationFormat to assist troubleshooting, and switches feasible
+    reinterpret_casts to static_casts.
+
+  * Add unit tests for memory helpers (Christian Kreibich, Corelight)
+
+  * Add memory sizing/alignment helpers to util.cc/h (Christian Kreibich, Corelight)
+
+    This functionality previously lived in the CompHash class, with one difference:
+    this removes a discrepancy between the offset aligner and the memory pointer
+    aligner/padder. The size aligner used to align the provided offset and then add an
+    additional alignment size (for example, 1 aligned to 4 wouldn't yield 4 but 8).
+    Like the memory aligners it now only rounds up as needed.
+
+  * Ensure table/set HashKey buffer reservation and writes happen in same order (Christian Kreibich, Corelight)
+
+    This takes the existing sorting for table index hashkeys we had in place during
+    hash key writes and applies it also during buffer size reservation. It changes
+    the approach slightly: the underlying map now points to the TableVal entry index
+    vals directly, rather than to the numerical index into an additional list that
+    gets built up to store those indexes. Doing so removes the need for that list.
+
 4.2.0-dev.179 | 2021-09-20 11:20:50 +0200
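The ordering requirement in the last entry above is worth unpacking: once alignment padding is involved, the padded size of a composite key depends on the order in which its fields are visited, so buffer reservation and the later writes must traverse the index values in the same order. A minimal standalone illustration in C++ (size_align below is a local stand-in mirroring util::memory_size_align, not code from this commit):

    #include <cassert>
    #include <cstddef>

    // Round offset up to the next multiple of align (a power of 2);
    // align == 0 means "no alignment".
    static size_t size_align(size_t offset, size_t align)
        {
        return align ? (offset + align - 1) & ~(align - 1) : offset;
        }

    int main()
        {
        // A char followed by a double, sized in that order:
        size_t a = size_align(0, sizeof(char)) + sizeof(char);     // 1
        a = size_align(a, sizeof(double)) + sizeof(double);        // 16

        // The same two fields sized in the opposite order:
        size_t b = size_align(0, sizeof(double)) + sizeof(double); // 8
        b = size_align(b, sizeof(char)) + sizeof(char);            // 9

        // A buffer reserved with one traversal order but written with
        // another can thus run past its end -- the failure mode behind
        // GHI-1753.
        assert(a == 16 && b == 9);
        }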
VERSION | 2 lines changed

@@ -1 +1 @@
-4.2.0-dev.179
+4.2.0-dev.189
src/CompHash.cc | 1372 lines changed

File diff suppressed because it is too large.
src/CompHash.h

@@ -24,7 +24,6 @@ class CompositeHash
     {
 public:
     explicit CompositeHash(TypeListPtr composite_type);
     ~CompositeHash();
 
     // Compute the hash corresponding to the given index val,
     // or nullptr if it fails to typecheck.

@@ -37,65 +36,33 @@
                  "GHI-572.")]] unsigned int
     MemoryAllocation() const
         {
-        return padded_sizeof(*this) + util::pad_size(size);
+        return padded_sizeof(*this);
         }
 
 protected:
-    std::unique_ptr<HashKey> ComputeSingletonHash(const Val* v, bool type_check) const;
-
     // Computes the piece of the hash for Val*, returning the new kp.
     // Used as a helper for ComputeHash in the non-singleton case.
-    char* SingleValHash(bool type_check, char* kp, Type* bt, Val* v, bool optional) const;
+    bool SingleValHash(HashKey& hk, const Val* v, Type* bt, bool type_check, bool optional,
+                       bool singleton) const;
 
     // Recovers just one Val of possibly many; called from RecoverVals.
     // Upon return, pval will point to the recovered Val of type t.
     // Returns and updated kp for the next Val. Calls reporter->InternalError()
     // upon errors, so there is no return value for invalid input.
-    const char* RecoverOneVal(const HashKey& k, const char* kp, const char* const k_end, Type* t,
-                              ValPtr* pval, bool optional) const;
-
-    // Rounds the given pointer up to the nearest multiple of the
-    // given size, if not already a multiple.
-    const void* Align(const char* ptr, unsigned int size) const;
-
-    // Rounds the given pointer up to the nearest multiple of the
-    // given size, padding the skipped region with 0 bytes.
-    void* AlignAndPad(char* ptr, unsigned int size) const;
-
-    // Returns offset+size rounded up so it can correctly align data
-    // of the given size.
-    int SizeAlign(int offset, unsigned int size) const;
-
-    template <class T> T* AlignAndPadType(char* ptr) const
-        {
-        return reinterpret_cast<T*>(AlignAndPad(ptr, sizeof(T)));
-        }
-
-    template <class T> const T* AlignType(const char* ptr) const
-        {
-        return reinterpret_cast<const T*>(Align(ptr, sizeof(T)));
-        }
-
-    template <class T> int SizeAlignType(int offset) const { return SizeAlign(offset, sizeof(T)); }
+    bool RecoverOneVal(const HashKey& k, Type* t, ValPtr* pval, bool optional,
+                       bool singleton) const;
 
     // Compute the size of the composite key. If v is non-nil then
     // the value is computed for the particular list of values.
     // Returns 0 if the key has an indeterminant size (if v not given),
     // or if v doesn't match the index type (if given).
-    int ComputeKeySize(const Val* v, bool type_check, bool calc_static_size) const;
+    bool ReserveKeySize(HashKey& hk, const Val* v, bool type_check, bool calc_static_size) const;
 
-    int SingleTypeKeySize(Type*, const Val*, bool type_check, int sz, bool optional,
-                          bool calc_static_size) const;
+    bool ReserveSingleTypeKeySize(HashKey& hk, Type*, const Val* v, bool type_check, bool optional,
+                                  bool calc_static_size, bool singleton) const;
+
+    bool EnsureTypeReserve(HashKey& hk, const Val* v, Type* bt, bool type_check) const;
 
     TypeListPtr type;
-    char* key; // space for composite key
-    int size;
-    bool is_singleton; // if just one type in index
-
-    // If one type, but not normal "singleton", e.g. record.
-    bool is_complex_type;
-
-    InternalTypeTag singleton_tag;
+    bool is_singleton = false; // if just one type in index
     };
 
 } // namespace zeek::detail
src/DebugLogger.cc

@@ -21,7 +21,9 @@ DebugLogger::Stream DebugLogger::streams[NUM_DBGS] = {
     {"packet_analysis", 0, false}, {"file_analysis", 0, false}, {"tm", 0, false},
     {"logging", 0, false},         {"input", 0, false},         {"threading", 0, false},
     {"plugins", 0, false},         {"zeekygen", 0, false},      {"pktio", 0, false},
-    {"broker", 0, false},          {"scripts", 0, false},       {"supervisor", 0, false}};
+    {"broker", 0, false},          {"scripts", 0, false},       {"supervisor", 0, false},
+    {"hashkey", 0, false},
+};
 
 DebugLogger::DebugLogger()
     {
src/DebugLogger.h

@@ -54,6 +54,7 @@ enum DebugStream
     DBG_BROKER, // Broker communication
     DBG_SCRIPTS, // Script initialization
     DBG_SUPERVISOR, // Process supervisor
+    DBG_HASHKEY, // HashKey buffers
 
     NUM_DBGS // Has to be last
     };
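The new stream presumably hooks into the standard debug-stream machinery: in a debug build, an invocation along the lines of "zeek -B hashkey -r trace.pcap" should emit the HashKey reservation, read, and write traces, and "zeek -B help" should list the exact stream names.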
src/Hash.cc | 444 lines changed
@@ -6,6 +6,8 @@
 #include <highwayhash/instruction_sets.h>
 #include <highwayhash/sip_hash.h>
 
+#include "zeek/DebugLogger.h"
+#include "zeek/Desc.h"
 #include "zeek/Reporter.h"
 #include "zeek/Val.h" // needed for const.bif
 #include "zeek/ZeekString.h"

@@ -64,39 +66,39 @@ void KeyedHash::InitOptions()
 
 hash64_t KeyedHash::Hash64(const void* bytes, uint64_t size)
     {
-    return highwayhash::SipHash(shared_siphash_key, reinterpret_cast<const char*>(bytes), size);
+    return highwayhash::SipHash(shared_siphash_key, static_cast<const char*>(bytes), size);
     }
 
 void KeyedHash::Hash128(const void* bytes, uint64_t size, hash128_t* result)
     {
     highwayhash::InstructionSets::Run<highwayhash::HighwayHash>(
-        shared_highwayhash_key, reinterpret_cast<const char*>(bytes), size, result);
+        shared_highwayhash_key, static_cast<const char*>(bytes), size, result);
     }
 
 void KeyedHash::Hash256(const void* bytes, uint64_t size, hash256_t* result)
     {
     highwayhash::InstructionSets::Run<highwayhash::HighwayHash>(
-        shared_highwayhash_key, reinterpret_cast<const char*>(bytes), size, result);
+        shared_highwayhash_key, static_cast<const char*>(bytes), size, result);
     }
 
 hash64_t KeyedHash::StaticHash64(const void* bytes, uint64_t size)
     {
     hash64_t result = 0;
     highwayhash::InstructionSets::Run<highwayhash::HighwayHash>(
-        cluster_highwayhash_key, reinterpret_cast<const char*>(bytes), size, &result);
+        cluster_highwayhash_key, static_cast<const char*>(bytes), size, &result);
     return result;
     }
 
 void KeyedHash::StaticHash128(const void* bytes, uint64_t size, hash128_t* result)
     {
     highwayhash::InstructionSets::Run<highwayhash::HighwayHash>(
-        cluster_highwayhash_key, reinterpret_cast<const char*>(bytes), size, result);
+        cluster_highwayhash_key, static_cast<const char*>(bytes), size, result);
     }
 
 void KeyedHash::StaticHash256(const void* bytes, uint64_t size, hash256_t* result)
     {
     highwayhash::InstructionSets::Run<highwayhash::HighwayHash>(
-        cluster_highwayhash_key, reinterpret_cast<const char*>(bytes), size, result);
+        cluster_highwayhash_key, static_cast<const char*>(bytes), size, result);
     }
 
 void init_hash_function()

@@ -106,109 +108,91 @@ void init_hash_function()
         reporter->InternalError("Zeek's hash functions aren't fully initialized");
     }
 
-HashKey::HashKey(bro_int_t i)
+HashKey::HashKey(bool b)
     {
-    key_u.i = i;
-    key = (void*)&key_u;
-    size = sizeof(i);
-    hash = HashBytes(key, size);
+    Set(b);
     }
 
-HashKey::HashKey(bro_uint_t u)
+HashKey::HashKey(int i)
     {
-    key_u.i = bro_int_t(u);
-    key = (void*)&key_u;
-    size = sizeof(u);
-    hash = HashBytes(key, size);
+    Set(i);
     }
 
+HashKey::HashKey(bro_int_t bi)
+    {
+    Set(bi);
+    }
+
+HashKey::HashKey(bro_uint_t bu)
+    {
+    Set(bu);
+    }
+
 HashKey::HashKey(uint32_t u)
     {
-    key_u.u32 = u;
-    key = (void*)&key_u;
-    size = sizeof(u);
-    hash = HashBytes(key, size);
+    Set(u);
     }
 
-HashKey::HashKey(const uint32_t u[], int n)
+HashKey::HashKey(const uint32_t u[], size_t n)
     {
-    size = n * sizeof(u[0]);
-    key = (void*)u;
-    hash = HashBytes(key, size);
+    size = write_size = n * sizeof(u[0]);
+    key = (char*)u;
     }
 
 HashKey::HashKey(double d)
     {
-    union {
-        double d;
-        int i[2];
-    } u;
-
-    key_u.d = u.d = d;
-    key = (void*)&key_u;
-    size = sizeof(d);
-    hash = HashBytes(key, size);
+    Set(d);
     }
 
 HashKey::HashKey(const void* p)
     {
-    key_u.p = p;
-    key = (void*)&key_u;
-    size = sizeof(p);
-    hash = HashBytes(key, size);
+    Set(p);
     }
 
 HashKey::HashKey(const char* s)
     {
-    size = strlen(s); // note - skip final \0
-    key = (void*)s;
-    hash = HashBytes(key, size);
+    size = write_size = strlen(s); // note - skip final \0
+    key = (char*)s;
     }
 
 HashKey::HashKey(const String* s)
     {
-    size = s->Len();
-    key = (void*)s->Bytes();
-    hash = HashBytes(key, size);
+    size = write_size = s->Len();
+    key = (char*)s->Bytes();
     }
 
-HashKey::HashKey(int copy_key, void* arg_key, int arg_size)
+HashKey::HashKey(const void* bytes, size_t arg_size)
     {
-    size = arg_size;
-
-    if ( copy_key )
-        {
-        key = (void*)new char[size];
-        memcpy(key, arg_key, size);
-        }
-    else
-        key = arg_key;
-
-    hash = HashBytes(key, size);
+    size = write_size = arg_size;
+    key = CopyKey((char*)bytes, size);
+    is_our_dynamic = true;
    }
 
-HashKey::HashKey(const void* arg_key, int arg_size, hash_t arg_hash)
+HashKey::HashKey(const void* arg_key, size_t arg_size, hash_t arg_hash)
     {
-    size = arg_size;
+    size = write_size = arg_size;
     hash = arg_hash;
-    key = CopyKey(arg_key, size);
+    key = CopyKey((char*)arg_key, size);
     is_our_dynamic = true;
     }
 
-HashKey::HashKey(const void* arg_key, int arg_size, hash_t arg_hash, bool /* dont_copy */)
+HashKey::HashKey(const void* arg_key, size_t arg_size, hash_t arg_hash, bool /* dont_copy */)
     {
-    size = arg_size;
+    size = write_size = arg_size;
     hash = arg_hash;
-    key = const_cast<void*>(arg_key);
+    key = (char*)arg_key;
     }
 
-HashKey::HashKey(const void* bytes, int arg_size)
+hash_t HashKey::Hash() const
     {
-    size = arg_size;
-    key = CopyKey(bytes, size);
-    hash = HashBytes(key, size);
-    is_our_dynamic = true;
+    if ( hash == 0 )
+        hash = HashBytes(key, size);
+
+#ifdef DEBUG
+    ODesc d;
+    Describe(&d);
+    DBG_LOG(DBG_HASHKEY, "HashKey %p %s", this, d.Description());
+#endif
+
+    return hash;
     }
 
 void* HashKey::TakeKey()

@@ -222,16 +206,340 @@ void* HashKey::TakeKey()
     return CopyKey(key, size);
     }
 
-void* HashKey::CopyKey(const void* k, int s) const
+void HashKey::Describe(ODesc* d) const
     {
-    void* k_copy = (void*)new char[s];
+    char buf[64];
+    snprintf(buf, 16, "%0" PRIx64, hash);
+    d->Add(buf);
+    d->SP();
+
+    if ( size > 0 )
+        {
+        d->Add(IsAllocated() ? "(" : "[");
+
+        for ( size_t i = 0; i < size; i++ )
+            {
+            if ( i > 0 )
+                {
+                d->SP();
+                // Extra spacing every 8 bytes, for readability.
+                if ( i % 8 == 0 )
+                    d->SP();
+                }
+
+            // Don't display unwritten content, only say how much there is.
+            if ( i > write_size )
+                {
+                d->Add("<+");
+                d->Add(static_cast<uint64_t>(size - write_size - 1));
+                d->Add(" of ");
+                d->Add(static_cast<uint64_t>(size));
+                d->Add(" available>");
+                break;
+                }
+
+            snprintf(buf, 3, "%02x", key[i]);
+            d->Add(buf);
+            }
+
+        d->Add(IsAllocated() ? ")" : "]");
+        }
+    }
+
+char* HashKey::CopyKey(const char* k, size_t s) const
+    {
+    char* k_copy = new char[s]; // s == 0 is okay, returns non-nil
     memcpy(k_copy, k, s);
     return k_copy;
     }
 
-hash_t HashKey::HashBytes(const void* bytes, int size)
+hash_t HashKey::HashBytes(const void* bytes, size_t size)
     {
     return KeyedHash::Hash64(bytes, size);
     }
 
+void HashKey::Set(bool b)
+    {
+    key_u.b = b;
+    key = reinterpret_cast<char*>(&key_u);
+    size = write_size = sizeof(b);
+    }
+
+void HashKey::Set(int i)
+    {
+    key_u.i = i;
+    key = reinterpret_cast<char*>(&key_u);
+    size = write_size = sizeof(i);
+    }
+
+void HashKey::Set(bro_int_t bi)
+    {
+    key_u.bi = bi;
+    key = reinterpret_cast<char*>(&key_u);
+    size = write_size = sizeof(bi);
+    }
+
+void HashKey::Set(bro_uint_t bu)
+    {
+    key_u.bi = bro_int_t(bu);
+    key = reinterpret_cast<char*>(&key_u);
+    size = write_size = sizeof(bu);
+    }
+
+void HashKey::Set(uint32_t u)
+    {
+    key_u.u32 = u;
+    key = reinterpret_cast<char*>(&key_u);
+    size = write_size = sizeof(u);
+    }
+
+void HashKey::Set(double d)
+    {
+    key_u.d = d;
+    key = reinterpret_cast<char*>(&key_u);
+    size = write_size = sizeof(d);
+    }
+
+void HashKey::Set(const void* p)
+    {
+    key_u.p = p;
+    key = reinterpret_cast<char*>(&key_u);
+    size = write_size = sizeof(p);
+    }
+
+void HashKey::Reserve(const char* tag, size_t addl_size, size_t alignment)
+    {
+    ASSERT(! IsAllocated());
+    size_t s0 = size;
+    size_t s1 = util::memory_size_align(size, alignment);
+    size = s1 + addl_size;
+
+    DBG_LOG(DBG_HASHKEY, "HashKey %p reserving %lu/%lu: %lu -> %lu -> %lu [%s]", this, addl_size,
+            alignment, s0, s1, size, tag);
+    }
+
+void HashKey::Allocate()
+    {
+    if ( key != nullptr and key != reinterpret_cast<char*>(&key_u) )
+        {
+        reporter->InternalWarning("usage error in HashKey::Allocate(): already allocated");
+        return;
+        }
+
+    is_our_dynamic = true;
+    key = reinterpret_cast<char*>(new double[size / sizeof(double) + 1]);
+
+    read_size = 0;
+    write_size = 0;
+    }
+
+void HashKey::Write(const char* tag, bool b)
+    {
+    Write(tag, &b, sizeof(b), 0);
+    }
+
+void HashKey::Write(const char* tag, int i, bool align)
+    {
+    if ( ! IsAllocated() )
+        {
+        Set(i);
+        return;
+        }
+
+    Write(tag, &i, sizeof(i), align ? sizeof(i) : 0);
+    }
+
+void HashKey::Write(const char* tag, bro_int_t bi, bool align)
+    {
+    if ( ! IsAllocated() )
+        {
+        Set(bi);
+        return;
+        }
+
+    Write(tag, &bi, sizeof(bi), align ? sizeof(bi) : 0);
+    }
+
+void HashKey::Write(const char* tag, bro_uint_t bu, bool align)
+    {
+    if ( ! IsAllocated() )
+        {
+        Set(bu);
+        return;
+        }
+
+    Write(tag, &bu, sizeof(bu), align ? sizeof(bu) : 0);
+    }
+
+void HashKey::Write(const char* tag, uint32_t u, bool align)
+    {
+    if ( ! IsAllocated() )
+        {
+        Set(u);
+        return;
+        }
+
+    Write(tag, &u, sizeof(u), align ? sizeof(u) : 0);
+    }
+
+void HashKey::Write(const char* tag, double d, bool align)
+    {
+    if ( ! IsAllocated() )
+        {
+        Set(d);
+        return;
+        }
+
+    Write(tag, &d, sizeof(d), align ? sizeof(d) : 0);
+    }
+
+void HashKey::Write(const char* tag, const void* bytes, size_t n, size_t alignment)
+    {
+    size_t s0 = write_size;
+    AlignWrite(alignment);
+    size_t s1 = write_size;
+    EnsureWriteSpace(n);
+
+    memcpy(key + write_size, bytes, n);
+    write_size += n;
+
+    DBG_LOG(DBG_HASHKEY, "HashKey %p writing %lu/%lu: %lu -> %lu -> %lu [%s]", this, n, alignment,
+            s0, s1, write_size, tag);
+    }
+
+void HashKey::SkipWrite(const char* tag, size_t n)
+    {
+    DBG_LOG(DBG_HASHKEY, "HashKey %p skip-writing %lu: %lu -> %lu [%s]", this, n, write_size,
+            write_size + n, tag);
+
+    EnsureWriteSpace(n);
+    write_size += n;
+    }
+
+void HashKey::AlignWrite(size_t alignment)
+    {
+    ASSERT(IsAllocated());
+
+    if ( alignment == 0 )
+        return;
+
+    size_t old_size = write_size;
+
+    write_size = util::memory_size_align(write_size, alignment);
+
+    if ( write_size > size )
+        reporter->InternalError("buffer overflow in HashKey::AlignWrite(): "
+                                "after alignment, %lu bytes used of %lu allocated",
+                                write_size, size);
+
+    while ( old_size < write_size )
+        key[old_size++] = '\0';
+    }
+
+void HashKey::AlignRead(size_t alignment) const
+    {
+    ASSERT(IsAllocated());
+
+    if ( alignment == 0 )
+        return;
+
+    int old_size = read_size;
+
+    read_size = util::memory_size_align(read_size, alignment);
+
+    if ( read_size > size )
+        reporter->InternalError("buffer overflow in HashKey::AlignRead(): "
+                                "after alignment, %lu bytes used of %lu allocated",
+                                read_size, size);
+    }
+
+void HashKey::Read(const char* tag, bool& b) const
+    {
+    Read(tag, &b, sizeof(b), 0);
+    }
+
+void HashKey::Read(const char* tag, int& i, bool align) const
+    {
+    Read(tag, &i, sizeof(i), align ? sizeof(i) : 0);
+    }
+
+void HashKey::Read(const char* tag, bro_int_t& i, bool align) const
+    {
+    Read(tag, &i, sizeof(i), align ? sizeof(i) : 0);
+    }
+
+void HashKey::Read(const char* tag, bro_uint_t& u, bool align) const
+    {
+    Read(tag, &u, sizeof(u), align ? sizeof(u) : 0);
+    }
+
+void HashKey::Read(const char* tag, uint32_t& u, bool align) const
+    {
+    Read(tag, &u, sizeof(u), align ? sizeof(u) : 0);
+    }
+
+void HashKey::Read(const char* tag, double& d, bool align) const
+    {
+    Read(tag, &d, sizeof(d), align ? sizeof(d) : 0);
+    }
+
+void HashKey::Read(const char* tag, void* out, size_t n, size_t alignment) const
+    {
+    size_t s0 = read_size;
+    AlignRead(alignment);
+    size_t s1 = read_size;
+    EnsureReadSpace(n);
+
+    // In case out is nil, make sure nothing is to be read, and only memcpy
+    // when there is a non-zero amount. Memory checkers don't like null
+    // pointers in memcpy even if the size is 0.
+    ASSERT(out != nullptr || (out == nullptr && n == 0));
+
+    if ( n > 0 )
+        {
+        memcpy(out, key + read_size, n);
+        read_size += n;
+        }
+
+    DBG_LOG(DBG_HASHKEY, "HashKey %p reading %lu/%lu: %lu -> %lu -> %lu [%s]", this, n, alignment,
+            s0, s1, read_size, tag);
+    }
+
+void HashKey::SkipRead(const char* tag, size_t n) const
+    {
+    DBG_LOG(DBG_HASHKEY, "HashKey %p skip-reading %lu: %lu -> %lu [%s]", this, n, read_size,
+            read_size + n, tag);
+
+    EnsureReadSpace(n);
+    read_size += n;
+    }
+
+void HashKey::EnsureWriteSpace(size_t n) const
+    {
+    if ( n == 0 )
+        return;
+
+    if ( ! IsAllocated() )
+        reporter->InternalError("usage error in HashKey::EnsureWriteSpace(): "
+                                "size-checking unreserved buffer");
+
+    if ( write_size + n > size )
+        reporter->InternalError("buffer overflow in HashKey::Write(): writing %lu "
+                                "bytes with %lu remaining",
+                                n, size - write_size);
+    }
+
+void HashKey::EnsureReadSpace(size_t n) const
+    {
+    if ( n == 0 )
+        return;
+
+    if ( ! IsAllocated() )
+        reporter->InternalError("usage error in HashKey::EnsureReadSpace(): "
+                                "size-checking unreserved buffer");
+
+    if ( read_size + n > size )
+        reporter->InternalError("buffer overflow in HashKey::EnsureReadSpace(): reading %lu "
+                                "bytes with %lu remaining",
+                                n, size - read_size);
+    }
+
 } // namespace zeek::detail
src/Hash.h | 154 lines changed
@@ -1,7 +1,7 @@
 // See the file "COPYING" in the main distribution directory for copyright.
 
 /***
- * This file contains functions to generate hashes used keyed hash functions.
+ * This file contains functions to generate hashes using keyed hash functions.
  * Keyed hash functions make it difficult/impossible to find information about the
  * output of a hash when the key is unknown to the attacker. This fact holds, even
  * when the input value is known.

@@ -10,8 +10,7 @@
  * It is important that these hashes are not easily guessable to prevent complexity attacks.
  *
  * The HashKey class is the actual class that is used to generate Hash keys that are used
- * internally, e.g. for lookups in hash-tables; the Hashes are also used for connection ID
- * generation.
+ * internally, e.g. for lookups in hash-tables and connection ID generation.
  *
  * This means that the hashes created by most functions in this file will be different each run,
 * unless a seed file is used. There are a few functions that create hashes that are static over

@@ -29,8 +28,12 @@
 
 namespace zeek
     {
+
 class String;
+class ODesc;
+
     }
 
 namespace zeek::detail
     {

@@ -228,41 +231,35 @@ constexpr int NUM_HASH_KEYS = HASH_KEY_STRING + 1;
 class HashKey
     {
 public:
-    explicit HashKey(bro_int_t i);
-    explicit HashKey(bro_uint_t u);
+    explicit HashKey() { }
+    explicit HashKey(bool b);
+    explicit HashKey(int i);
+    explicit HashKey(bro_int_t bi);
+    explicit HashKey(bro_uint_t bu);
     explicit HashKey(uint32_t u);
-    HashKey(const uint32_t u[], int n);
+    HashKey(const uint32_t u[], size_t n);
     explicit HashKey(double d);
     explicit HashKey(const void* p);
-    explicit HashKey(const char* s);
-    explicit HashKey(const String* s);
-    ~HashKey()
-        {
-        if ( is_our_dynamic )
-            delete[](char*) key;
-        }
+    explicit HashKey(const char* s); // No copying, no ownership
+    explicit HashKey(const String* s); // No copying, no ownership
 
-    // Create a HashKey given all of its components. "key" is assumed
-    // to be dynamically allocated and to now belong to this HashKey
-    // (to delete upon destruct'ing). If "copy_key" is true, it's
-    // first copied.
-    //
-    // The calling sequence here is unusual (normally key would be
-    // first) to avoid possible ambiguities with the next constructor,
-    // which is the more commonly used one.
-    HashKey(int copy_key, void* key, int size);
+    // Builds a key from the given chunk of bytes. Copies the data.
+    HashKey(const void* bytes, size_t size);
 
-    // Same, but automatically copies the key.
-    HashKey(const void* key, int size, hash_t hash);
-
-    // Builds a key from the given chunk of bytes.
-    HashKey(const void* bytes, int size);
+    // Create a HashKey given all of its components. Copies the key.
+    HashKey(const void* key, size_t size, hash_t hash);
 
     // Create a Hashkey given all of its components *without*
     // copying the key and *without* taking ownership. Note that
     // "dont_copy" is a type placeholder to differentiate this member
     // function from the one above; its value is not used.
-    HashKey(const void* key, int size, hash_t hash, bool dont_copy);
+    HashKey(const void* key, size_t size, hash_t hash, bool dont_copy);
+
+    ~HashKey()
+        {
+        if ( is_our_dynamic )
+            delete[](char*) key;
+        }
 
     // Hands over the key to the caller. This means that if the
     // key is our dynamic, we give it to the caller and mark it

@@ -271,8 +268,8 @@ public:
     void* TakeKey();
 
     const void* Key() const { return key; }
-    int Size() const { return size; }
-    hash_t Hash() const { return hash; }
+    size_t Size() const { return size; }
+    hash_t Hash() const;
 
     [[deprecated("Remove in v5.1. MemoryAllocation() is deprecated and will be removed. See "
                  "GHI-572.")]] unsigned int

@@ -281,22 +278,107 @@ public:
         return padded_sizeof(*this) + util::pad_size(size);
         }
 
-    static hash_t HashBytes(const void* bytes, int size);
+    static hash_t HashBytes(const void* bytes, size_t size);
+
+    // A HashKey is "allocated" when the underlying key points somewhere
+    // other than our internal key_u union. This is almost like
+    // is_our_dynamic, but remains true also after TakeKey().
+    bool IsAllocated() const
+        {
+        return (key != nullptr && key != reinterpret_cast<const char*>(&key_u));
+        }
+
+    // Buffer size reservation. Repeated calls to these methods
+    // incrementally build up the eventual buffer size to be allocated via
+    // Allocate().
+    template <typename T> void ReserveType(const char* tag) { Reserve(tag, sizeof(T), sizeof(T)); }
+    void Reserve(const char* tag, size_t addl_size, size_t alignment = 0);
+
+    // Allocates the reserved amount of memory.
+    void Allocate();
+
+    // Incremental writes into an allocated HashKey. The tags give context
+    // to what's being written and are only used in debug-build log streams.
+    // When true, the alignment boolean will cause write-marker alignment to
+    // the size of the item being written, otherwise writes happen directly
+    // at the current marker.
+    void Write(const char* tag, bool b);
+    void Write(const char* tag, int i, bool align = true);
+    void Write(const char* tag, bro_int_t bi, bool align = true);
+    void Write(const char* tag, bro_uint_t bu, bool align = true);
+    void Write(const char* tag, uint32_t u, bool align = true);
+    void Write(const char* tag, double d, bool align = true);
+
+    void Write(const char* tag, const void* bytes, size_t n, size_t alignment = 0);
+
+    // For writes that copy directly into the allocated buffer, this method
+    // advances the write marker without modifying content.
+    void SkipWrite(const char* tag, size_t n);
+
+    // Aligns the write marker to the next multiple of the given alignment size.
+    void AlignWrite(size_t alignment);
+
+    // Bounds check: if the buffer does not have at least n bytes available
+    // to write into, triggers an InternalError.
+    void EnsureWriteSpace(size_t n) const;
+
+    // Reads don't modify our internal state except for the read offset
+    // pointer. To blend in more seamlessly with the rest of Zeek we keep
+    // reads a const operation.
+    void ResetRead() const { read_size = 0; }
+
+    // Incremental reads from an allocated HashKey. As with writes, the
+    // tags are only used for debug-build logging, and alignment prior
+    // to the read of the item is controlled by the align boolean.
+    void Read(const char* tag, bool& b) const;
+    void Read(const char* tag, int& i, bool align = true) const;
+    void Read(const char* tag, bro_int_t& bi, bool align = true) const;
+    void Read(const char* tag, bro_uint_t& bu, bool align = true) const;
+    void Read(const char* tag, uint32_t& u, bool align = true) const;
+    void Read(const char* tag, double& d, bool align = true) const;
+
+    void Read(const char* tag, void* out, size_t n, size_t alignment = 0) const;
+
+    // These mirror the corresponding write methods above.
+    void SkipRead(const char* tag, size_t n) const;
+    void AlignRead(size_t alignment) const;
+    void EnsureReadSpace(size_t n) const;
+
+    void* KeyAtWrite() { return static_cast<void*>(key + write_size); }
+    const void* KeyAtRead() const { return static_cast<void*>(key + read_size); }
+    const void* KeyEnd() const { return static_cast<void*>(key + size); }
+
+    void Describe(ODesc* d) const;
 
 protected:
-    void* CopyKey(const void* key, int size) const;
+    char* CopyKey(const char* key, size_t size) const;
+
+    // Payload setters for types stored directly in the key_u union. These
+    // adjust the size and write_size markers to indicate a full buffer, and
+    // use the key_u union for storage.
+    void Set(bool b);
+    void Set(int i);
+    void Set(bro_int_t bi);
+    void Set(bro_uint_t bu);
+    void Set(uint32_t u);
+    void Set(double d);
+    void Set(const void* p);
 
     union {
-        bro_int_t i;
+        bool b;
+        int i;
+        bro_int_t bi;
         uint32_t u32;
         double d;
         const void* p;
     } key_u;
 
-    void* key;
-    hash_t hash;
-    int size;
+    char* key = nullptr;
+    mutable hash_t hash = 0;
+    size_t size = 0;
     bool is_our_dynamic = false;
+    size_t write_size = 0;
+    mutable size_t read_size = 0;
     };
 
 extern void init_hash_function();
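Taken together, the declarations above suggest a two-phase usage pattern: reserve sizes in the same order as the later writes, allocate once, then write and (re)read at matching offsets, while the hash itself is computed lazily in Hash(). A hypothetical round trip, assuming Zeek's headers and types (illustrative only, not code from this commit):

    #include "zeek/Hash.h"

    using zeek::detail::HashKey;

    void hashkey_round_trip()
        {
        HashKey hk;

        // Phase 1: reserve, in the exact order of the writes below.
        hk.ReserveType<bro_int_t>("count-field");
        hk.Reserve("blob-field", 20);
        hk.Allocate();

        // Phase 2: write the payload; Hash() computes the digest on first use.
        char blob[20] = {0};
        hk.Write("count-field", bro_int_t(42));
        hk.Write("blob-field", blob, sizeof(blob));
        zeek::detail::hash_t h = hk.Hash();
        static_cast<void>(h);

        // Reads mirror the writes and only advance the const read marker.
        hk.ResetRead();
        bro_int_t count = 0;
        hk.Read("count-field", count);
        }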
src/Val.cc

@@ -2395,6 +2395,9 @@ void TableVal::Describe(ODesc* d) const
         d_ptr->SP();
         }
 
+    // The following shows the HashKey state as well:
+    // k->Describe(d_ptr);
+    // d_ptr->SP();
     vl->Describe(d_ptr);
 
     if ( table_type->IsSet() )
src/util.cc | 123 lines changed
@@ -2187,6 +2187,129 @@ void safe_close(int fd)
         }
     }
 
+const void* memory_align(const void* ptr, size_t size)
+    {
+    if ( ! size )
+        return ptr;
+
+    ASSERT(is_power_of_2(size));
+
+    const char* buf = reinterpret_cast<const char*>(ptr);
+    size_t mask = size - 1; // Assume size is a power of 2.
+    unsigned long l_ptr = reinterpret_cast<unsigned long>(ptr);
+    unsigned long offset = l_ptr & mask;
+
+    if ( offset > 0 )
+        return reinterpret_cast<const void*>(buf - offset + size);
+    else
+        return reinterpret_cast<const void*>(buf);
+    }
+
+TEST_CASE("util memory_align")
+    {
+    void* p1000 = (void*)0x1000;
+    void* p1001 = (void*)0x1001;
+    void* p1002 = (void*)0x1002;
+    void* p1003 = (void*)0x1003;
+    void* p1004 = (void*)0x1004;
+
+    CHECK(memory_align(p1000, 0) == p1000);
+    CHECK(memory_align(p1000, 1) == p1000);
+    CHECK(memory_align(p1000, 2) == p1000);
+    CHECK(memory_align(p1000, 4) == p1000);
+
+    CHECK(memory_align(p1001, 0) == p1001);
+    CHECK(memory_align(p1001, 1) == p1001);
+    CHECK(memory_align(p1001, 2) == p1002);
+    CHECK(memory_align(p1001, 4) == p1004);
+
+    CHECK(memory_align(p1002, 4) == p1004);
+    CHECK(memory_align(p1003, 4) == p1004);
+    }
+
+void* memory_align_and_pad(void* ptr, size_t size)
+    {
+    if ( ! size )
+        return ptr;
+
+    ASSERT(is_power_of_2(size));
+
+    char* buf = reinterpret_cast<char*>(ptr);
+    size_t mask = size - 1;
+    while ( (reinterpret_cast<unsigned long>(buf) & mask) != 0 )
+        // Not aligned - zero pad.
+        *buf++ = '\0';
+
+    return reinterpret_cast<void*>(buf);
+    }
+
+TEST_CASE("util memory_align_and_pad")
+    {
+    unsigned char mem[16];
+
+    memset(mem, 0xff, 16);
+
+    CHECK((mem[0] == 0xff && mem[1] == 0xff));
+
+    CHECK(memory_align_and_pad(mem, 0) == mem);
+    CHECK((mem[0] == 0xff && mem[1] == 0xff));
+
+    CHECK(memory_align_and_pad(mem, 2) == mem);
+    CHECK((mem[0] == 0xff && mem[1] == 0xff));
+
+    CHECK(memory_align_and_pad(mem + 1, 2) == mem + 2);
+    for ( int i = 1; i < 2; i++ )
+        CHECK(mem[i] == 0x00);
+    CHECK((mem[0] == 0xff && mem[2] == 0xff));
+
+    memset(mem, 0xff, 16);
+
+    CHECK(memory_align_and_pad(mem + 1, 4) == mem + 4);
+    for ( int i = 1; i < 3; i++ )
+        CHECK(mem[i] == 0x00);
+    CHECK((mem[0] == 0xff && mem[4] == 0xff));
+
+    memset(mem, 0xff, 16);
+
+    CHECK(memory_align_and_pad(mem + 1, 8) == mem + 8);
+    for ( int i = 1; i < 7; i++ )
+        CHECK(mem[i] == 0x00);
+    CHECK((mem[0] == 0xff && mem[8] == 0xff));
+    }
+
+int memory_size_align(size_t offset, size_t size)
+    {
+    if ( ! size || ! offset )
+        return offset;
+
+    ASSERT(is_power_of_2(size));
+
+    size_t mask = size - 1; // Assume size is a power of 2.
+    if ( offset & mask )
+        {
+        offset &= ~mask; // Round down.
+        offset += size; // Round up.
+        }
+
+    return offset;
+    }
+
+TEST_CASE("util memory_size_align")
+    {
+    CHECK(memory_size_align(0x1000, 0) == 0x1000);
+    CHECK(memory_size_align(0x1000, 1) == 0x1000);
+    CHECK(memory_size_align(0x1000, 2) == 0x1000);
+    CHECK(memory_size_align(0x1000, 4) == 0x1000);
+
+    CHECK(memory_size_align(0x1001, 0) == 0x1001);
+    CHECK(memory_size_align(0x1001, 1) == 0x1001);
+    CHECK(memory_size_align(0x1001, 2) == 0x1002);
+    CHECK(memory_size_align(0x1001, 4) == 0x1004);
+
+    CHECK(memory_size_align(0x1002, 4) == 0x1004);
+    CHECK(memory_size_align(0x1003, 4) == 0x1004);
+    }
+
 void get_memory_usage(uint64_t* total, uint64_t* malloced)
     {
     uint64_t ret_total;
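The behavioral change called out in the CHANGES entry is visible in memory_size_align above: memory_size_align(1, 4) yields 4, whereas the old CompHash::SizeAlign yielded 8 because it added a full alignment unit after rounding. A rough reconstruction of the old semantics, inferred from the commit message rather than the removed source:

    #include <cassert>
    #include <cstddef>

    // Hypothetical stand-in for the removed CompHash::SizeAlign.
    static size_t old_size_align(size_t offset, size_t size)
        {
        size_t mask = size - 1; // size is a power of 2
        if ( offset & mask )
            offset = (offset & ~mask) + size; // round up to the boundary...
        return offset + size;                 // ...then add one extra unit
        }

    int main()
        {
        assert(old_size_align(1, 4) == 8); // old: 1 -> 8
        // The new helper only rounds up as needed:
        // memory_size_align(1, 4) == 4.
        }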
src/util.h | 18 lines changed
@@ -493,6 +493,24 @@ inline char* safe_strncpy(char* dest, const char* src, size_t n)
     return result;
     }
 
+// Memory alignment helpers.
+
+inline bool is_power_of_2(bro_uint_t x)
+    {
+    return ((x - 1) & x) == 0;
+    }
+
+// Rounds the given pointer up to the nearest multiple of the
+// given size, if not already a multiple.
+const void* memory_align(const void* ptr, size_t size);
+
+// Rounds the given pointer up to the nearest multiple of the
+// given size, padding the skipped region with 0 bytes.
+void* memory_align_and_pad(void* ptr, size_t size);
+
+// Returns offset rounded up so it can correctly align data of the given size.
+int memory_size_align(size_t offset, size_t size);
+
 // Returns total memory allocations and (if available) amount actually
 // handed out by malloc.
 extern void get_memory_usage(uint64_t* total, uint64_t* malloced);
testing/btest/Baseline/language.set/out

@@ -70,3 +70,8 @@ equality (FAIL)
 non-equality (PASS)
 equality (FAIL)
 magnitude (FAIL)
+nested-set-add (PASS)
+nested-set-add (PASS)
+nested-set-add (PASS)
+nested-set-del (PASS)
+nested-set-in (PASS)
testing/btest/Baseline/language.table-nested-set-ordering/out (new file)

@@ -0,0 +1,5 @@
+### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
+[a={
+SHA1,
+MD5
+}, b=12345678901234567890, c=<uninitialized>]
testing/btest/Baseline/language.table/out

@@ -47,3 +47,5 @@ remove element (PASS)
 !in operator (PASS)
 remove element (PASS)
 !in operator (PASS)
+nested table addition (PASS)
+nested table removal (PASS)
testing/btest/language/set.zeek

@@ -28,6 +28,8 @@ type r: record {
     b: set[count];
 };
 
+type s: set[set[count]];
+
 global foo: set[r];
 global bar = set(1,3,5);

@@ -24,6 +24,7 @@ event zeek_init()
     local s6: set[port, string, bool] = set();
     local s7: set[port, string, bool];
     local s8 = set( [8/tcp, "type inference", T] );
+    local s9: set[set[count]] = set();
 
     # Type inference tests

@@ -181,5 +182,15 @@ event zeek_init()
     test_case( "equality", a == a | set(5,11) );
 
     test_case( "magnitude", |a_and_b| == |a_or_b|);
 
+    add s9[set(1,2,3)];
+    test_case( "nested-set-add", |s9| == 1 );
+    add s9[set(1,2,3)];
+    test_case( "nested-set-add", |s9| == 1 );
+    add s9[set(2,3,4)];
+    test_case( "nested-set-add", |s9| == 2 );
+    delete s9[set(1,2,3)];
+    test_case( "nested-set-del", |s9| == 1 );
+    test_case( "nested-set-in", set(2,3,4) in s9 );
 }
testing/btest/language/table-nested-set-ordering.zeek | 25 lines (new file)
@@ -0,0 +1,25 @@
+# This testcase used to cause subtle memory overflow problems due to deviating
+# traversal order of the k$a set members. With 4.2, this will trigger an
+# InternalError due to new bounds-checking. For context, see GHI-1753.
+#
+# @TEST-EXEC: zeek -b %INPUT >out
+# @TEST-EXEC: btest-diff out
+
+type Key: record {
+    a: set[string];
+    b: string &optional;
+    c: string &optional;
+};
+
+global state: table[Key] of count = {};
+
+event zeek_init() {
+
+    local k: Key;
+
+    k$a = set("MD5", "SHA1");
+    k$b = "12345678901234567890";
+
+    state[k] = 1;
+    print k;
+}
testing/btest/language/table.zeek

@@ -29,6 +29,9 @@ event zeek_init()
     local t11: table[conn_id, bool] of count = {
         [ [$orig_h=1.1.1.1, $orig_p=1234/tcp,
            $resp_h=2.2.2.2, $resp_p=4321/tcp], T ] = 42 };
+    local t12: table[table[count] of string] of string = {
+        [table([1] = "foo", [2] = "bar")] = "oh1"
+    };
 
     # Type inference tests

@@ -159,5 +162,10 @@ event zeek_init()
     delete t11[cid, T];
     test_case( "remove element", |t11| == 1 );
     test_case( "!in operator", [cid, T] !in t11 );
 
+    t12[table([2] = "blum", [3] = "frub")] = "oh2";
+    test_case( "nested table addition", |t12| == 2 );
+    delete t12[table([1] = "foo", [2] = "bar")];
+    test_case( "nested table removal", |t12| == 1 );
+
 }