Mirror of https://github.com/zeek/zeek.git (synced 2025-10-05 16:18:19 +00:00)
Remove linked list from reassembly data structures
Everything, including iteration, is now done via a std::map.
Parent: 9b13825e16
Commit: e1e779e90b
8 changed files with 248 additions and 254 deletions
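The heart of the change is the container itself: blocks are now stored in a std::map keyed by their starting sequence number, so ordered traversal falls out of the map instead of following next/prev pointers. A minimal standalone sketch of that pattern, using a simplified block type rather than Zeek's actual DataBlock/DataBlockList classes:

    #include <cstdint>
    #include <iostream>
    #include <map>

    // Simplified stand-in for a reassembly block covering [seq, upper).
    // Payload storage is omitted for brevity.
    struct DataBlock {
        uint64_t seq;
        uint64_t upper;
    };

    // Blocks keyed by starting sequence number; the map keeps keys sorted,
    // so "the next block" is simply the next map entry.
    using DataBlockMap = std::map<uint64_t, DataBlock*>;

    int main() {
        DataBlockMap blocks;
        blocks.emplace(0,   new DataBlock{0, 100});
        blocks.emplace(100, new DataBlock{100, 250});
        blocks.emplace(400, new DataBlock{400, 500}); // leaves a hole at [250, 400)

        // The old traversal "for ( b = head; b; b = b->next )" becomes:
        for ( auto it = blocks.begin(); it != blocks.end(); ++it )
            std::cout << "block [" << it->second->seq << ", "
                      << it->second->upper << ")\n";

        for ( auto& e : blocks )
            delete e.second;
    }

Because the map keeps keys sorted, the loop visits blocks in sequence order, which is exactly what the removed linked-list walk provided.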
src/Frag.cc: 42 changed lines
@@ -185,24 +185,32 @@ void FragReassembler::Overlap(const u_char* b1, const u_char* b2, uint64_t n)
 Weird("fragment_overlap");
 }

-void FragReassembler::BlockInserted(const DataBlock* /* start_block */)
+void FragReassembler::BlockInserted(DataBlockMap::const_iterator /* it */)
 {
-const DataBlock* b = block_list.Head();
+// TODO: review all iteration here to see if it can be done better
+auto it = block_list.Begin();

-if ( b->seq > 0 || ! frag_size )
+if ( it->second->seq > 0 || ! frag_size )
 // For sure don't have it all yet.
 return;

+auto next = std::next(it);
+
 // We might have it all - look for contiguous all the way.
-for ( ; b->next; b = b->next )
-if ( b->upper != b->next->seq )
+while ( next != block_list.End() )
+{
+if ( it->second->upper != next->second->seq )
 break;

-if ( b->next )
+++it;
+++next;
+}
+
+auto last = std::prev(block_list.End())->second;
+
+if ( next != block_list.End() )
 {
 // We have a hole.
-if ( b->upper >= frag_size )
+if ( it->second->upper >= frag_size )
 {
 // We're stuck. The point where we stopped is
 // contiguous up through the expected end of
@@ -215,19 +223,19 @@ void FragReassembler::BlockInserted(const DataBlock* /* start_block */)
 // We decide to analyze the contiguous portion now.
 // Extend the fragment up through the end of what
 // we have.
-frag_size = b->upper;
+frag_size = it->second->upper;
 }
 else
 return;
 }

-else if ( block_list.Tail()->upper > frag_size )
+else if ( last->upper > frag_size )
 {
 Weird("fragment_size_inconsistency");
-frag_size = block_list.Tail()->upper;
+frag_size = last->upper;
 }

-else if ( block_list.Tail()->upper < frag_size )
+else if ( last->upper < frag_size )
 // Missing the tail.
 return;

@@ -248,12 +256,15 @@ void FragReassembler::BlockInserted(const DataBlock* /* start_block */)

 pkt += proto_hdr_len;

-for ( b = block_list.Head(); b; b = b->next )
+for ( it = block_list.Begin(); it != block_list.End(); ++it )
 {
+auto b = it->second;
+DataBlock* prev = it == block_list.Begin() ? nullptr : std::prev(it)->second;
+
 // If we're above a hole, stop. This can happen because
 // the logic above regarding a hole that's above the
 // expected fragment size.
-if ( b->prev && b->prev->upper < b->seq )
+if ( prev && prev->upper < b->seq )
 break;

 if ( b->upper > n )
@@ -265,8 +276,7 @@ void FragReassembler::BlockInserted(const DataBlock* /* start_block */)
 return;
 }

-memcpy((void*) &pkt[b->seq], (const void*) b->block,
-b->upper - b->seq);
+memcpy(&pkt[b->seq], b->block, b->upper - b->seq);
 }

 delete reassembled_pkt;
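The FragReassembler hunks above replace the next-pointer walk with a scan over adjacent map entries, comparing each block's upper bound against the next block's starting sequence number. A hedged sketch of that contiguity check in isolation, under simplified types (blocks held by value, no frag_size adjustment):

    #include <cstdint>
    #include <iterator>
    #include <map>

    struct Block { uint64_t seq, upper; };
    using BlockMap = std::map<uint64_t, Block>;

    // Returns true when the blocks cover [0, total) with no gaps, mirroring
    // the "look for contiguous all the way" loop above.
    bool IsContiguous(const BlockMap& blocks, uint64_t total)
        {
        if ( blocks.empty() || blocks.begin()->second.seq > 0 )
            return false;

        auto it = blocks.begin();
        auto next = std::next(it);

        while ( next != blocks.end() )
            {
            if ( it->second.upper != next->second.seq )
                return false; // hole between this block and the next one

            ++it;
            ++next;
            }

        return it->second.upper >= total;
        }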
@@ -37,7 +37,7 @@ public:
 const FragReassemblerKey& Key() const { return key; }

 protected:
-void BlockInserted(const DataBlock* start_block) override;
+void BlockInserted(DataBlockMap::const_iterator it) override;
 void Overlap(const u_char* b1, const u_char* b2, uint64_t n) override;
 void Weird(const char* name) const;

src/Reassem.cc: 249 changed lines
@@ -1,7 +1,6 @@
 // See the file "COPYING" in the main distribution directory for copyright.

 #include <algorithm>
 #include <vector>

 #include "zeek-config.h"

@@ -10,39 +9,22 @@
 uint64_t Reassembler::total_size = 0;
 uint64_t Reassembler::sizes[REASSEM_NUM];

-DataBlock::DataBlock(DataBlockList* list, DataBlockMap::const_iterator hint,
-const u_char* data, uint64_t size, uint64_t arg_seq,
-DataBlock* arg_prev, DataBlock* arg_next)
+DataBlock::DataBlock(const u_char* data, uint64_t size, uint64_t arg_seq)
 {
-// TODO: make a separate DataBlockList::Insert()
 seq = arg_seq;
 upper = seq + size;
 block = new u_char[size];

-memcpy((void*) block, (const void*) data, size);
-
-prev = arg_prev;
-next = arg_next;
-
-if ( prev )
-prev->next = this;
-if ( next )
-next->prev = this;
-
-list->block_map.emplace_hint(hint, seq, this);
-// TODO: no longer need a separate block count since the map can provide that
-++list->total_blocks;
-list->total_data_size += size;
-
-Reassembler::sizes[list->reassembler->rtype] += pad_size(size) + padded_sizeof(DataBlock);
-Reassembler::total_size += pad_size(size) + padded_sizeof(DataBlock);
+memcpy(block, data, size);
 }

 void DataBlockList::DataSize(uint64_t seq_cutoff, uint64_t* below, uint64_t* above) const
 {
-// TODO: just have book-keeping to track this info and avoid iterating ?
-for ( auto b = head; b; b = b->next )
+// TODO: add warnings that this is O(n) and slow
+
+for ( const auto& e : block_map )
 {
+auto b = e.second;
+
 if ( b->seq <= seq_cutoff )
 *above += b->Size();
 else
@@ -50,63 +32,47 @@ void DataBlockList::DataSize(uint64_t seq_cutoff, uint64_t* below, uint64_t* abo
 }
 }

-void DataBlockList::DeleteBlock(DataBlock* b)
+void DataBlockList::Delete(DataBlockMap::const_iterator it)
 {
-// TODO: Separate Remove / Delete ?
+auto b = it->second;
 auto size = b->Size();

---total_blocks;
+block_map.erase(it);
 total_data_size -= size;

-Reassembler::total_size -= pad_size(size) + padded_sizeof(DataBlock);
-Reassembler::sizes[reassembler->rtype] -= pad_size(size) + padded_sizeof(DataBlock);
-
-// TODO: most of the time we'd better erase via iterator rather than search
-//block_map.erase(b->seq);
-
 delete b;
+Reassembler::total_size -= size + sizeof(DataBlock);
+Reassembler::sizes[reassembler->rtype] -= size + sizeof(DataBlock);
 }

+DataBlock* DataBlockList::Remove(DataBlockMap::const_iterator it)
+{
+auto b = it->second;
+auto size = b->Size();
+
+block_map.erase(it);
+total_data_size -= size;
+
+return b;
+}
+
 void DataBlockList::Clear()
 {
-// TODO: can be done more efficiently
-while ( head )
-{
-auto next = head->next;
-DeleteBlock(head);
-head = next;
-}
+// TODO: maybe can just use clear()
+while ( ! block_map.empty() )
+Delete(block_map.begin());
-
-block_map.clear();
-tail = nullptr;
 }

 void DataBlockList::Append(DataBlock* block, uint64_t limit)
 {
-++total_blocks;
 total_data_size += block->Size();
-block->next = nullptr;

-if ( tail )
-{
-block->prev = tail;
-tail->next = block;
-}
-else
-{
-block->prev = nullptr;
-head = tail = block;
-}
-
 block_map.emplace_hint(block_map.end(), block->seq, block);

-while ( head && total_blocks > limit )
-{
-auto next = head->next;
-DeleteBlock(head);
-block_map.erase(block_map.begin());
-head = next;
-}
+while ( block_map.size() > limit )
+Delete(block_map.begin());
 }

 DataBlockMap::const_iterator DataBlockList::FindFirstBlockBefore(uint64_t seq) const
@@ -123,23 +89,35 @@ DataBlockMap::const_iterator DataBlockList::FindFirstBlockBefore(uint64_t seq) c
 return std::prev(it);
 }

-DataBlock* DataBlockList::Insert(uint64_t seq, uint64_t upper,
-const u_char* data,
+DataBlockMap::const_iterator
+DataBlockList::Insert(uint64_t seq, uint64_t upper, const u_char* data,
+DataBlockMap::const_iterator hint)
+{
+auto size = upper - seq;
+auto db = new DataBlock(data, size, seq);
+
+auto rval = block_map.emplace_hint(hint, seq, db);
+
+total_data_size += size;
+Reassembler::sizes[reassembler->rtype] += size + sizeof(DataBlock);
+Reassembler::total_size += size + sizeof(DataBlock);
+
+return rval;
+}
+
+DataBlockMap::const_iterator
+DataBlockList::Insert(uint64_t seq, uint64_t upper, const u_char* data,
 DataBlockMap::const_iterator* hint)
 {
 // Empty list.
-if ( ! head )
-{
-head = tail = new DataBlock(this, block_map.end(), data, upper - seq, seq, 0, 0);
-return head;
-}
+if ( block_map.empty() )
+return Insert(seq, upper, data, block_map.end());

+auto last = block_map.rbegin()->second;
+
 // Special check for the common case of appending to the end.
-if ( tail && seq == tail->upper )
-{
-tail = new DataBlock(this, block_map.end(), data, upper - seq, seq, tail, 0);
-return tail;
-}
+if ( seq == last->upper )
+return Insert(seq, upper, data, block_map.end());

 // Find the first block that doesn't come completely before the new data.
 DataBlockMap::const_iterator it;
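The rewritten Insert() above splits into a public entry point and a private helper that places the new block with emplace_hint, so the common append-at-the-end case passes block_map.end() as the hint and avoids a second lookup. A sketch of that hinted insertion under the same simplified types as earlier (not the real DataBlockList API; ownership and size accounting omitted):

    #include <cstdint>
    #include <cstring>
    #include <map>

    struct Block
        {
        Block(const unsigned char* data, uint64_t size, uint64_t arg_seq)
            : seq(arg_seq), upper(arg_seq + size), bytes(new unsigned char[size])
            { memcpy(bytes, data, size); }
        ~Block() { delete [] bytes; }

        uint64_t seq, upper;
        unsigned char* bytes;
        };

    using BlockMap = std::map<uint64_t, Block*>;

    // Hinted insert: when the caller already knows roughly where the new key
    // belongs, emplace_hint can place it without a fresh search.
    BlockMap::const_iterator
    InsertBlock(BlockMap& blocks, uint64_t seq, uint64_t upper,
                const unsigned char* data, BlockMap::const_iterator hint)
        {
        auto b = new Block(data, upper - seq, seq);
        return blocks.emplace_hint(hint, seq, b);
        }

Appending would call InsertBlock(blocks, seq, upper, data, blocks.end()), which corresponds to the "special check for the common case of appending to the end" in the hunk above.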
@@ -160,40 +138,33 @@ DataBlock* DataBlockList::Insert(uint64_t seq, uint64_t upper,
 DataBlock* b = it->second;

 if ( b->upper <= seq )
-{
 // b is the last block, and it comes completely before the new block.
-tail = new DataBlock(this, block_map.end(), data, upper - seq, seq, b, 0);
-return tail;
-}
-
-DataBlock* new_b = 0;
+return Insert(seq, upper, data, block_map.end());

 if ( upper <= b->seq )
-{
 // The new block comes completely before b.
-new_b = new DataBlock(this, it, data, upper - seq, seq, b->prev, b);
+return Insert(seq, upper, data, it);
-
-if ( b == head )
-head = new_b;
-
-return new_b;
-}
+DataBlock* new_b;
+DataBlockMap::const_iterator rval;

 // The blocks overlap.
 if ( seq < b->seq )
 {
 // The new block has a prefix that comes before b.
 uint64_t prefix_len = b->seq - seq;
-new_b = new DataBlock(this, it, data, prefix_len, seq, b->prev, b);
-
-if ( b == head )
-head = new_b;
+rval = Insert(seq, seq + prefix_len, data, it);
+new_b = rval->second;

 data += prefix_len;
 seq += prefix_len;
 }
 else
 {
+rval = it;
 new_b = b;
 }

 uint64_t overlap_start = seq;
 uint64_t overlap_offset = overlap_start - b->seq;
@@ -207,16 +178,13 @@ DataBlock* DataBlockList::Insert(uint64_t seq, uint64_t upper,
 data += overlap_len;
 seq += overlap_len;

+auto r = Insert(seq, upper, data, &it);
+
 if ( new_b == b )
-new_b = Insert(seq, upper, data, &it);
-else
-Insert(seq, upper, data, &it);
+rval = r;
 }

-if ( new_b->prev == tail )
-tail = new_b;
-
-return new_b;
+return rval;
 }

 uint64_t DataBlockList::Trim(uint64_t seq, uint64_t max_old,
@@ -227,18 +195,18 @@ uint64_t DataBlockList::Trim(uint64_t seq, uint64_t max_old,
 // Do this accounting before looking for Undelivered data,
 // since that will alter last_reassem_seq.

-if ( head )
+if ( ! block_map.empty() )
 {
-if ( head->seq > reassembler->LastReassemSeq() )
-// An initial hole.
-num_missing += head->seq - reassembler->LastReassemSeq();
-}
+auto first = block_map.begin()->second;
+
+if ( first->seq > reassembler->LastReassemSeq() )
+// An initial hole.
+num_missing += first->seq - reassembler->LastReassemSeq();
+}
 else if ( seq > reassembler->LastReassemSeq() )
-{ // Trimming data we never delivered.
-if ( ! head )
-// We won't have any accounting based on blocks
-// for this hole.
+{
+// Trimming data we never delivered.
+// We won't have any accounting based on blocks for this hole.
 num_missing += seq - reassembler->LastReassemSeq();
 }

@@ -248,56 +216,48 @@ uint64_t DataBlockList::Trim(uint64_t seq, uint64_t max_old,
 reassembler->Undelivered(seq);
 }

-auto first_removed = head && head->upper <= seq ? block_map.begin() : block_map.end();
-auto last_removed = first_removed;
-
-while ( head && head->upper <= seq )
+while ( ! block_map.empty() )
 {
-DataBlock* b = head->next;
+auto first_it = block_map.begin();
+auto first = first_it->second;

-if ( b && b->seq <= seq )
+if ( first->upper > seq )
+break;
+
+auto next_it = std::next(first_it);
+auto next = next_it == block_map.end() ? nullptr : next_it->second;
+
+if ( next && next->seq <= seq )
 {
-if ( head->upper != b->seq )
-num_missing += b->seq - head->upper;
+if ( first->upper != next->seq )
+num_missing += next->seq - first->upper;
 }
 else
 {
 // No more blocks - did this one make it to seq?
 // Second half of test is for acks of FINs, which
 // don't get entered into the sequence space.
-if ( head->upper != seq && head->upper != seq - 1 )
-num_missing += seq - head->upper;
+if ( first->upper != seq && first->upper != seq - 1 )
+num_missing += seq - first->upper;
 }

-// NOTE: erasing the node from the map takes place over the full
-// range getting removed (see below) rather than one node at a time.
 if ( max_old )
-{
---total_blocks;
-total_data_size -= head->Size();
-old_list->Append(head, max_old);
-}
+old_list->Append(Remove(first_it), max_old);
 else
-DeleteBlock(head);
-
-++last_removed;
-head = b;
+Delete(first_it);
 }

-block_map.erase(first_removed, last_removed);
-
-if ( head )
+if ( ! block_map.empty() )
 {
-head->prev = 0;
+auto first_it = block_map.begin();
+auto first = first_it->second;

 // If we skipped over some undeliverable data, then
 // it's possible that this block is now deliverable.
 // Give it a try.
-if ( head->seq == reassembler->LastReassemSeq() )
-reassembler->BlockInserted(head);
+if ( first->seq == reassembler->LastReassemSeq() )
+reassembler->BlockInserted(first_it);
 }
-else
-tail = 0;

 reassembler->SetTrimSeq(seq);
 return num_missing;
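The Trim() loop above now always works on block_map.begin(), removing entries from the front until the first remaining block reaches past the cutoff, instead of re-linking head. A simplified sketch of that front-trimming pattern (no hole accounting or old-block recycling, unlike the real Trim()):

    #include <cstdint>
    #include <map>

    struct Block { uint64_t seq, upper; };
    using BlockMap = std::map<uint64_t, Block>;

    // Drop all blocks that end at or below the cutoff sequence, always
    // examining the first (lowest-seq) entry.
    void TrimBelow(BlockMap& blocks, uint64_t seq_cutoff)
        {
        while ( ! blocks.empty() )
            {
            auto first = blocks.begin();

            if ( first->second.upper > seq_cutoff )
                break; // first remaining block reaches past the cutoff

            blocks.erase(first);
            }
        }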
@@ -317,25 +277,22 @@ void Reassembler::CheckOverlap(const DataBlockList& list,
 if ( list.Empty() )
 return;

-auto head = list.Head();
-auto tail = list.Tail();
+auto last = list.LastBlock();

-if ( seq == tail->upper )
+if ( seq == last->upper )
 // Special case check for common case of appending to the end.
 return;

 uint64_t upper = (seq + len);

 auto it = list.FindFirstBlockBefore(seq);
-const DataBlock* start;
-
-if ( it == list.block_map.end() )
-start = head;
-else
-start = it->second;
+if ( it == list.End() )
+it = list.Begin();

-for ( auto b = start; b; b = b->next )
+for ( ; it != list.End(); ++it )
 {
+auto b = it->second;
 uint64_t nseq = seq;
 uint64_t nupper = upper;
 const u_char* ndata = data;
@@ -387,8 +344,8 @@ void Reassembler::NewBlock(double t, uint64_t seq, uint64_t len, const u_char* d
 len -= amount_old;
 }

-auto start_block = block_list.Insert(seq, upper_seq, data);;
-BlockInserted(start_block);
+auto it = block_list.Insert(seq, upper_seq, data);;
+BlockInserted(it);
 }

 uint64_t Reassembler::TrimToSeq(uint64_t seq)
@@ -21,26 +21,24 @@ enum ReassemblerType {
 };

 class Reassembler;
-class DataBlock;
 class DataBlockList;
-using DataBlockMap = std::map<uint64_t, DataBlock*>;

 class DataBlock {
 public:
-DataBlock(DataBlockList* list, DataBlockMap::const_iterator hint,
-const u_char* data, uint64_t size, uint64_t seq,
-DataBlock* prev, DataBlock* next);
+DataBlock(const u_char* data, uint64_t size, uint64_t seq);

-~DataBlock() { delete [] block; }
+~DataBlock()
+{ delete [] block; }

-uint64_t Size() const { return upper - seq; }
+uint64_t Size() const
+{ return upper - seq; }

-DataBlock* next; // next block with higher seq #
-DataBlock* prev; // previous block with lower seq #
-uint64_t seq, upper;
+uint64_t seq;
+uint64_t upper;
 u_char* block;
 };

+using DataBlockMap = std::map<uint64_t, DataBlock*>;
+
 // TODO: add comments
 class DataBlockList {
 public:
@@ -54,17 +52,23 @@ public:
 ~DataBlockList()
 { Clear(); }

-const DataBlock* Head() const
-{ return head; }
+DataBlockMap::const_iterator Begin() const
+{ return block_map.begin(); }

-const DataBlock* Tail() const
-{ return tail; }
+DataBlockMap::const_iterator End() const
+{ return block_map.end(); }

+DataBlock* FirstBlock() const
+{ return block_map.begin()->second; }
+
+DataBlock* LastBlock() const
+{ return block_map.rbegin()->second; }
+
 bool Empty() const
-{ return head == nullptr; };
+{ return block_map.empty(); };

 size_t NumBlocks() const
-{ return total_blocks; };
+{ return block_map.size(); };

 size_t DataSize() const
 { return total_data_size; }
@@ -73,7 +77,8 @@ public:

 void Clear();

-DataBlock* Insert(uint64_t seq, uint64_t upper, const u_char* data,
+DataBlockMap::const_iterator
+Insert(uint64_t seq, uint64_t upper, const u_char* data,
 DataBlockMap::const_iterator* hint = nullptr);

 void Append(DataBlock* block, uint64_t limit);
@@ -84,15 +89,15 @@ public:

 private:

-void DeleteBlock(DataBlock* b);
+DataBlockMap::const_iterator
+Insert(uint64_t seq, uint64_t upper, const u_char* data,
+DataBlockMap::const_iterator hint);

-friend class DataBlock;
 friend class Reassembler;
+void Delete(DataBlockMap::const_iterator it);
+
+DataBlock* Remove(DataBlockMap::const_iterator it);
+
 Reassembler* reassembler = nullptr;
-DataBlock* head = nullptr;
-DataBlock* tail = nullptr;
-size_t total_blocks = 0;
 size_t total_data_size = 0;
 DataBlockMap block_map;
 };
@@ -138,12 +143,11 @@ public:
 protected:
 Reassembler() { }

-friend class DataBlock;
 friend class DataBlockList;

 virtual void Undelivered(uint64_t up_to_seq);

-virtual void BlockInserted(const DataBlock* b) = 0;
+virtual void BlockInserted(DataBlockMap::const_iterator it) = 0;
 virtual void Overlap(const u_char* b1, const u_char* b2, uint64_t n) = 0;

 void CheckOverlap(const DataBlockList& list,
@@ -152,7 +156,6 @@ protected:
 DataBlockList block_list;
 DataBlockList old_block_list;

-// TODO: maybe roll some of these stats into DataBlockList ?
 uint64_t last_reassem_seq;
 uint64_t trim_seq; // how far we've trimmed
 uint32_t max_old_blocks;
@@ -66,11 +66,14 @@ void TCP_Reassembler::Done()

 if ( record_contents_file )
 { // Record any undelivered data.
-auto last_block = block_list.Tail();
+if ( ! block_list.Empty() )
+{
+auto last_block = std::prev(block_list.End())->second;
+
-if ( ! block_list.Empty() && last_reassem_seq < last_block->upper )
+if ( last_reassem_seq < last_block->upper )
 RecordToSeq(last_reassem_seq, last_block->upper,
 record_contents_file);
+}

 record_contents_file->Close();
 }
@@ -85,12 +88,11 @@ void TCP_Reassembler::SizeBufferedData(uint64_t& waiting_on_hole,

 uint64_t TCP_Reassembler::NumUndeliveredBytes() const
 {
-auto last_block = block_list.Tail();
-
-if ( last_block )
-return last_block->upper - last_reassem_seq;
-
+if ( block_list.Empty() )
 return 0;
+
+auto last_block = std::prev(block_list.End())->second;
+return last_block->upper - last_reassem_seq;
 }

 void TCP_Reassembler::SetContentsFile(BroFile* f)
@@ -107,7 +109,7 @@ void TCP_Reassembler::SetContentsFile(BroFile* f)
 else
 {
 if ( ! block_list.Empty() )
-RecordToSeq(block_list.Head()->seq, last_reassem_seq, f);
+RecordToSeq(block_list.Begin()->second->seq, last_reassem_seq, f);
 }

 Ref(f);
@@ -235,14 +237,16 @@ void TCP_Reassembler::Undelivered(uint64_t up_to_seq)
 if ( ! skip_deliveries )
 {
 // If we have blocks that begin below up_to_seq, deliver them.
-auto b = block_list.Head();
-while ( b )
+auto it = block_list.Begin();
+
+while ( it != block_list.End() )
 {
+// TODO: better way to do this iteration ?
+auto b = it->second;
+
 if ( b->seq < last_reassem_seq )
 {
 // Already delivered this block.
-b = b->next;
+++it;
 continue;
 }

@@ -255,10 +259,10 @@ void TCP_Reassembler::Undelivered(uint64_t up_to_seq)

 Gap(gap_at_seq, gap_len);
 last_reassem_seq += gap_len;
-BlockInserted(b);
+BlockInserted(it);
 // Inserting a block may cause trimming of what's buffered,
 // so have to assume 'b' is invalid, hence re-assign to start.
-b = block_list.Head();
+it = block_list.Begin();
 }

 if ( up_to_seq > last_reassem_seq )
@@ -285,8 +289,8 @@ void TCP_Reassembler::MatchUndelivered(uint64_t up_to_seq, bool use_last_upper)
 if ( block_list.Empty() || ! rule_matcher )
 return;

-auto last_block = block_list.Tail();
-ASSERT(last_block);
+auto last_block = std::prev(block_list.End())->second;

 if ( use_last_upper )
 up_to_seq = last_block->upper;

@@ -304,38 +308,45 @@ void TCP_Reassembler::MatchUndelivered(uint64_t up_to_seq, bool use_last_upper)
 // Skip blocks that are already delivered (but not ACK'ed).
 // Question: shall we instead keep a pointer to the first undelivered
 // block?
+// TODO: better way to iterate ?
-const DataBlock* b;
-for ( b = block_list.Head(); b && b->upper <= last_reassem_seq; b = b->next )
+
+for ( auto it = block_list.Begin(); it != block_list.End(); ++it )
+{
+auto b = it->second;
+
+if ( b->upper > last_reassem_seq )
+break;
+
 tcp_analyzer->Conn()->Match(Rule::PAYLOAD, b->block, b->Size(),
 false, false, IsOrig(), false);

-ASSERT(b);
+}
 }

 void TCP_Reassembler::RecordToSeq(uint64_t start_seq, uint64_t stop_seq, BroFile* f)
 {
-auto b = block_list.Head();
+// TODO: better way to iterate ?
-// Skip over blocks up to the start seq.
-while ( b && b->upper <= start_seq )
-b = b->next;
+auto it = block_list.Begin();
+
-if ( ! b )
+// Skip over blocks up to the start seq.
+while ( it != block_list.End() && it->second->upper <= start_seq )
+++it;
+
+if ( it == block_list.End() )
 return;

 uint64_t last_seq = start_seq;
-while ( b && b->upper <= stop_seq )
+
+while ( it != block_list.End() && it->second->upper <= stop_seq )
 {
+auto b = it->second;
+
 if ( b->seq > last_seq )
 RecordGap(last_seq, b->seq, f);

 RecordBlock(b, f);
 last_seq = b->upper;
-b = b->next;
+++it;
 }

-if ( b )
+if ( it != block_list.End() )
 // Check for final gap.
 if ( last_seq < stop_seq )
 RecordGap(last_seq, stop_seq, f);
@@ -375,8 +386,10 @@ void TCP_Reassembler::RecordGap(uint64_t start_seq, uint64_t upper_seq, BroFile*
 }
 }

-void TCP_Reassembler::BlockInserted(const DataBlock* start_block)
+void TCP_Reassembler::BlockInserted(DataBlockMap::const_iterator it)
 {
+auto start_block = it->second;
+
 if ( start_block->seq > last_reassem_seq ||
 start_block->upper <= last_reassem_seq )
 return;
@@ -387,10 +400,13 @@ void TCP_Reassembler::BlockInserted(const DataBlock* start_block)
 // new stuff off into its own block(s), but in the following
 // loop we have to take care not to deliver already-delivered
 // data.
+// TODO: better way to iterate ?
-for ( auto b = start_block;
-b && b->seq <= last_reassem_seq; b = b->next )
+while ( it != block_list.End() )
 {
+auto b = it->second;
+
+if ( b->seq > last_reassem_seq )
+break;
+
 if ( b->seq == last_reassem_seq )
 { // New stuff.
 uint64_t len = b->Size();
@@ -402,6 +418,8 @@ void TCP_Reassembler::BlockInserted(const DataBlock* start_block)

 DeliverBlock(seq, len, b->block);
 }
+
+++it;
 }

 TCP_Endpoint* e = endp;
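TCP_Reassembler::BlockInserted() above (and FileReassembler::BlockInserted() further down) now advance a map iterator and deliver each block whose start lines up with last_reassem_seq, stopping at the first gap. A standalone sketch of that in-order delivery loop; the deliver callback is only a stand-in for DeliverBlock()/DeliverStream(), and the types are simplified:

    #include <cstdint>
    #include <functional>
    #include <map>

    struct Block { uint64_t seq, upper; };
    using BlockMap = std::map<uint64_t, Block>;

    // Starting at 'it', deliver every block that lines up exactly with the
    // next expected sequence number; stop at the first hole. Returns the new
    // delivery frontier.
    uint64_t DeliverContiguous(const BlockMap& blocks, BlockMap::const_iterator it,
                               uint64_t last_reassem_seq,
                               const std::function<void(const Block&)>& deliver)
        {
        while ( it != blocks.end() )
            {
            const Block& b = it->second;

            if ( b.seq > last_reassem_seq )
                break; // hole: nothing more can be delivered in order

            if ( b.seq == last_reassem_seq )
                {
                deliver(b);
                last_reassem_seq = b.upper;
                }

            ++it;
            }

        return last_reassem_seq;
        }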
@@ -90,7 +90,7 @@ private:
 void RecordBlock(const DataBlock* b, BroFile* f);
 void RecordGap(uint64_t start_seq, uint64_t upper_seq, BroFile* f);

-void BlockInserted(const DataBlock* b) override;
+void BlockInserted(DataBlockMap::const_iterator it) override;
 void Overlap(const u_char* b1, const u_char* b2, uint64_t n) override;

 TCP_Endpoint* endp;
@@ -26,10 +26,11 @@ uint64_t FileReassembler::Flush()
 if ( flushing )
 return 0;

-auto last_block = block_list.Tail();
+if ( block_list.Empty() )
+return 0;
+
+auto last_block = std::prev(block_list.End())->second;

-if ( last_block )
-{
 // This is expected to call back into FileReassembler::Undelivered().
 flushing = true;
 uint64_t rval = TrimToSeq(last_block->upper);
@@ -37,9 +38,6 @@ uint64_t FileReassembler::Flush()
 return rval;
-}
-
-return 0;
 }

 uint64_t FileReassembler::FlushTo(uint64_t sequence)
 {
 if ( flushing )
@@ -52,22 +50,29 @@ uint64_t FileReassembler::FlushTo(uint64_t sequence)
 return rval;
 }

-void FileReassembler::BlockInserted(const DataBlock* start_block)
+void FileReassembler::BlockInserted(DataBlockMap::const_iterator it)
 {
+auto start_block = it->second;
+
 if ( start_block->seq > last_reassem_seq ||
 start_block->upper <= last_reassem_seq )
 return;

+// TODO: better way to iterate ?
-for ( auto b = start_block;
-b && b->seq <= last_reassem_seq; b = b->next )
+while ( it != block_list.End() )
 {
+auto b = it->second;
+
+if ( b->seq > last_reassem_seq )
+break;
+
 if ( b->seq == last_reassem_seq )
 { // New stuff.
 uint64_t len = b->Size();
 last_reassem_seq += len;
 the_file->DeliverStream(b->block, len);
 }

+++it;
 }

 // Throw out forwarded data
@@ -77,15 +82,16 @@ void FileReassembler::BlockInserted(const DataBlock* start_block)
 void FileReassembler::Undelivered(uint64_t up_to_seq)
 {
 // If we have blocks that begin below up_to_seq, deliver them.
-const DataBlock* b = block_list.Head();
+auto it = block_list.Begin();

+// TODO: better way to iterate ?
-while ( b )
+while ( it != block_list.End() )
 {
+auto b = it->second;
+
 if ( b->seq < last_reassem_seq )
 {
 // Already delivered this block.
-b = b->next;
+++it;
 continue;
 }

@@ -97,10 +103,10 @@ void FileReassembler::Undelivered(uint64_t up_to_seq)
 uint64_t gap_len = b->seq - last_reassem_seq;
 the_file->Gap(gap_at_seq, gap_len);
 last_reassem_seq += gap_len;
-BlockInserted(b);
+BlockInserted(it);
 // Inserting a block may cause trimming of what's buffered,
 // so have to assume 'b' is invalid, hence re-assign to start.
-b = block_list.Head();
+it = block_list.Begin();
 }

 if ( up_to_seq > last_reassem_seq )
@@ -51,7 +51,7 @@ protected:
 FileReassembler();

 void Undelivered(uint64_t up_to_seq) override;
-void BlockInserted(const DataBlock* b) override;
+void BlockInserted(DataBlockMap::const_iterator it) override;
 void Overlap(const u_char* b1, const u_char* b2, uint64_t n) override;

 File* the_file;