Mirror of https://github.com/zeek/zeek.git, synced 2025-10-06 16:48:19 +00:00
More file reassembly work.
- The reassembly behavior can be modified per-file by enabling or disabling
  the reassembler and/or changing the size of the reassembly buffer.
- Changed the file extraction analyzer to use the stream, avoiding issues with
  the chunk-based approach not immediately triggering the file_new event due
  to the mime-type detection delay; early chunks frequently ended up lost
  before.
- Generally things are working now and I'd consider this in testing.
parent 0b78f444a1
commit 38dbba7622

23 changed files with 375 additions and 159 deletions
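To make the switch from offset-tagged chunks to stream delivery concrete, here is a minimal, self-contained sketch of limit enforcement over an in-order stream. The class and function names are hypothetical and chosen only for illustration; this is not Zeek's actual implementation, which is shown in the diff below.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical sketch only: enforcing an extraction limit on an in-order
// stream. The names below are illustrative, not Zeek's classes.
class StreamExtractor {
public:
	explicit StreamExtractor(uint64_t arg_limit) : limit(arg_limit) {}

	// Mirrors the shape of DeliverStream() in the patch: write up to the
	// limit, report false once data had to be dropped.
	bool Deliver(const unsigned char* data, uint64_t len)
		{
		uint64_t towrite = len;

		if ( limit > 0 )
			towrite = written >= limit ? 0 : std::min(len, limit - written);

		if ( towrite > 0 )
			{
			fwrite(data, 1, towrite, stdout); // stand-in for safe_write()
			written += towrite;
			}

		return towrite == len;
		}

private:
	uint64_t limit;        // 0 means "no limit"
	uint64_t written = 0;  // running total, replaces the per-chunk offset
};

int main()
	{
	StreamExtractor ex(10); // extract at most 10 bytes
	const unsigned char msg[] = "hello world";
	ex.Deliver(msg, sizeof(msg) - 1); // writes only the first 10 bytes
	return 0;
	}

With in-order delivery the only state needed is a running count of bytes already written, which is presumably why check_limit_exceeded() drops its off parameter in the patch.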
@@ -14,7 +14,7 @@ Extract::Extract(RecordVal* args, File* file, const string& arg_filename,
 	: file_analysis::Analyzer(file_mgr->GetComponentTag("EXTRACT"), args, file),
 	  filename(arg_filename), limit(arg_limit)
 	{
-	fd = open(filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0666);
+	fd = open(filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_APPEND, 0666);
 
 	if ( fd < 0 )
 		{
@@ -53,7 +53,7 @@ file_analysis::Analyzer* Extract::Instantiate(RecordVal* args, File* file)
 	                   limit->AsCount());
 	}
 
-static bool check_limit_exceeded(uint64 lim, uint64 off, uint64 len, uint64* n)
+static bool check_limit_exceeded(uint64 lim, uint64 len, uint64* n)
 	{
 	if ( lim == 0 )
 		{
@@ -61,13 +61,13 @@ static bool check_limit_exceeded(uint64 lim, uint64 off, uint64 len, uint64* n)
 		return false;
 		}
 
-	if ( off >= lim )
-		{
-		*n = 0;
-		return true;
-		}
-
-	*n = lim - off;
+	//if ( off >= lim )
+	//	{
+	//	*n = 0;
+	//	return true;
+	//	}
+	//
+	//*n = lim - off;
 
 	if ( len > *n )
 		return true;
@@ -77,13 +77,13 @@ static bool check_limit_exceeded(uint64 lim, uint64 off, uint64 len, uint64* n)
 	return false;
 	}
 
-bool Extract::DeliverChunk(const u_char* data, uint64 len, uint64 offset)
+bool Extract::DeliverStream(const u_char* data, uint64 len)
 	{
 	if ( ! fd )
 		return false;
 
 	uint64 towrite = 0;
-	bool limit_exceeded = check_limit_exceeded(limit, offset, len, &towrite);
+	bool limit_exceeded = check_limit_exceeded(limit, len, &towrite);
 
 	if ( limit_exceeded && file_extraction_limit )
 		{
@@ -92,16 +92,15 @@ bool Extract::DeliverChunk(const u_char* data, uint64 len, uint64 offset)
 		vl->append(f->GetVal()->Ref());
 		vl->append(Args()->Ref());
 		vl->append(new Val(limit, TYPE_COUNT));
-		vl->append(new Val(offset, TYPE_COUNT));
 		vl->append(new Val(len, TYPE_COUNT));
 		f->FileEvent(file_extraction_limit, vl);
 
 		// Limit may have been modified by BIF, re-check it.
-		limit_exceeded = check_limit_exceeded(limit, offset, len, &towrite);
+		limit_exceeded = check_limit_exceeded(limit, len, &towrite);
 		}
 
 	if ( towrite > 0 )
-		safe_pwrite(fd, data, towrite, offset);
+		safe_write(fd, (const char *) data, towrite);
 
 	return ( ! limit_exceeded );
 	}
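A note on the write path visible in the last two hunks: offset-tagged chunk delivery required positional writes (safe_pwrite() at a given offset), while in-order stream delivery lets the extractor open the file with O_APPEND and write sequentially (safe_write()). Below is a minimal illustrative sketch of the two patterns, using plain POSIX calls as stand-ins for Zeek's wrappers (which add retry and error handling).

#include <fcntl.h>
#include <unistd.h>
#include <cstring>

// Illustrative sketch only: positional vs. sequential writes.
int main()
	{
	const char* data = "example payload";
	size_t len = strlen(data);

	// Chunk-style delivery: each chunk carries an offset, so every write
	// must be positioned explicitly.
	int fd1 = open("chunks.out", O_WRONLY | O_CREAT | O_TRUNC, 0666);
	if ( fd1 >= 0 )
		{
		(void) pwrite(fd1, data, len, 0); // write at an explicit offset
		close(fd1);
		}

	// Stream-style delivery: bytes arrive in order, so O_APPEND plus a
	// sequential write is sufficient.
	int fd2 = open("stream.out", O_WRONLY | O_CREAT | O_TRUNC | O_APPEND, 0666);
	if ( fd2 >= 0 )
		{
		(void) write(fd2, data, len);
		close(fd2);
		}

	return 0;
	}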