Threaded logging framework.
This is based on Gilbert's code, but I ended up refactoring it quite a bit. That's why I didn't do a direct merge but started with a new branch and copied things over to adapt. It looks quite a bit different now, as I tried to generalize things a bit more to also support the Input Framework.

The larger code changes are:

- Moved all logging code into the subdirectory src/logging/. Code here is in namespace "logging".

- Moved all threading code into the subdirectory src/threading/. Code here is in namespace "threading".

- Introduced a central thread manager that tracks threads and is in charge of termination and (eventually) statistics.

- Refactored the logging-independent threading code into base classes BasicThread and MsgThread. The former encapsulates all the pthread code behind simple start/stop methods and provides a single Run() method to override. The latter is derived from BasicThread and adds bi-directional message passing between main and child threads. The hope is that the Input Framework can reuse this part quite directly.

- A log writer is now split into a general WriterFrontend (LogEmissary in Gilbert's code) and a type-specific WriterBackend. Specific writers are implemented by deriving from the latter. (The plugin interface is almost unchanged compared to the 2.0 version.) Frontend and backend communicate via MsgThread's message passing.

- MsgThread (and thus WriterBackend) has a Heartbeat() method that a thread can override to execute code on a regular basis. It's triggered roughly once a second by the main thread. (A simplified sketch of the message-passing and heartbeat pattern follows after this message.)

- Integration into "the rest of Bro". Threads can send messages to the reporter and do debugging output; they are hooked into the I/O loop for sending messages back; and there's a new debugging stream "threading" that logs, well, threading activity.

This all seems to work for the most part, but it's not done yet. TODO list:

- Not all tests pass yet. In particular, diffs for the external tests seem to indicate some memory problem (no crashes, just an occasional weird character).
- Only tested in --enable-debug mode.
- Only tested on Linux.
- Needs a leak check.
- Each log write is currently a single inter-thread message. Bring Gilbert's bulk writes back.
- Code needs further cleanup.
- Document the class API.
- Document the internal structure of the logging framework.
- Check for robustness: live traffic, aborting, signals, etc.
- Add thread statistics to profile.log (most of the code is there).
- Customize the OS-visible thread names on platforms that support it.
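To make the BasicThread/MsgThread split and the Heartbeat() mechanism easier to picture, here is a minimal, self-contained sketch of the pattern described in the list above. It is illustrative only: the names (MiniMsgThread, WriteMsg, Put(), Process()) are simplified stand-ins rather than the actual Zeek/Bro classes, it uses C++11 std::thread primitives instead of the pthread code in this commit, and the heartbeat is driven by a timed wait in the child thread rather than by the main thread's timer.

// mini_msg_thread.cc -- illustrative sketch only; not the Zeek/Bro classes.
// Compile: g++ -std=c++11 -pthread mini_msg_thread.cc
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <thread>

// Message passed from the "frontend" (main thread) to the "backend" thread,
// standing in for a log-write message.
struct WriteMsg { std::string line; };

class MiniMsgThread {
public:
    // Simple start/stop interface, mirroring the BasicThread idea.
    void Start() { thread_ = std::thread(&MiniMsgThread::Run, this); }

    void Stop()
        {
        { std::lock_guard<std::mutex> lk(mtx_); stop_ = true; }
        cv_.notify_one();
        if ( thread_.joinable() )
            thread_.join();
        }

    // Frontend side: enqueue a message for the backend thread.
    void Put(WriteMsg msg)
        {
        { std::lock_guard<std::mutex> lk(mtx_); queue_.push(std::move(msg)); }
        cv_.notify_one();
        }

protected:
    // Called roughly once a second when the backend is idle.
    virtual void Heartbeat() { std::cout << "[heartbeat]\n"; }

    // Called for each message; a real backend would write to its log here.
    virtual void Process(const WriteMsg& msg) { std::cout << "write: " << msg.line << "\n"; }

private:
    // Backend loop, playing the role of the overridable Run() method.
    void Run()
        {
        for ( ; ; )
            {
            std::unique_lock<std::mutex> lk(mtx_);
            cv_.wait_for(lk, std::chrono::seconds(1),
                         [this] { return stop_ || ! queue_.empty(); });

            if ( stop_ && queue_.empty() )
                return;

            if ( queue_.empty() )
                {
                lk.unlock();
                Heartbeat();   // timed out with nothing to do
                continue;
                }

            WriteMsg msg = std::move(queue_.front());
            queue_.pop();
            lk.unlock();
            Process(msg);
            }
        }

    std::mutex mtx_;
    std::condition_variable cv_;
    std::queue<WriteMsg> queue_;
    bool stop_ = false;
    std::thread thread_;
};

int main()
    {
    MiniMsgThread backend;            // plays the role of a WriterBackend
    backend.Start();
    backend.Put({"first log line"});  // frontend enqueues writes
    backend.Put({"second log line"});
    std::this_thread::sleep_for(std::chrono::seconds(2)); // let a heartbeat fire
    backend.Stop();
    return 0;
    }

The real framework differs in important ways (messages flow in both directions, and the main loop triggers the heartbeats), but the producer/consumer shape is the same.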
parent 60ae6f01d1, commit e4e770d475
28 changed files with 1745 additions and 503 deletions
src/threading/Queue.h (new file, 150 lines)
@@ -0,0 +1,150 @@
#ifndef THREADING_QUEUE_H
#define THREADING_QUEUE_H

#include <pthread.h>
#include <queue>
#include <deque>
#include <stdint.h>

#include "Reporter.h"

namespace threading {

/**
 * Just a simple threaded queue wrapper class. Uses multiple queues and
 * reads/writes in rotary fashion in an attempt to limit contention.
 * Due to locking granularity, bulk put/get is no faster than single
 * put/get as long as the FIFO guarantee is required.
 */

template<typename T>
class Queue_
{
public:
    Queue_();
    ~Queue_();

    T Get();
    void Put(T data);
    bool Ready();
    uint64_t Size();

private:
    static const int NUM_QUEUES = 8;

    pthread_mutex_t mutex[NUM_QUEUES];    // Mutex protected shared accesses.
    pthread_cond_t has_data[NUM_QUEUES];  // Signals when data becomes available
    std::queue<T> messages[NUM_QUEUES];   // Actually holds the queued messages

    int read_ptr;   // Where the next operation will read from
    int write_ptr;  // Where the next operation will write to
    uint64_t size;
};

inline static void safe_lock(pthread_mutex_t* mutex)
    {
    if ( pthread_mutex_lock(mutex) != 0 )
        reporter->FatalErrorWithCore("cannot lock mutex");
    }

inline static void safe_unlock(pthread_mutex_t* mutex)
    {
    if ( pthread_mutex_unlock(mutex) != 0 )
        reporter->FatalErrorWithCore("cannot unlock mutex");
    }

template<typename T>
inline Queue_<T>::Queue_()
    {
    read_ptr = 0;
    write_ptr = 0;

    for ( int i = 0; i < NUM_QUEUES; ++i )
        {
        if ( pthread_cond_init(&has_data[i], NULL) != 0 )
            reporter->FatalError("cannot init queue condition variable");

        if ( pthread_mutex_init(&mutex[i], NULL) != 0 )
            reporter->FatalError("cannot init queue mutex");
        }
    }

template<typename T>
inline Queue_<T>::~Queue_()
    {
    for ( int i = 0; i < NUM_QUEUES; ++i )
        {
        pthread_cond_destroy(&has_data[i]);
        pthread_mutex_destroy(&mutex[i]);
        }
    }

template<typename T>
inline T Queue_<T>::Get()
    {
    safe_lock(&mutex[read_ptr]);

    int old_read_ptr = read_ptr;

    if ( messages[read_ptr].empty() )
        pthread_cond_wait(&has_data[read_ptr], &mutex[read_ptr]);

    T data = messages[read_ptr].front();
    messages[read_ptr].pop();
    --size;

    read_ptr = (read_ptr + 1) % NUM_QUEUES;

    safe_unlock(&mutex[old_read_ptr]);

    return data;
    }

template<typename T>
inline void Queue_<T>::Put(T data)
    {
    safe_lock(&mutex[write_ptr]);

    int old_write_ptr = write_ptr;

    bool need_signal = messages[write_ptr].empty();

    messages[write_ptr].push(data);
    ++size;

    if ( need_signal )
        pthread_cond_signal(&has_data[write_ptr]);

    write_ptr = (write_ptr + 1) % NUM_QUEUES;

    safe_unlock(&mutex[old_write_ptr]);
    }

template<typename T>
inline bool Queue_<T>::Ready()
    {
    safe_lock(&mutex[read_ptr]);

    bool ret = (messages[read_ptr].size());

    safe_unlock(&mutex[read_ptr]);

    return ret;
    }

template<typename T>
inline uint64_t Queue_<T>::Size()
    {
    safe_lock(&mutex[read_ptr]);

    uint64_t s = size;

    safe_unlock(&mutex[read_ptr]);

    return s;
    }

}

#endif
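As a usage illustration, the following is a hypothetical standalone harness that exercises Queue_ as a single-producer/single-consumer channel. It is not part of the commit; in particular, the stub Reporter.h sketched in the comments and the build line are assumptions made so the header can compile outside the Bro tree (in-tree, the real Reporter provides the global `reporter` used by safe_lock()/safe_unlock()).

// queue_test.cc -- hypothetical harness, not part of the commit.
//
// Assumes a stub Reporter.h is found on the include path so the header
// compiles outside the Bro tree, e.g.:
//
//     struct Reporter {
//         void FatalError(const char* msg);
//         void FatalErrorWithCore(const char* msg);
//     };
//     extern Reporter* reporter;
//
// Illustrative build line:
//     g++ -I<stubs> -I<src> queue_test.cc reporter_stub.cc -lpthread -o queue_test

#include <cstdio>
#include <pthread.h>

#include "threading/Queue.h"

static void* producer(void* arg)
    {
    threading::Queue_<int>* q = static_cast<threading::Queue_<int>*>(arg);

    for ( int i = 0; i < 100; ++i )
        q->Put(i);   // each Put() lands in the next queue in rotary order

    return 0;
    }

int main()
    {
    threading::Queue_<int> q;

    pthread_t t;
    pthread_create(&t, 0, producer, &q);

    // Get() blocks on the corresponding condition variable until the
    // producer has put something into the queue currently being read.
    for ( int i = 0; i < 100; ++i )
        printf("got %d\n", q.Get());

    pthread_join(t, 0);
    return 0;
    }

With a single reader and a single writer, the rotary read_ptr/write_ptr advance in lockstep, so FIFO order is preserved while reader and writer mostly lock different mutexes.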