Mirror of https://github.com/zeek/zeek.git, synced 2025-10-14 12:38:20 +00:00
Merge remote-tracking branch 'origin/topic/timw/storage-framework'
* origin/topic/timw/storage-framework: (52 commits)
  Update docs submodule [nomail]
  Cleanup/update comments across the storage C++ files
  Split storage.bif file into events/sync/async, add more comments
  Update comments in script files, run zeek-format on all of them
  Allow sync methods to be called from when conditions, add related btest
  Redis: Handle disconnection correctly via callback
  Redis: Fix sync erase, add btest for it
  Remove default argument for callbacks, reorder function arguments
  Remove file-local expire_running variable
  Pass network time down to Expire()
  Add IN_PROGRESS return code, handle for async backends
  Store sqlite3_stmts directly instead of looking up from a map
  Reduce code duplication in storage.bif
  Add OperationResult::MakeVal, use it to reduce some code duplication
  Rearrange visibility of Backend methods, add DoPoll/DoExpire, add return comments
  Implement Storage::backend_opened and Storage::backend_lost events
  SQLite: expand expiration test
  SQLite: Handle other return values from sqlite3_step
  Redis: Fix thread-contention issues with Expire(), add more tests
  Change how redis-server is run during btests, removing redis.conf
  ...
commit 75fef4b2cf
112 changed files with 4357 additions and 21 deletions
3 .gitmodules vendored
|
@ -82,3 +82,6 @@
|
|||
[submodule "src/cluster/websocket/auxil/IXWebSocket"]
|
||||
path = src/cluster/websocket/auxil/IXWebSocket
|
||||
url = https://github.com/zeek/IXWebSocket.git
|
||||
[submodule "auxil/expected-lite"]
|
||||
path = auxil/expected-lite
|
||||
url = https://github.com/martinmoene/expected-lite.git
|
||||
|
|
|
@ -343,6 +343,7 @@ add_zeek_dynamic_plugin_build_interface_include_directories(
|
|||
${PROJECT_SOURCE_DIR}/auxil/broker/libbroker
|
||||
${PROJECT_SOURCE_DIR}/auxil/paraglob/include
|
||||
${PROJECT_SOURCE_DIR}/auxil/prometheus-cpp/core/include
|
||||
${PROJECT_SOURCE_DIR}/auxil/expected-lite/include
|
||||
${CMAKE_BINARY_DIR}/src
|
||||
${CMAKE_BINARY_DIR}/src/include
|
||||
${CMAKE_BINARY_DIR}/auxil/binpac/lib
|
||||
|
@ -353,6 +354,10 @@ target_include_directories(
|
|||
zeek_dynamic_plugin_base SYSTEM
|
||||
INTERFACE $<INSTALL_INTERFACE:include/zeek/3rdparty/prometheus-cpp/include>)
|
||||
|
||||
target_include_directories(
|
||||
zeek_dynamic_plugin_base SYSTEM
|
||||
INTERFACE $<INSTALL_INTERFACE:include/zeek/3rdparty/expected-lite/include>)
|
||||
|
||||
# Convenience function for adding an OBJECT library that feeds directly into the
|
||||
# main target(s).
|
||||
#
|
||||
|
@ -1015,6 +1020,9 @@ install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/prometheus-cpp/core/include/
|
|||
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/auxil/prometheus-cpp/core/include/prometheus
|
||||
DESTINATION include/zeek/3rdparty/prometheus-cpp/include)
|
||||
|
||||
install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/auxil/expected-lite/include/nonstd
|
||||
DESTINATION include/zeek/3rdparty/)
|
||||
|
||||
# Create 3rdparty/ghc within the build directory so that the include for
|
||||
# "zeek/3rdparty/ghc/filesystem.hpp" works within the build tree.
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -E make_directory
|
||||
|
@ -1025,6 +1033,13 @@ execute_process(
|
|||
"${CMAKE_CURRENT_SOURCE_DIR}/auxil/filesystem/include/ghc"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/3rdparty/ghc")
|
||||
|
||||
# Do the same for nonstd.
|
||||
execute_process(
|
||||
COMMAND
|
||||
"${CMAKE_COMMAND}" -E create_symlink
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/auxil/expected-lite/include/nonstd"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/3rdparty/nonstd")
|
||||
|
||||
# Optional Dependencies
|
||||
|
||||
set(USE_GEOIP false)
|
||||
|
@ -1128,6 +1143,7 @@ include(FindKqueue)
|
|||
|
||||
include(FindPrometheusCpp)
|
||||
include_directories(BEFORE "auxil/out_ptr/include")
|
||||
include_directories(BEFORE "auxil/expected-lite/include")
|
||||
|
||||
if ((OPENSSL_VERSION VERSION_EQUAL "1.1.0") OR (OPENSSL_VERSION VERSION_GREATER "1.1.0"))
|
||||
set(ZEEK_HAVE_OPENSSL_1_1 true CACHE INTERNAL "" FORCE)
|
||||
|
@ -1495,6 +1511,10 @@ message(
|
|||
"\n - Broker: ON"
|
||||
"\n - ZeroMQ: ${ENABLE_CLUSTER_BACKEND_ZEROMQ}"
|
||||
"\n"
|
||||
"\nStorage backends:"
|
||||
"\n - SQLite: ON"
|
||||
"\n - Redis: ${ENABLE_STORAGE_BACKEND_REDIS}"
|
||||
"\n"
|
||||
"\nFuzz Targets: ${ZEEK_ENABLE_FUZZERS}"
|
||||
"\nFuzz Engine: ${ZEEK_FUZZING_ENGINE}"
|
||||
"\n"
|
||||
|
|
1 auxil/expected-lite Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit f339d2f73730f8fee4412f5e4938717866ecef48
|
|
@ -24,6 +24,7 @@ RUN apt-get update && apt-get -y install \
|
|||
jq \
|
||||
lcov \
|
||||
libkrb5-dev \
|
||||
libhiredis-dev \
|
||||
libmaxminddb-dev \
|
||||
libpcap-dev \
|
||||
libssl-dev \
|
||||
|
@ -31,6 +32,7 @@ RUN apt-get update && apt-get -y install \
|
|||
python3 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
redis-server \
|
||||
ruby \
|
||||
sqlite3 \
|
||||
swig \
|
||||
|
|
2 doc
|
@ -1 +1 @@
|
|||
Subproject commit 625c534db57c54b1eaf410eb63e0e261ecad3df0
|
||||
Subproject commit 66a9cf6283e3086c9f95bbe114abcc20e172e119
|
3 scripts/base/frameworks/storage/__load__.zeek Normal file
|
@ -0,0 +1,3 @@
|
|||
@load ./async
|
||||
@load ./main
|
||||
@load ./sync
|
111 scripts/base/frameworks/storage/async.zeek Normal file
|
@ -0,0 +1,111 @@
|
|||
##! Asynchronous operation methods for the storage framework.
|
||||
|
||||
@load ./main
|
||||
|
||||
module Storage::Async;
|
||||
|
||||
export {
|
||||
## Opens a new backend connection based on a configuration object asynchronously.
|
||||
## This method must be called via a :zeek:see:`when` condition or an error will
|
||||
## be returned.
|
||||
##
|
||||
## btype: A tag indicating what type of backend should be opened. These are
|
||||
## defined by the backend plugins loaded.
|
||||
##
|
||||
## options: A record containing the configuration for the connection.
|
||||
##
|
||||
## key_type: The script-level type of keys stored in the backend. Used for
|
||||
## validation of keys passed to other framework methods.
|
||||
##
|
||||
## val_type: The script-level type of values stored in the backend. Used for
|
||||
## validation of values passed to :zeek:see:`Storage::Async::put` as
|
||||
## well as for type conversions for return values from
|
||||
## :zeek:see:`Storage::Async::get`.
|
||||
##
|
||||
## Returns: A record containing the status of the operation, and either an error
|
||||
## string on failure or a value on success. The value returned here will
|
||||
## be an ``opaque of BackendHandle``.
|
||||
global open_backend: function(btype: Storage::Backend,
|
||||
options: Storage::BackendOptions, key_type: any, val_type: any)
|
||||
: Storage::OperationResult;
|
||||
|
||||
## Closes an existing backend connection asynchronously. This method must be
|
||||
## called via a :zeek:see:`when` condition or an error will be returned.
|
||||
##
|
||||
## backend: A handle to a backend connection.
|
||||
##
|
||||
## Returns: A record containing the status of the operation and an optional error
|
||||
## string for failures.
|
||||
global close_backend: function(backend: opaque of Storage::BackendHandle)
|
||||
: Storage::OperationResult;
|
||||
|
||||
## Inserts a new entry into a backend asynchronously. This method must be called
|
||||
## via a :zeek:see:`when` condition or an error will be returned.
|
||||
##
|
||||
## backend: A handle to a backend connection.
|
||||
##
|
||||
## args: A :zeek:see:`Storage::PutArgs` record containing the arguments for the
|
||||
## operation.
|
||||
##
|
||||
## Returns: A record containing the status of the operation and an optional error
|
||||
## string for failures.
|
||||
global put: function(backend: opaque of Storage::BackendHandle,
|
||||
args: Storage::PutArgs): Storage::OperationResult;
|
||||
|
||||
## Gets an entry from the backend asynchronously. This method must be called via a
|
||||
## :zeek:see:`when` condition or an error will be returned.
|
||||
##
|
||||
## backend: A handle to a backend connection.
|
||||
##
|
||||
## key: The key to look up.
|
||||
##
|
||||
## Returns: A record containing the status of the operation, an optional error
|
||||
## string for failures, and an optional value for success. The value
|
||||
## returned here will be of the type passed into
|
||||
## :zeek:see:`Storage::Async::open_backend`.
|
||||
global get: function(backend: opaque of Storage::BackendHandle, key: any)
|
||||
: Storage::OperationResult;
|
||||
|
||||
## Erases an entry from the backend asynchronously. This method must be called via
|
||||
## a :zeek:see:`when` condition or an error will be returned.
|
||||
##
|
||||
## backend: A handle to a backend connection.
|
||||
##
|
||||
## key: The key to erase.
|
||||
##
|
||||
## Returns: A record containing the status of the operation and an optional error
|
||||
## string for failures.
|
||||
global erase: function(backend: opaque of Storage::BackendHandle, key: any)
|
||||
: Storage::OperationResult;
|
||||
}
|
||||
|
||||
function open_backend(btype: Storage::Backend, options: Storage::BackendOptions,
|
||||
key_type: any, val_type: any): Storage::OperationResult
|
||||
{
|
||||
return Storage::Async::__open_backend(btype, options, key_type, val_type);
|
||||
}
|
||||
|
||||
function close_backend(backend: opaque of Storage::BackendHandle)
|
||||
: Storage::OperationResult
|
||||
{
|
||||
return Storage::Async::__close_backend(backend);
|
||||
}
|
||||
|
||||
function put(backend: opaque of Storage::BackendHandle, args: Storage::PutArgs)
|
||||
: Storage::OperationResult
|
||||
{
|
||||
return Storage::Async::__put(backend, args$key, args$value, args$overwrite,
|
||||
args$expire_time);
|
||||
}
|
||||
|
||||
function get(backend: opaque of Storage::BackendHandle, key: any)
|
||||
: Storage::OperationResult
|
||||
{
|
||||
return Storage::Async::__get(backend, key);
|
||||
}
|
||||
|
||||
function erase(backend: opaque of Storage::BackendHandle, key: any)
|
||||
: Storage::OperationResult
|
||||
{
|
||||
return Storage::Async::__erase(backend, key);
|
||||
}
|
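For readers skimming the new API, here is a hedged usage sketch of the asynchronous interface defined above. It is not part of the patch; it assumes the SQLite backend from this branch registers the tag Storage::STORAGE_BACKEND_SQLITE and that string keys and values are used.

@load base/frameworks/storage/async
@load frameworks/storage/backend/sqlite

event zeek_init()
    {
    local opts: Storage::BackendOptions;
    opts$sqlite = [ $database_path=":memory:", $table_name="example" ];

    # Async methods must run inside a when condition.
    when [opts] ( local open_res = Storage::Async::open_backend(
        Storage::STORAGE_BACKEND_SQLITE, opts, string, string) )
        {
        if ( open_res$code != Storage::SUCCESS )
            return;

        # The opaque backend handle comes back in the result's value field.
        local b = open_res$value;

        when [b] ( local put_res = Storage::Async::put(b,
            [ $key="host", $value="192.168.0.1" ]) )
            {
            print "async put finished", put_res$code;
            }
        timeout 5 secs
            {
            print "async put timed out";
            }
        }
    timeout 5 secs
        {
        print "async open timed out";
        }
    }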
29 scripts/base/frameworks/storage/main.zeek Normal file
|
@ -0,0 +1,29 @@
|
|||
##! The storage framework provides a way to store long-term data to disk.
|
||||
|
||||
module Storage;
|
||||
|
||||
export {
|
||||
## Base record for backend options that can be passed to
|
||||
## :zeek:see:`Storage::Async::open_backend` and
|
||||
## :zeek:see:`Storage::Sync::open_backend`. Backend plugins can redef this record
|
||||
## to add relevant fields to it.
|
||||
type BackendOptions: record { };
|
||||
|
||||
## Record for passing arguments to :zeek:see:`Storage::Async::put` and
|
||||
## :zeek:see:`Storage::Sync::put`.
|
||||
type PutArgs: record {
|
||||
# The key to store the value under.
|
||||
key: any;
|
||||
|
||||
# The value to store associated with the key.
|
||||
value: any;
|
||||
|
||||
# Indicates whether this value should overwrite an existing entry for the
|
||||
# key.
|
||||
overwrite: bool &default=T;
|
||||
|
||||
# An interval of time until the entry is automatically removed from the
|
||||
# backend.
|
||||
expire_time: interval &default=0sec;
|
||||
};
|
||||
}
|
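As a quick illustration of the record above, a minimal sketch (the key and value contents are made up, not part of the patch):

@load base/frameworks/storage/main

event zeek_init()
    {
    # Overwrite is disabled and the entry expires after 30 seconds.
    local args = Storage::PutArgs($key="conn_count", $value=42,
                                  $overwrite=F, $expire_time=30 secs);
    print args;
    }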
105 scripts/base/frameworks/storage/sync.zeek Normal file
|
@ -0,0 +1,105 @@
|
|||
##! Synchronous operation methods for the storage framework.
|
||||
|
||||
@load ./main
|
||||
|
||||
module Storage::Sync;
|
||||
|
||||
export {
|
||||
## Opens a new backend connection based on a configuration object.
|
||||
##
|
||||
## btype: A tag indicating what type of backend should be opened. These are
|
||||
## defined by the backend plugins loaded.
|
||||
##
|
||||
## options: A record containing the configuration for the connection.
|
||||
##
|
||||
## key_type: The script-level type of keys stored in the backend. Used for
|
||||
## validation of keys passed to other framework methods.
|
||||
##
|
||||
## val_type: The script-level type of values stored in the backend. Used for
|
||||
## validation of values passed to :zeek:see:`Storage::Sync::put` as well
|
||||
## as for type conversions for return values from
|
||||
## :zeek:see:`Storage::Sync::get`.
|
||||
##
|
||||
## Returns: A record containing the status of the operation, and either an error
|
||||
## string on failure or a value on success. The value returned here will
|
||||
## be an ``opaque of BackendHandle``.
|
||||
global open_backend: function(btype: Storage::Backend,
|
||||
options: Storage::BackendOptions, key_type: any, val_type: any)
|
||||
: Storage::OperationResult;
|
||||
|
||||
## Closes an existing backend connection.
|
||||
##
|
||||
## backend: A handle to a backend connection.
|
||||
##
|
||||
## Returns: A record containing the status of the operation and an optional error
|
||||
## string for failures.
|
||||
global close_backend: function(backend: opaque of Storage::BackendHandle)
|
||||
: Storage::OperationResult;
|
||||
|
||||
## Inserts a new entry into a backend.
|
||||
##
|
||||
## backend: A handle to a backend connection.
|
||||
##
|
||||
## args: A :zeek:see:`Storage::PutArgs` record containing the arguments for the
|
||||
## operation.
|
||||
##
|
||||
## Returns: A record containing the status of the operation and an optional error
|
||||
## string for failures.
|
||||
global put: function(backend: opaque of Storage::BackendHandle,
|
||||
args: Storage::PutArgs): Storage::OperationResult;
|
||||
|
||||
## Gets an entry from the backend.
|
||||
##
|
||||
## backend: A handle to a backend connection.
|
||||
##
|
||||
## key: The key to look up.
|
||||
##
|
||||
## Returns: A record containing the status of the operation, an optional error
|
||||
## string for failures, and an optional value for success. The value
|
||||
## returned here will be of the type passed into
|
||||
## :zeek:see:`Storage::Sync::open_backend`.
|
||||
global get: function(backend: opaque of Storage::BackendHandle, key: any)
|
||||
: Storage::OperationResult;
|
||||
|
||||
## Erases an entry from the backend.
|
||||
##
|
||||
## backend: A handle to a backend connection.
|
||||
##
|
||||
## key: The key to erase.
|
||||
##
|
||||
## Returns: A record containing the status of the operation and an optional error
|
||||
## string for failures.
|
||||
global erase: function(backend: opaque of Storage::BackendHandle, key: any)
|
||||
: Storage::OperationResult;
|
||||
}
|
||||
|
||||
function open_backend(btype: Storage::Backend, options: Storage::BackendOptions,
|
||||
key_type: any, val_type: any): Storage::OperationResult
|
||||
{
|
||||
return Storage::Sync::__open_backend(btype, options, key_type, val_type);
|
||||
}
|
||||
|
||||
function close_backend(backend: opaque of Storage::BackendHandle)
|
||||
: Storage::OperationResult
|
||||
{
|
||||
return Storage::Sync::__close_backend(backend);
|
||||
}
|
||||
|
||||
function put(backend: opaque of Storage::BackendHandle, args: Storage::PutArgs)
|
||||
: Storage::OperationResult
|
||||
{
|
||||
return Storage::Sync::__put(backend, args$key, args$value, args$overwrite,
|
||||
args$expire_time);
|
||||
}
|
||||
|
||||
function get(backend: opaque of Storage::BackendHandle, key: any)
|
||||
: Storage::OperationResult
|
||||
{
|
||||
return Storage::Sync::__get(backend, key);
|
||||
}
|
||||
|
||||
function erase(backend: opaque of Storage::BackendHandle, key: any)
|
||||
: Storage::OperationResult
|
||||
{
|
||||
return Storage::Sync::__erase(backend, key);
|
||||
}
|
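A corresponding hedged sketch of the synchronous interface, which can be called directly without a when condition. The SQLite tag name is an assumption carried over from the backend plugin on this branch.

@load base/frameworks/storage/sync
@load frameworks/storage/backend/sqlite

event zeek_init()
    {
    local opts: Storage::BackendOptions;
    opts$sqlite = [ $database_path=":memory:", $table_name="example" ];

    local open_res = Storage::Sync::open_backend(Storage::STORAGE_BACKEND_SQLITE,
        opts, string, string);
    if ( open_res$code != Storage::SUCCESS )
        return;

    local b = open_res$value;

    local put_res = Storage::Sync::put(b, [ $key="answer", $value="42" ]);
    print "sync put", put_res$code;

    local get_res = Storage::Sync::get(b, "answer");
    if ( get_res$code == Storage::SUCCESS )
        {
        # The value comes back as the type passed to open_backend.
        local v = get_res$value as string;
        print "stored value", v;
        }

    Storage::Sync::close_backend(b);
    }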
|
@ -6210,6 +6210,62 @@ export {
|
|||
};
|
||||
}
|
||||
|
||||
module Storage;
|
||||
|
||||
export {
|
||||
## The interval used by the storage framework for automatic expiration of
## elements in all backends that don't support it natively, as well as
## whenever expiration is used while reading pcap files.
|
||||
const expire_interval = 15.0secs &redef;
|
||||
|
||||
## Common set of statuses that can be returned by storage operations. Backend plugins
|
||||
## can add to this enum if custom values are needed.
|
||||
type ReturnCode: enum {
|
||||
## Operation succeeded.
|
||||
SUCCESS,
|
||||
## Type of value passed to operation does not match type of
|
||||
## value passed when opening backend.
|
||||
VAL_TYPE_MISMATCH,
|
||||
## Type of key passed to operation does not match type of
|
||||
## key passed when opening backend.
|
||||
KEY_TYPE_MISMATCH,
|
||||
## Backend is not connected.
|
||||
NOT_CONNECTED,
|
||||
## Operation timed out.
|
||||
TIMEOUT,
|
||||
## Connection to backend was lost unexpectedly.
|
||||
CONNECTION_LOST,
|
||||
## Generic operation failed.
|
||||
OPERATION_FAILED,
|
||||
## Key requested was not found in backend.
|
||||
KEY_NOT_FOUND,
|
||||
## Key requested for overwrite already exists.
|
||||
KEY_EXISTS,
|
||||
## Generic connection-setup failure. This does not indicate that an existing
## connection was lost, but that the connection could not be set up in the
## first place.
|
||||
CONNECTION_FAILED,
|
||||
## Generic disconnection failure.
|
||||
DISCONNECTION_FAILED,
|
||||
## Generic initialization failure.
|
||||
INITIALIZATION_FAILED,
|
||||
## Returned from async operations when the backend is waiting
|
||||
## for a result.
|
||||
IN_PROGRESS,
|
||||
} &redef;
|
||||
|
||||
## Returned as the result of the various storage operations.
|
||||
type OperationResult: record {
|
||||
## One of a set of backend-redefinable return codes.
|
||||
code: ReturnCode;
|
||||
## An optional error string. This should be set when the
|
||||
## ``code`` field is not set to ``SUCCESS``.
|
||||
error_str: string &optional;
|
||||
## An optional value returned by ``get`` operations when a match
|
||||
## was found for the requested key.
|
||||
value: any &optional;
|
||||
};
|
||||
}
|
||||
|
||||
module GLOBAL;
|
||||
|
||||
@load base/bif/event.bif
|
||||
|
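A hedged sketch of how script code typically branches on these return codes; the helper name is made up and not part of the patch.

function report_storage_result(res: Storage::OperationResult)
    {
    if ( res$code == Storage::SUCCESS )
        {
        # Only get- and open-style operations fill in the value field.
        if ( res?$value )
            print "operation succeeded with value", res$value;
        else
            print "operation succeeded";
        }
    else if ( res$code == Storage::KEY_NOT_FOUND )
        print "no entry for the requested key";
    else if ( res?$error_str )
        print "operation failed", res$code, res$error_str;
    else
        print "operation failed", res$code;
    }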
|
|
@ -44,6 +44,7 @@
|
|||
@load base/frameworks/openflow
|
||||
@load base/frameworks/netcontrol
|
||||
@load base/frameworks/telemetry
|
||||
@load base/frameworks/storage
|
||||
|
||||
@if ( have_spicy() )
|
||||
@load base/frameworks/spicy
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
@load ./main.zeek
|
30 scripts/policy/frameworks/storage/backend/redis/main.zeek Normal file
|
@ -0,0 +1,30 @@
|
|||
##! Redis storage backend support
|
||||
|
||||
@load base/frameworks/storage/main
|
||||
|
||||
module Storage::Backend::Redis;
|
||||
|
||||
export {
|
||||
## Options record for the built-in Redis backend.
|
||||
type Options: record {
|
||||
# Address or hostname of the server.
|
||||
server_host: string &optional;
|
||||
|
||||
# Port for the server.
|
||||
server_port: port &default=6379/tcp;
|
||||
|
||||
# Server unix socket file. This can be used instead of the address and
|
||||
# port above to connect to a local server. In order to use this, the
|
||||
# ``server_host`` field must be unset.
|
||||
server_unix_socket: string &optional;
|
||||
|
||||
# Prefix prepended to keys that are stored, used to differentiate different
# types of data on the same server. Defaults to an empty string, but should
# preferably be set to a unique value for each Redis backend opened.
|
||||
key_prefix: string &default="";
|
||||
};
|
||||
}
|
||||
|
||||
redef record Storage::BackendOptions += {
|
||||
redis: Storage::Backend::Redis::Options &optional;
|
||||
};
|
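A hedged sketch of filling in these options. The host, prefix, and backend tag (assumed to be Storage::STORAGE_BACKEND_REDIS) are illustrative, and Redis support must be enabled at build time per the CMake summary above.

@load base/frameworks/storage/async
@load frameworks/storage/backend/redis

event zeek_init()
    {
    local opts: Storage::BackendOptions;
    opts$redis = [ $server_host="127.0.0.1", $server_port=6379/tcp,
                   $key_prefix="zeek_example" ];

    when [opts] ( local res = Storage::Async::open_backend(
        Storage::STORAGE_BACKEND_REDIS, opts, string, string) )
        {
        print "redis open", res$code;
        }
    timeout 5 secs
        {
        print "redis open timed out";
        }
    }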
|
@ -0,0 +1 @@
|
|||
@load ./main.zeek
|
36 scripts/policy/frameworks/storage/backend/sqlite/main.zeek Normal file
|
@ -0,0 +1,36 @@
|
|||
##! SQLite storage backend support
|
||||
|
||||
@load base/frameworks/storage/main
|
||||
|
||||
module Storage::Backend::SQLite;
|
||||
|
||||
export {
|
||||
## Options record for the built-in SQLite backend.
|
||||
type Options: record {
|
||||
## Path to the database file on disk. Setting this to ":memory:" will tell
|
||||
## SQLite to use an in-memory database. Relative paths will be opened
|
||||
## relative to the directory where Zeek was started from. Zeek will not
|
||||
## create intermediate directories if they do not already exist. See
|
||||
## https://www.sqlite.org/c3ref/open.html for more rules on paths that can
|
||||
## be passed here.
|
||||
database_path: string;
|
||||
|
||||
## Name of the table used for storing data. It is possible to use the same
|
||||
## database file for two separate tables, as long as this value is
|
||||
## different between the two.
|
||||
table_name: string;
|
||||
|
||||
## Key/value table for passing tuning parameters when opening the
|
||||
## database. These must be pairs that can be passed to the ``pragma``
|
||||
## command in sqlite.
|
||||
tuning_params: table[string] of string &default=table(
|
||||
["journal_mode"] = "WAL",
|
||||
["synchronous"] = "normal",
|
||||
["temp_store"] = "memory"
|
||||
);
|
||||
};
|
||||
}
|
||||
|
||||
redef record Storage::BackendOptions += {
|
||||
sqlite: Storage::Backend::SQLite::Options &optional;
|
||||
};
|
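A hedged sketch of overriding the default pragma tuning parameters above; the path, table name, and pragma values are illustrative only.

@load frameworks/storage/backend/sqlite

event zeek_init()
    {
    local sqlite_opts = Storage::Backend::SQLite::Options(
        $database_path="/var/tmp/zeek-storage.sqlite",
        $table_name="dns_cache",
        $tuning_params=table(["journal_mode"] = "WAL",
                             ["synchronous"] = "full"));

    # Wrapped into the common options record handed to
    # Storage::Sync::open_backend or Storage::Async::open_backend.
    local opts = Storage::BackendOptions($sqlite=sqlite_opts);
    print opts;
    }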
|
@ -83,6 +83,10 @@
|
|||
# @load frameworks/spicy/record-spicy-batch.zeek
|
||||
# @load frameworks/spicy/resource-usage.zeek
|
||||
@load frameworks/software/windows-version-detection.zeek
|
||||
@load frameworks/storage/backend/redis/__load__.zeek
|
||||
@load frameworks/storage/backend/redis/main.zeek
|
||||
@load frameworks/storage/backend/sqlite/__load__.zeek
|
||||
@load frameworks/storage/backend/sqlite/main.zeek
|
||||
@load frameworks/telemetry/log.zeek
|
||||
@load integration/collective-intel/__load__.zeek
|
||||
@load integration/collective-intel/main.zeek
|
||||
|
|
|
@ -1 +1 @@
|
|||
Subproject commit 0d15c8be14851914c8fd2d378bb700dae7e7b991
|
||||
Subproject commit 059a4a369f2a52d8013f0645b69e1bf2194e97c6
|
|
@ -205,6 +205,7 @@ add_subdirectory(iosource)
|
|||
add_subdirectory(logging)
|
||||
add_subdirectory(probabilistic)
|
||||
add_subdirectory(session)
|
||||
add_subdirectory(storage)
|
||||
|
||||
if (HAVE_SPICY)
|
||||
add_subdirectory(spicy)
|
||||
|
|
|
@ -22,7 +22,7 @@ DebugLogger::Stream DebugLogger::streams[NUM_DBGS] =
|
|||
{"tm", 0, false}, {"logging", 0, false}, {"input", 0, false}, {"threading", 0, false},
|
||||
{"plugins", 0, false}, {"zeekygen", 0, false}, {"pktio", 0, false}, {"broker", 0, false},
|
||||
{"scripts", 0, false}, {"supervisor", 0, false}, {"hashkey", 0, false}, {"spicy", 0, false},
|
||||
{"cluster", 0, false}};
|
||||
{"cluster", 0, false}, {"storage", 0, false}};
|
||||
|
||||
DebugLogger::~DebugLogger() {
|
||||
if ( file && file != stderr )
|
||||
|
|
|
@ -57,6 +57,7 @@ enum DebugStream {
|
|||
DBG_HASHKEY, // HashKey buffers
|
||||
DBG_SPICY, // Spicy functionality
|
||||
DBG_CLUSTER, // Cluster functionality
|
||||
DBG_STORAGE, // Storage framework
|
||||
|
||||
NUM_DBGS // Has to be last
|
||||
};
|
||||
|
|
|
@ -144,10 +144,7 @@ public:
|
|||
}
|
||||
|
||||
IntrusivePtr& operator=(std::nullptr_t) noexcept {
|
||||
if ( ptr_ ) {
|
||||
Unref(ptr_);
|
||||
ptr_ = nullptr;
|
||||
}
|
||||
reset();
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
@ -161,6 +158,23 @@ public:
|
|||
|
||||
explicit operator bool() const noexcept { return ptr_ != nullptr; }
|
||||
|
||||
void reset() noexcept {
|
||||
if ( ptr_ ) {
|
||||
Unref(ptr_);
|
||||
ptr_ = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void reset(T* ptr) {
|
||||
if ( ptr_ )
|
||||
Unref(ptr_);
|
||||
|
||||
if ( ptr )
|
||||
Ref(ptr);
|
||||
|
||||
ptr_ = ptr;
|
||||
}
|
||||
|
||||
private:
|
||||
pointer ptr_ = nullptr;
|
||||
};
|
||||
|
|
|
@ -51,6 +51,7 @@ const char* TimerNames[] = {
|
|||
"UnknownProtocolExpire",
|
||||
"LogDelayExpire",
|
||||
"LogFlushWriteBufferTimer",
|
||||
"StorageExpire",
|
||||
};
|
||||
|
||||
const char* timer_type_to_string(TimerType type) { return TimerNames[type]; }
|
||||
|
|
|
@ -58,8 +58,9 @@ enum TimerType : uint8_t {
|
|||
TIMER_UNKNOWN_PROTOCOL_EXPIRE,
|
||||
TIMER_LOG_DELAY_EXPIRE,
|
||||
TIMER_LOG_FLUSH_WRITE_BUFFER,
|
||||
TIMER_STORAGE_EXPIRE,
|
||||
};
|
||||
constexpr int NUM_TIMER_TYPES = int(TIMER_LOG_FLUSH_WRITE_BUFFER) + 1;
|
||||
constexpr int NUM_TIMER_TYPES = int(TIMER_STORAGE_EXPIRE) + 1;
|
||||
|
||||
extern const char* timer_type_to_string(TimerType type);
|
||||
|
||||
|
|
|
@ -32,3 +32,5 @@ const Threading::heartbeat_interval: interval;
|
|||
|
||||
const Log::flush_interval: interval;
|
||||
const Log::write_buffer_size: count;
|
||||
|
||||
const Storage::expire_interval: interval;
|
||||
|
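Since Storage::expire_interval is declared &redef, scripts can tune how often the expiration timer fires; a minimal sketch:

# Check for expired entries every 30 seconds instead of the default 15.
redef Storage::expire_interval = 30 secs;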
|
|
@ -45,6 +45,8 @@ void Component::Describe(ODesc* d) const {
|
|||
|
||||
case component::LOG_SERIALIZER: d->Add("Log Serializer"); break;
|
||||
|
||||
case component::STORAGE_BACKEND: d->Add("Storage Backend"); break;
|
||||
|
||||
default:
|
||||
reporter->InternalWarning("unknown component type in plugin::Component::Describe");
|
||||
d->Add("<unknown component type>");
|
||||
|
|
|
@ -33,6 +33,7 @@ enum Type {
|
|||
CLUSTER_BACKEND, /// A cluster backend.
|
||||
EVENT_SERIALIZER, /// A serializer for events, used by cluster backends.
|
||||
LOG_SERIALIZER, /// A serializer for log batches, used by cluster backends.
|
||||
STORAGE_BACKEND, /// A backend for the storage framework.
|
||||
};
|
||||
|
||||
} // namespace component
|
||||
|
|
|
@ -148,6 +148,16 @@ static std::unordered_map<std::string, unsigned int> func_attrs = {
|
|||
{"Reporter::warning", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Spicy::__resource_usage", ATTR_NO_ZEEK_SIDE_EFFECTS},
|
||||
{"Spicy::__toggle_analyzer", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Storage::Async::__close_backend", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Storage::Async::__erase", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Storage::Async::__get", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Storage::Async::__open_backend", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Storage::Async::__put", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Storage::Sync::__close_backend", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Storage::Sync::__erase", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Storage::Sync::__get", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Storage::Sync::__open_backend", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Storage::Sync::__put", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Supervisor::__create", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Supervisor::__destroy", ATTR_NO_SCRIPT_SIDE_EFFECTS},
|
||||
{"Supervisor::__is_supervised", ATTR_IDEMPOTENT},
|
||||
|
|
164 src/storage/Backend.cc Normal file
|
@ -0,0 +1,164 @@
|
|||
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#include "zeek/storage/Backend.h"
|
||||
|
||||
#include "zeek/Trigger.h"
|
||||
#include "zeek/broker/Data.h"
|
||||
#include "zeek/storage/ReturnCode.h"
|
||||
#include "zeek/storage/storage-events.bif.h"
|
||||
|
||||
namespace zeek::storage {
|
||||
|
||||
RecordValPtr OperationResult::BuildVal() { return MakeVal(code, err_str, value); }
|
||||
|
||||
RecordValPtr OperationResult::MakeVal(EnumValPtr code, std::string_view err_str, ValPtr value) {
|
||||
static auto op_result_type = zeek::id::find_type<zeek::RecordType>("Storage::OperationResult");
|
||||
|
||||
auto rec = zeek::make_intrusive<zeek::RecordVal>(op_result_type);
|
||||
rec->Assign(0, std::move(code));
|
||||
if ( ! err_str.empty() )
|
||||
rec->Assign(1, std::string{err_str});
|
||||
if ( value )
|
||||
rec->Assign(2, std::move(value));
|
||||
|
||||
return rec;
|
||||
}
|
||||
|
||||
ResultCallback::ResultCallback(zeek::detail::trigger::TriggerPtr trigger, const void* assoc)
|
||||
: trigger(std::move(trigger)), assoc(assoc) {}
|
||||
|
||||
void ResultCallback::Timeout() {
|
||||
static const auto& op_result_type = zeek::id::find_type<zeek::RecordType>("Storage::OperationResult");
|
||||
|
||||
if ( ! IsSyncCallback() )
|
||||
trigger->Cache(assoc, OperationResult::MakeVal(ReturnCode::TIMEOUT).release());
|
||||
}
|
||||
|
||||
OperationResultCallback::OperationResultCallback(zeek::detail::trigger::TriggerPtr trigger, const void* assoc)
|
||||
: ResultCallback(std::move(trigger), assoc) {}
|
||||
|
||||
void OperationResultCallback::Complete(OperationResult res) {
|
||||
// If this is a sync callback, there isn't a trigger to process. Store the result and bail.
|
||||
if ( IsSyncCallback() ) {
|
||||
result = std::move(res);
|
||||
return;
|
||||
}
|
||||
|
||||
auto res_val = res.BuildVal();
|
||||
trigger->Cache(assoc, res_val.get());
|
||||
trigger->Release();
|
||||
}
|
||||
|
||||
OpenResultCallback::OpenResultCallback(IntrusivePtr<detail::BackendHandleVal> backend)
|
||||
: ResultCallback(), backend(std::move(backend)) {}
|
||||
|
||||
OpenResultCallback::OpenResultCallback(zeek::detail::trigger::TriggerPtr trigger, const void* assoc,
|
||||
IntrusivePtr<detail::BackendHandleVal> backend)
|
||||
: ResultCallback(std::move(trigger), assoc), backend(std::move(backend)) {}
|
||||
|
||||
void OpenResultCallback::Complete(OperationResult res) {
|
||||
if ( res.code == ReturnCode::SUCCESS ) {
|
||||
backend->backend->EnqueueBackendOpened();
|
||||
}
|
||||
|
||||
// Set the result's value to the backend handle so that it ends up in the result
// that is either passed back to the trigger or stored for sync callbacks.
|
||||
res.value = backend;
|
||||
|
||||
// If this is a sync callback, there isn't a trigger to process. Store the result and bail.
|
||||
if ( IsSyncCallback() ) {
|
||||
result = std::move(res);
|
||||
return;
|
||||
}
|
||||
|
||||
auto res_val = res.BuildVal();
|
||||
trigger->Cache(assoc, res_val.get());
|
||||
trigger->Release();
|
||||
}
|
||||
|
||||
OperationResult Backend::Open(OpenResultCallback* cb, RecordValPtr options, TypePtr kt, TypePtr vt) {
|
||||
key_type = std::move(kt);
|
||||
val_type = std::move(vt);
|
||||
backend_options = options;
|
||||
|
||||
auto ret = DoOpen(cb, std::move(options));
|
||||
if ( ! ret.value )
|
||||
ret.value = cb->Backend();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
OperationResult Backend::Close(OperationResultCallback* cb) { return DoClose(cb); }
|
||||
|
||||
OperationResult Backend::Put(OperationResultCallback* cb, ValPtr key, ValPtr value, bool overwrite,
|
||||
double expiration_time) {
|
||||
// The intention for this method is to do some other heavy lifting in regard
|
||||
// to backends that need to pass data through the manager instead of directly
|
||||
// through the workers. For the first versions of the storage framework it
|
||||
// just calls the backend itself directly.
|
||||
if ( ! same_type(key->GetType(), key_type) ) {
|
||||
auto ret = OperationResult{ReturnCode::KEY_TYPE_MISMATCH};
|
||||
CompleteCallback(cb, ret);
|
||||
return ret;
|
||||
}
|
||||
if ( ! same_type(value->GetType(), val_type) ) {
|
||||
auto ret = OperationResult{ReturnCode::VAL_TYPE_MISMATCH};
|
||||
CompleteCallback(cb, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return DoPut(cb, std::move(key), std::move(value), overwrite, expiration_time);
|
||||
}
|
||||
|
||||
OperationResult Backend::Get(OperationResultCallback* cb, ValPtr key) {
|
||||
// See the note in Put().
|
||||
if ( ! same_type(key->GetType(), key_type) ) {
|
||||
auto ret = OperationResult{ReturnCode::KEY_TYPE_MISMATCH};
|
||||
CompleteCallback(cb, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return DoGet(cb, std::move(key));
|
||||
}
|
||||
|
||||
OperationResult Backend::Erase(OperationResultCallback* cb, ValPtr key) {
|
||||
// See the note in Put().
|
||||
if ( ! same_type(key->GetType(), key_type) ) {
|
||||
auto ret = OperationResult{ReturnCode::KEY_TYPE_MISMATCH};
|
||||
CompleteCallback(cb, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return DoErase(cb, std::move(key));
|
||||
}
|
||||
|
||||
void Backend::CompleteCallback(ResultCallback* cb, const OperationResult& data) const {
|
||||
cb->Complete(data);
|
||||
if ( ! cb->IsSyncCallback() ) {
|
||||
delete cb;
|
||||
}
|
||||
}
|
||||
|
||||
void Backend::EnqueueBackendOpened() {
|
||||
event_mgr.Enqueue(Storage::backend_opened, make_intrusive<StringVal>(Tag()), backend_options);
|
||||
}
|
||||
|
||||
void Backend::EnqueueBackendLost(std::string_view reason) {
|
||||
event_mgr.Enqueue(Storage::backend_lost, make_intrusive<StringVal>(Tag()), backend_options,
|
||||
make_intrusive<StringVal>(reason));
|
||||
}
|
||||
|
||||
zeek::OpaqueTypePtr detail::backend_opaque;
|
||||
IMPLEMENT_OPAQUE_VALUE(detail::BackendHandleVal)
|
||||
|
||||
std::optional<BrokerData> detail::BackendHandleVal::DoSerializeData() const {
|
||||
// Cannot serialize.
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
bool detail::BackendHandleVal::DoUnserializeData(BrokerDataView) {
|
||||
// Cannot unserialize.
|
||||
return false;
|
||||
}
|
||||
|
||||
} // namespace zeek::storage
|
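A hedged script-side sketch of handling the two events enqueued above. The exact parameter types live in storage-events.bif, which is not shown in this hunk; the signatures below are inferred from the Enqueue() calls and may differ.

event Storage::backend_opened(tag: string, options: any)
    {
    print "storage backend opened", tag;
    }

event Storage::backend_lost(tag: string, options: any, reason: string)
    {
    print "storage backend lost", tag, reason;
    }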
348 src/storage/Backend.h Normal file
|
@ -0,0 +1,348 @@
|
|||
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "zeek/OpaqueVal.h"
|
||||
#include "zeek/Val.h"
|
||||
|
||||
namespace zeek::detail::trigger {
|
||||
class Trigger;
|
||||
using TriggerPtr = IntrusivePtr<Trigger>;
|
||||
} // namespace zeek::detail::trigger
|
||||
|
||||
namespace zeek::storage {
|
||||
|
||||
class Manager;
|
||||
|
||||
/**
|
||||
* A structure mapped to the script-level Storage::OperationResult type for returning
|
||||
* status from storage operations.
|
||||
*/
|
||||
struct OperationResult {
|
||||
/**
|
||||
* One of a set of enum values used to report a code-based status. The default set
* of these values is looked up automatically by the `ReturnCode` class, but
* additional codes may be added by backends. See the script-level
* `Storage::ReturnCode` enum for documentation of the default statuses.
|
||||
*/
|
||||
EnumValPtr code;
|
||||
|
||||
/**
|
||||
* An optional error string that can be passed in the result in the case of failure.
|
||||
*/
|
||||
std::string err_str;
|
||||
|
||||
/**
|
||||
* A generic value pointer for operations that can return values, such as `Open()` and
|
||||
* `Get()`.
|
||||
*/
|
||||
ValPtr value;
|
||||
|
||||
/**
|
||||
* Returns a RecordVal of the script-level type `Storage::OperationResult` from the
|
||||
* values stored.
|
||||
*/
|
||||
RecordValPtr BuildVal();
|
||||
|
||||
/**
|
||||
* Static version of `BuildVal()` that returns a RecordVal of the script-level type
|
||||
* `Storage::OperationResult` from the values provided.
|
||||
*/
|
||||
static RecordValPtr MakeVal(EnumValPtr code, std::string_view err_str = "", ValPtr value = nullptr);
|
||||
};
|
||||
|
||||
/**
|
||||
* Base callback object for asynchronous operations.
|
||||
*/
|
||||
class ResultCallback {
|
||||
public:
|
||||
ResultCallback() = default;
|
||||
ResultCallback(detail::trigger::TriggerPtr trigger, const void* assoc);
|
||||
virtual ~ResultCallback() = default;
|
||||
|
||||
/**
|
||||
* Called on the callback when an operation times out. Sets the resulting status to
|
||||
* TIMEOUT and times out the trigger.
|
||||
*/
|
||||
void Timeout();
|
||||
|
||||
/**
|
||||
* Returns whether the callback was created in a synchronous context, i.e. without
* a trigger. This can be used to determine whether an operation was called
* synchronously or asynchronously.
|
||||
*/
|
||||
bool IsSyncCallback() const { return ! trigger; }
|
||||
|
||||
/**
|
||||
* Completes a callback, releasing the trigger if it was valid or storing the result
|
||||
* for later usage if needed.
|
||||
*/
|
||||
virtual void Complete(OperationResult res) = 0;
|
||||
|
||||
protected:
|
||||
zeek::detail::trigger::TriggerPtr trigger;
|
||||
const void* assoc = nullptr;
|
||||
};
|
||||
|
||||
/**
|
||||
* A callback that returns an `OperationResult` when it is complete. This is used by most
|
||||
* of the storage operations for returning status.
|
||||
*/
|
||||
class OperationResultCallback : public ResultCallback {
|
||||
public:
|
||||
OperationResultCallback() = default;
|
||||
OperationResultCallback(detail::trigger::TriggerPtr trigger, const void* assoc);
|
||||
void Complete(OperationResult res) override;
|
||||
OperationResult Result() { return result; }
|
||||
|
||||
private:
|
||||
OperationResult result;
|
||||
};
|
||||
|
||||
class OpenResultCallback;
|
||||
|
||||
/**
|
||||
* A list of available modes that backends can support. A combination of these is passed
|
||||
* to `Backend::Backend` during plugin initialization.
|
||||
*/
|
||||
enum SupportedModes : uint8_t { SYNC = 0x01, ASYNC = 0x02 };
|
||||
|
||||
class Backend : public zeek::Obj {
|
||||
public:
|
||||
/**
|
||||
* Returns a descriptive tag representing the source for debugging.
|
||||
*/
|
||||
const char* Tag() { return tag.c_str(); }
|
||||
|
||||
/**
|
||||
* Store a new key/value pair in the backend.
|
||||
*
|
||||
* @param cb A callback object for returning status if being called via an async
|
||||
* context.
|
||||
* @param key the key for the data being inserted.
|
||||
* @param value the value for the data being inserted.
|
||||
* @param overwrite whether an existing value for a key should be overwritten.
|
||||
* @param expiration_time the time when this entry should be automatically
|
||||
* removed. Set to zero to disable expiration. This time is based on the current network
|
||||
* time.
|
||||
* @return A struct describing the result of the operation, containing a code, an
|
||||
* optional error string, and a ValPtr for operations that return values.
|
||||
*/
|
||||
OperationResult Put(OperationResultCallback* cb, ValPtr key, ValPtr value, bool overwrite = true,
|
||||
double expiration_time = 0);
|
||||
|
||||
/**
|
||||
* Retrieve a value from the backend for a provided key.
|
||||
*
|
||||
* @param cb A callback object for returning status if being called via an async
|
||||
* context.
|
||||
* @param key the key to lookup in the backend.
|
||||
* @return A struct describing the result of the operation, containing a code, an
|
||||
* optional error string, and a ValPtr for operations that return values.
|
||||
*/
|
||||
OperationResult Get(OperationResultCallback* cb, ValPtr key);
|
||||
|
||||
/**
|
||||
* Erases the value for a key from the backend.
|
||||
*
|
||||
* @param cb A callback object for returning status if being called via an async
|
||||
* context.
|
||||
* @param key the key to erase
|
||||
* @return A struct describing the result of the operation, containing a code, an
|
||||
* optional error string, and a ValPtr for operations that return values.
|
||||
*/
|
||||
OperationResult Erase(OperationResultCallback* cb, ValPtr key);
|
||||
|
||||
/**
|
||||
* Returns whether the backend is opened.
|
||||
*/
|
||||
virtual bool IsOpen() = 0;
|
||||
|
||||
bool SupportsSync() const { return (modes & SupportedModes::SYNC) == SupportedModes::SYNC; }
|
||||
bool SupportsAsync() const { return (modes & SupportedModes::ASYNC) == SupportedModes::ASYNC; }
|
||||
|
||||
/**
|
||||
* Optional method to allow a backend to poll for data. This can be used to
|
||||
* mimic sync mode even if the backend only supports async.
|
||||
*/
|
||||
void Poll() { DoPoll(); }
|
||||
|
||||
/**
|
||||
* Returns the options record that was passed to `Manager::OpenBackend` when the
|
||||
* backend was opened.
|
||||
*/
|
||||
const RecordValPtr& Options() const { return backend_options; }
|
||||
|
||||
protected:
|
||||
// Allow the manager to call Open/Close.
|
||||
friend class storage::Manager;
|
||||
|
||||
// Allow OpenResultCallback to call EnqueueConnectionEstablished.
|
||||
friend class storage::OpenResultCallback;
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
*
|
||||
* @param modes A combination of values from SupportedModes. These modes
|
||||
* define whether a backend supports only sync, only async, or both.
|
||||
* @param tag A string representation of the tag for this backend. This
|
||||
* is passed from the Manager through the component factory.
|
||||
*/
|
||||
Backend(uint8_t modes, std::string_view tag) : tag(tag), modes(modes) {}
|
||||
|
||||
/**
|
||||
* Called by the manager system to open the backend.
|
||||
*
|
||||
* @param cb A callback object for returning status if being called via an async
|
||||
* context.
|
||||
* @param options A record storing configuration options for the backend.
|
||||
* @param kt The script-side type of the keys stored in the backend. Used for
|
||||
* validation of types.
|
||||
* @param vt The script-side type of the values stored in the backend. Used for
|
||||
* validation of types and conversion during retrieval.
|
||||
* @return A struct describing the result of the operation, containing a code, an
|
||||
* optional error string, and a ValPtr for operations that return values.
|
||||
*/
|
||||
OperationResult Open(OpenResultCallback* cb, RecordValPtr options, TypePtr kt, TypePtr vt);
|
||||
|
||||
/**
|
||||
* Finalizes the backend when it's being closed.
|
||||
*
|
||||
* @param cb A callback object for returning status if being called via an async
|
||||
* context.
|
||||
* @return A struct describing the result of the operation, containing a code, an
|
||||
* optional error string, and a ValPtr for operations that return values.
|
||||
*/
|
||||
OperationResult Close(OperationResultCallback* cb);
|
||||
|
||||
/**
|
||||
* Removes any entries in the backend that have expired. Can be overridden by
|
||||
* derived classes.
|
||||
*
|
||||
* @param current_network_time The network time as of the start of the
|
||||
* expiration operation.
|
||||
*/
|
||||
void Expire(double current_network_time) { DoExpire(current_network_time); }
|
||||
|
||||
/**
|
||||
* Enqueues the Storage::backend_opened event. This is called automatically
|
||||
* when an OpenResultCallback is completed successfully.
|
||||
*/
|
||||
void EnqueueBackendOpened();
|
||||
|
||||
/**
|
||||
* Enqueues the Storage::backend_lost event with an optional reason
|
||||
* string. This should be called by the backends whenever they lose their
|
||||
* connection.
|
||||
*/
|
||||
void EnqueueBackendLost(std::string_view reason);
|
||||
|
||||
/**
|
||||
* Completes a callback and cleans up the memory if the callback was from a sync
|
||||
* context. This should be called by backends instead of calling the callback's
|
||||
* `Complete` method directly.
|
||||
*/
|
||||
void CompleteCallback(ResultCallback* cb, const OperationResult& data) const;
|
||||
|
||||
TypePtr key_type;
|
||||
TypePtr val_type;
|
||||
RecordValPtr backend_options;
|
||||
|
||||
std::string tag;
|
||||
|
||||
private:
|
||||
/**
|
||||
* Workhorse method for calls to `Manager::OpenBackend()`. See that method for
|
||||
* documentation of the arguments. This must be overridden by all backends.
|
||||
*/
|
||||
virtual OperationResult DoOpen(OpenResultCallback* cb, RecordValPtr options) = 0;
|
||||
|
||||
/**
|
||||
* Workhorse method for calls to `Manager::CloseBackend()`. See that method for
|
||||
* documentation of the arguments. This must be overridden by all backends.
|
||||
*/
|
||||
virtual OperationResult DoClose(OperationResultCallback* cb) = 0;
|
||||
|
||||
/**
|
||||
* Workhorse method for calls to `Backend::Put()`. See that method for
|
||||
* documentation of the arguments. This must be overridden by all backends.
|
||||
*/
|
||||
virtual OperationResult DoPut(OperationResultCallback* cb, ValPtr key, ValPtr value, bool overwrite,
|
||||
double expiration_time) = 0;
|
||||
|
||||
/**
|
||||
* Workhorse method for calls to `Backend::Get()`. See that method for
|
||||
* documentation of the arguments. This must be overridden by all backends.
|
||||
*/
|
||||
virtual OperationResult DoGet(OperationResultCallback* cb, ValPtr key) = 0;
|
||||
|
||||
/**
|
||||
* Workhorse method for calls to `Backend::Erase()`. See that method for
|
||||
* documentation of the arguments. This must be overridden by all backends.
|
||||
*/
|
||||
virtual OperationResult DoErase(OperationResultCallback* cb, ValPtr key) = 0;
|
||||
|
||||
/**
|
||||
* Optional method for backends to override to provide direct polling. This should be
|
||||
* implemented to support synchronous operations on backends that only provide
|
||||
* asynchronous communication. See the built-in Redis backend for an example.
|
||||
*/
|
||||
virtual void DoPoll() {}
|
||||
|
||||
/**
|
||||
* Optional method for backends to override to provide non-native expiration of
|
||||
* items. This is called by the manager on a timer. This can also be used to implement
|
||||
* expiration when reading packet captures.
|
||||
*
|
||||
* @param current_network_time The current network time at which expiration is
|
||||
* happening.
|
||||
*/
|
||||
virtual void DoExpire(double current_network_time) {}
|
||||
|
||||
uint8_t modes;
|
||||
};
|
||||
|
||||
using BackendPtr = zeek::IntrusivePtr<Backend>;
|
||||
|
||||
namespace detail {
|
||||
|
||||
extern OpaqueTypePtr backend_opaque;
|
||||
|
||||
/**
|
||||
* OpaqueVal interface for returning BackendHandle objects to script-land.
|
||||
*/
|
||||
class BackendHandleVal : public OpaqueVal {
|
||||
public:
|
||||
BackendHandleVal() : OpaqueVal(detail::backend_opaque) {}
|
||||
BackendHandleVal(BackendPtr backend) : OpaqueVal(detail::backend_opaque), backend(std::move(backend)) {}
|
||||
~BackendHandleVal() override = default;
|
||||
|
||||
BackendPtr backend;
|
||||
|
||||
protected:
|
||||
IntrusivePtr<Val> DoClone(CloneState* state) override { return {NewRef{}, this}; }
|
||||
|
||||
DECLARE_OPAQUE_VALUE_DATA(BackendHandleVal)
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
|
||||
/**
|
||||
* A specialized version of callback for returning from `open` operations. This returns a
|
||||
* `BackendHandleVal` in the `value` field of the result when successful.
|
||||
*/
|
||||
class OpenResultCallback : public ResultCallback {
|
||||
public:
|
||||
OpenResultCallback(IntrusivePtr<detail::BackendHandleVal> backend);
|
||||
OpenResultCallback(zeek::detail::trigger::TriggerPtr trigger, const void* assoc,
|
||||
IntrusivePtr<detail::BackendHandleVal> backend);
|
||||
void Complete(OperationResult res) override;
|
||||
|
||||
OperationResult Result() const { return result; }
|
||||
IntrusivePtr<detail::BackendHandleVal> Backend() const { return backend; }
|
||||
|
||||
private:
|
||||
OperationResult result{};
|
||||
IntrusivePtr<detail::BackendHandleVal> backend;
|
||||
};
|
||||
|
||||
} // namespace zeek::storage
|
13 src/storage/CMakeLists.txt Normal file
|
@ -0,0 +1,13 @@
|
|||
zeek_add_subdir_library(
|
||||
storage
|
||||
SOURCES
|
||||
Manager.cc
|
||||
Backend.cc
|
||||
Component.cc
|
||||
ReturnCode.cc
|
||||
BIFS
|
||||
storage-async.bif
|
||||
storage-events.bif
|
||||
storage-sync.bif)
|
||||
|
||||
add_subdirectory(backend)
|
25 src/storage/Component.cc Normal file
|
@ -0,0 +1,25 @@
|
|||
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#include "zeek/storage/Component.h"
|
||||
|
||||
#include "zeek/Desc.h"
|
||||
#include "zeek/storage/Manager.h"
|
||||
|
||||
namespace zeek::storage {
|
||||
|
||||
Component::Component(const std::string& name, factory_callback arg_factory)
|
||||
: plugin::Component(plugin::component::STORAGE_BACKEND, name, 0, storage_mgr->GetTagType()) {
|
||||
factory = arg_factory;
|
||||
}
|
||||
|
||||
void Component::Initialize() {
|
||||
InitializeTag();
|
||||
storage_mgr->RegisterComponent(this);
|
||||
}
|
||||
|
||||
void Component::DoDescribe(ODesc* d) const {
|
||||
d->Add("Storage::STORAGE_BACKEND_");
|
||||
d->Add(CanonicalName());
|
||||
}
|
||||
|
||||
} // namespace zeek::storage
|
59 src/storage/Component.h Normal file
|
@ -0,0 +1,59 @@
|
|||
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "zeek/plugin/Component.h"
|
||||
|
||||
namespace zeek::storage {
|
||||
|
||||
class Backend;
|
||||
|
||||
/**
|
||||
* Component description for plugins providing storage backends.
|
||||
*/
|
||||
class Component : public plugin::Component {
|
||||
public:
|
||||
using factory_callback = IntrusivePtr<Backend> (*)(std::string_view);
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
*
|
||||
* @param name The name of the provided backend. This name is used
|
||||
* across the system to identify the backend.
|
||||
*
|
||||
* @param factory A factory function to instantiate instances of the
|
||||
* backend's class, which must be derived directly or indirectly from
|
||||
* storage::Backend. This is typically a static \c Instantiate()
|
||||
* method inside the class that just allocates and returns a new
|
||||
* instance.
|
||||
*/
|
||||
Component(const std::string& name, factory_callback factory);
|
||||
|
||||
/**
|
||||
* Destructor.
|
||||
*/
|
||||
~Component() override = default;
|
||||
|
||||
/**
|
||||
* Initialization function. This function has to be called before any
|
||||
* plugin component functionality is used; it is used to add the
|
||||
* plugin component to the list of components and to initialize tags
|
||||
*/
|
||||
void Initialize() override;
|
||||
|
||||
/**
|
||||
* Returns the backend's factory function.
|
||||
*/
|
||||
factory_callback Factory() const { return factory; }
|
||||
|
||||
protected:
|
||||
/**
|
||||
* Overridden from plugin::Component.
|
||||
*/
|
||||
void DoDescribe(ODesc* d) const override;
|
||||
|
||||
private:
|
||||
factory_callback factory;
|
||||
};
|
||||
|
||||
} // namespace zeek::storage
|
142 src/storage/Manager.cc Normal file
|
@ -0,0 +1,142 @@
|
|||
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#include "zeek/storage/Manager.h"
|
||||
|
||||
#include <atomic>
|
||||
|
||||
#include "zeek/Desc.h"
|
||||
#include "zeek/RunState.h"
|
||||
#include "zeek/storage/ReturnCode.h"
|
||||
|
||||
std::atomic_flag expire_running;
|
||||
|
||||
namespace zeek::storage {
|
||||
|
||||
void detail::ExpirationTimer::Dispatch(double t, bool is_expire) {
|
||||
if ( is_expire )
|
||||
return;
|
||||
|
||||
// If there isn't an active thread, spin up a new one. Expiration may take
|
||||
// some time to complete and we want it to get all the way done before we
|
||||
// start another one running. If this causes us to skip a cycle, that's not
|
||||
// a big deal as the next cycle will catch anything that should be expired
|
||||
// in the interim.
|
||||
if ( ! expire_running.test_and_set() ) {
|
||||
DBG_LOG(DBG_STORAGE, "Starting new expiration thread");
|
||||
storage_mgr->expiration_thread = std::jthread([]() { storage_mgr->Expire(); });
|
||||
}
|
||||
|
||||
storage_mgr->StartExpirationTimer();
|
||||
}
|
||||
|
||||
Manager::Manager() : plugin::ComponentManager<storage::Component>("Storage", "Backend") {}
|
||||
|
||||
Manager::~Manager() {
|
||||
// TODO: should we shut down any existing backends? force-poll until all of their existing
|
||||
// operations finish and close them?
|
||||
|
||||
// Don't leave all of these static objects to leak.
|
||||
ReturnCode::Cleanup();
|
||||
|
||||
// NOTE: The expiration_thread object is a jthread and will be automatically joined
|
||||
// here as the object is destroyed.
|
||||
}
|
||||
|
||||
void Manager::InitPostScript() {
|
||||
ReturnCode::Initialize();
|
||||
|
||||
detail::backend_opaque = make_intrusive<OpaqueType>("Storage::Backend");
|
||||
StartExpirationTimer();
|
||||
}
|
||||
|
||||
zeek::expected<BackendPtr, std::string> Manager::Instantiate(const Tag& type) {
|
||||
Component* c = Lookup(type);
|
||||
if ( ! c ) {
|
||||
return zeek::unexpected<std::string>(
|
||||
util::fmt("Request to open unknown backend (%d:%d)", type.Type(), type.Subtype()));
|
||||
}
|
||||
|
||||
if ( ! c->Factory() ) {
|
||||
return zeek::unexpected<std::string>(
|
||||
util::fmt("Factory invalid for backend %s", GetComponentName(type).c_str()));
|
||||
}
|
||||
|
||||
ODesc d;
|
||||
type.AsVal()->Describe(&d);
|
||||
|
||||
BackendPtr bp = c->Factory()(d.Description());
|
||||
|
||||
if ( ! bp ) {
|
||||
return zeek::unexpected<std::string>(
|
||||
util::fmt("Failed to instantiate backend %s", GetComponentName(type).c_str()));
|
||||
}
|
||||
|
||||
return bp;
|
||||
}
|
||||
|
||||
OperationResult Manager::OpenBackend(BackendPtr backend, OpenResultCallback* cb, RecordValPtr options, TypePtr key_type,
|
||||
TypePtr val_type) {
|
||||
auto res = backend->Open(cb, std::move(options), std::move(key_type), std::move(val_type));
|
||||
if ( res.code != ReturnCode::SUCCESS && res.code != ReturnCode::IN_PROGRESS ) {
|
||||
res.err_str = util::fmt("Failed to open backend %s: %s", backend->Tag(), res.err_str.c_str());
|
||||
return res;
|
||||
}
|
||||
|
||||
RegisterBackend(std::move(backend));
|
||||
|
||||
// TODO: post Storage::backend_opened event
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
OperationResult Manager::CloseBackend(BackendPtr backend, OperationResultCallback* cb) {
|
||||
// Expiration runs on a separate thread and loops over the vector of backends. The mutex
|
||||
// here ensures exclusive access. The lock is taken in its own scope so that it is
// released once the backend has been removed from the vector, before the backend
// is actually closed.
|
||||
{
|
||||
std::unique_lock<std::mutex> lk(backends_mtx);
|
||||
auto it = std::find(backends.begin(), backends.end(), backend);
|
||||
if ( it != backends.end() )
|
||||
backends.erase(it);
|
||||
}
|
||||
|
||||
auto res = backend->Close(cb);
|
||||
|
||||
// TODO: post Storage::backend_lost event
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
void Manager::Expire() {
|
||||
// Expiration runs on a separate thread and loops over the vector of backends. The mutex
|
||||
// here ensures exclusive access.
|
||||
std::unique_lock<std::mutex> lk(backends_mtx);
|
||||
|
||||
DBG_LOG(DBG_STORAGE, "Expiration running, have %zu backends to check", backends.size());
|
||||
|
||||
double current_network_time = run_state::network_time;
|
||||
for ( auto it = backends.begin(); it != backends.end() && ! run_state::terminating; ++it ) {
|
||||
if ( (*it)->IsOpen() )
|
||||
(*it)->Expire(current_network_time);
|
||||
}
|
||||
|
||||
expire_running.clear();
|
||||
}
|
||||
|
||||
void Manager::StartExpirationTimer() {
|
||||
zeek::detail::timer_mgr->Add(
|
||||
new detail::ExpirationTimer(run_state::network_time + zeek::BifConst::Storage::expire_interval));
|
||||
DBG_LOG(DBG_STORAGE, "Next expiration check at %f",
|
||||
run_state::network_time + zeek::BifConst::Storage::expire_interval);
|
||||
}
|
||||
|
||||
void Manager::RegisterBackend(BackendPtr backend) {
|
||||
// Expiration runs on a separate thread and loops over the vector of backends. The mutex
|
||||
// here ensures exclusive access.
|
||||
std::unique_lock<std::mutex> lk(backends_mtx);
|
||||
|
||||
backends.push_back(std::move(backend));
|
||||
DBG_LOG(DBG_STORAGE, "Registered backends: %zu", backends.size());
|
||||
}
|
||||
|
||||
} // namespace zeek::storage
|
103 src/storage/Manager.h Normal file
|
@ -0,0 +1,103 @@
|
|||
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
|
||||
#include "zeek/3rdparty/jthread.hpp"
|
||||
#include "zeek/Timer.h"
|
||||
#include "zeek/plugin/ComponentManager.h"
|
||||
#include "zeek/storage/Backend.h"
|
||||
#include "zeek/storage/Component.h"
|
||||
|
||||
namespace zeek::storage {
|
||||
|
||||
namespace detail {
|
||||
|
||||
class ExpirationTimer final : public zeek::detail::Timer {
|
||||
public:
|
||||
ExpirationTimer(double t) : zeek::detail::Timer(t, zeek::detail::TIMER_STORAGE_EXPIRE) {}
|
||||
~ExpirationTimer() override {}
|
||||
void Dispatch(double t, bool is_expire) override;
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
|
||||
class Manager final : public plugin::ComponentManager<Component> {
|
||||
public:
|
||||
Manager();
|
||||
~Manager();
|
||||
|
||||
/**
|
||||
* Initialization of the manager. This is called late during Zeek's initialization
|
||||
* after any scripts are processed.
|
||||
*/
|
||||
void InitPostScript();
|
||||
|
||||
/**
|
||||
* Instantiates a new backend object. The backend will be in a closed state, and
|
||||
* OpenBackend() will need to be called to fully initialize it.
|
||||
*
|
||||
* @param type The tag for the type of backend being opened.
|
||||
* @return A std::expected containing either a valid BackendPtr with the result of the
|
||||
* operation or a string containing an error message for failure.
|
||||
*/
|
||||
zeek::expected<BackendPtr, std::string> Instantiate(const Tag& type);
|
||||
|
||||
/**
|
||||
* Opens a new storage backend.
|
||||
*
|
||||
* @param backend The backend object to open.
|
||||
* @param cb A callback object for returning status if being called via an async
|
||||
* context.
|
||||
* @param key_type The script-side type of the keys stored in the backend. Used for
|
||||
* validation of types for `key` arguments during all operations.
|
||||
* @param val_type The script-side type of the values stored in the backend. Used for
|
||||
* validation of types for `put` operations and type conversion during `get`
|
||||
* operations.
|
||||
* @return A struct describing the result of the operation, containing a code, an
|
||||
* optional error string, and a ValPtr for operations that return values.
|
||||
*/
|
||||
OperationResult OpenBackend(BackendPtr backend, OpenResultCallback* cb, RecordValPtr options, TypePtr key_type,
|
||||
TypePtr val_type);
|
||||
|
||||
/**
|
||||
* Closes a storage backend.
|
||||
*
|
||||
* @param backend A pointer to the backend being closed.
|
||||
* @param cb A callback object for returning status if being called via an async
|
||||
* context.
|
||||
* @return A struct describing the result of the operation, containing a code, an
|
||||
* optional error string, and a ValPtr for operations that return values.
|
||||
*/
|
||||
OperationResult CloseBackend(BackendPtr backend, OperationResultCallback* cb);
|
||||
|
||||
/**
|
||||
* Runs an expire operation on all open backends. This is called by the expiration
|
||||
* timer and shouldn't be called directly otherwise, since it should only happen on a
|
||||
* separate thread.
|
||||
*/
|
||||
void Expire();
|
||||
|
||||
protected:
|
||||
friend class storage::detail::ExpirationTimer;
|
||||
void RunExpireThread();
|
||||
void StartExpirationTimer();
|
||||
std::jthread expiration_thread;
|
||||
|
||||
friend class storage::OpenResultCallback;
|
||||
void RegisterBackend(BackendPtr backend);
|
||||
|
||||
private:
|
||||
std::vector<BackendPtr> backends;
|
||||
std::mutex backends_mtx;
|
||||
};
|
||||
|
||||
} // namespace zeek::storage
|
||||
|
||||
namespace zeek {
|
||||
|
||||
extern storage::Manager* storage_mgr;
|
||||
|
||||
} // namespace zeek
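For orientation, a minimal sketch of how C++ code could drive the two-step
Instantiate()/OpenBackend() flow declared above, mirroring what the storage BIFs
later in this diff do. The helper name and its parameters are hypothetical; only
storage_mgr, Instantiate(), and OpenBackend() come from this patch.

    // Sketch only, not part of this patch.
    void open_backend_sketch(const zeek::Tag& tag, zeek::storage::OpenResultCallback* cb,
                             zeek::RecordValPtr options, zeek::TypePtr key_type, zeek::TypePtr val_type) {
        auto inst = zeek::storage_mgr->Instantiate(tag);
        if ( ! inst.has_value() )
            return; // inst.error() carries a human-readable failure message

        auto open_res = zeek::storage_mgr->OpenBackend(inst.value(), cb, options, key_type, val_type);
        // For async backends open_res.code is typically IN_PROGRESS; the final
        // status arrives through the callback.
    }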
|
82
src/storage/ReturnCode.cc
Normal file
@@ -0,0 +1,82 @@
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#include "zeek/storage/ReturnCode.h"
|
||||
|
||||
#include "zeek/Val.h"
|
||||
|
||||
namespace zeek::storage {
|
||||
|
||||
EnumValPtr ReturnCode::SUCCESS;
|
||||
EnumValPtr ReturnCode::VAL_TYPE_MISMATCH;
|
||||
EnumValPtr ReturnCode::KEY_TYPE_MISMATCH;
|
||||
EnumValPtr ReturnCode::NOT_CONNECTED;
|
||||
EnumValPtr ReturnCode::TIMEOUT;
|
||||
EnumValPtr ReturnCode::CONNECTION_LOST;
|
||||
EnumValPtr ReturnCode::OPERATION_FAILED;
|
||||
EnumValPtr ReturnCode::KEY_NOT_FOUND;
|
||||
EnumValPtr ReturnCode::KEY_EXISTS;
|
||||
EnumValPtr ReturnCode::CONNECTION_FAILED;
|
||||
EnumValPtr ReturnCode::DISCONNECTION_FAILED;
|
||||
EnumValPtr ReturnCode::INITIALIZATION_FAILED;
|
||||
EnumValPtr ReturnCode::IN_PROGRESS;
|
||||
|
||||
void ReturnCode::Initialize() {
|
||||
static const auto& return_code_type = zeek::id::find_type<zeek::EnumType>("Storage::ReturnCode");
|
||||
|
||||
auto tmp = return_code_type->Lookup("Storage::SUCCESS");
|
||||
SUCCESS = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::VAL_TYPE_MISMATCH");
|
||||
VAL_TYPE_MISMATCH = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::KEY_TYPE_MISMATCH");
|
||||
KEY_TYPE_MISMATCH = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::NOT_CONNECTED");
|
||||
NOT_CONNECTED = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::TIMEOUT");
|
||||
TIMEOUT = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::CONNECTION_LOST");
|
||||
CONNECTION_LOST = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::OPERATION_FAILED");
|
||||
OPERATION_FAILED = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::KEY_NOT_FOUND");
|
||||
KEY_NOT_FOUND = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::KEY_EXISTS");
|
||||
KEY_EXISTS = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::CONNECTION_FAILED");
|
||||
CONNECTION_FAILED = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::DISCONNECTION_FAILED");
|
||||
DISCONNECTION_FAILED = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::INITIALIZATION_FAILED");
|
||||
INITIALIZATION_FAILED = return_code_type->GetEnumVal(tmp);
|
||||
|
||||
tmp = return_code_type->Lookup("Storage::IN_PROGRESS");
|
||||
IN_PROGRESS = return_code_type->GetEnumVal(tmp);
|
||||
}
|
||||
|
||||
void ReturnCode::Cleanup() {
|
||||
SUCCESS.reset();
|
||||
VAL_TYPE_MISMATCH.reset();
|
||||
KEY_TYPE_MISMATCH.reset();
|
||||
NOT_CONNECTED.reset();
|
||||
TIMEOUT.reset();
|
||||
CONNECTION_LOST.reset();
|
||||
OPERATION_FAILED.reset();
|
||||
KEY_NOT_FOUND.reset();
|
||||
KEY_EXISTS.reset();
|
||||
CONNECTION_FAILED.reset();
|
||||
DISCONNECTION_FAILED.reset();
|
||||
INITIALIZATION_FAILED.reset();
|
||||
IN_PROGRESS.reset();
|
||||
}
|
||||
|
||||
} // namespace zeek::storage
|
38
src/storage/ReturnCode.h
Normal file
@@ -0,0 +1,38 @@
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "zeek/IntrusivePtr.h"
|
||||
|
||||
namespace zeek {
|
||||
class EnumVal;
|
||||
using EnumValPtr = IntrusivePtr<EnumVal>;
|
||||
|
||||
namespace storage {
|
||||
|
||||
/**
|
||||
* A collection of EnumValPtrs for the default set of result codes in the storage framework.
* This should be kept up-to-date with the Storage::ReturnCode script-level enum.
|
||||
*/
|
||||
class ReturnCode {
|
||||
public:
|
||||
static void Initialize();
|
||||
static void Cleanup();
|
||||
|
||||
static EnumValPtr SUCCESS;
|
||||
static EnumValPtr VAL_TYPE_MISMATCH;
|
||||
static EnumValPtr KEY_TYPE_MISMATCH;
|
||||
static EnumValPtr NOT_CONNECTED;
|
||||
static EnumValPtr TIMEOUT;
|
||||
static EnumValPtr CONNECTION_LOST;
|
||||
static EnumValPtr OPERATION_FAILED;
|
||||
static EnumValPtr KEY_NOT_FOUND;
|
||||
static EnumValPtr KEY_EXISTS;
|
||||
static EnumValPtr CONNECTION_FAILED;
|
||||
static EnumValPtr DISCONNECTION_FAILED;
|
||||
static EnumValPtr INITIALIZATION_FAILED;
|
||||
static EnumValPtr IN_PROGRESS;
|
||||
};
|
||||
|
||||
} // namespace storage
|
||||
} // namespace zeek
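Backends report their outcomes by packing these handles into OperationResult
values, as the SQLite and Redis backends below do. A small sketch (hypothetical
function, only valid after ReturnCode::Initialize() has run):

    zeek::storage::OperationResult lookup_sketch(bool found) {
        using namespace zeek::storage;
        if ( ! found )
            return {ReturnCode::KEY_NOT_FOUND};
        return {ReturnCode::SUCCESS};
    }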
|
2
src/storage/backend/CMakeLists.txt
Normal file
@@ -0,0 +1,2 @@
add_subdirectory(sqlite)
|
||||
add_subdirectory(redis)
|
22
src/storage/backend/redis/CMakeLists.txt
Normal file
@@ -0,0 +1,22 @@
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
|
||||
|
||||
find_package(Hiredis)
|
||||
|
||||
# Default to building Redis only if the hiredis library was found.
|
||||
#
|
||||
# If a user enabled the backend explicitly (-D ENABLE_STORAGE_BACKEND_REDIS:bool=ON),
|
||||
# but hiredis wasn't found, hard bail.
|
||||
option(ENABLE_STORAGE_BACKEND_REDIS "Enable the Redis storage backend" ${HIREDIS_FOUND})
|
||||
|
||||
if (ENABLE_STORAGE_BACKEND_REDIS)
|
||||
if (NOT HIREDIS_FOUND)
|
||||
message(FATAL_ERROR "ENABLE_STORAGE_BACKEND_REDIS set, but hiredis library not available.")
|
||||
endif ()
|
||||
|
||||
zeek_add_plugin(
|
||||
Zeek Storage_Backend_Redis
|
||||
INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${HIREDIS_INCLUDE_DIRS}
|
||||
DEPENDENCIES ${HIREDIS_LIBRARIES}
|
||||
SOURCES Plugin.cc Redis.cc)
|
||||
|
||||
endif ()
|
22
src/storage/backend/redis/Plugin.cc
Normal file
@@ -0,0 +1,22 @@
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#include "zeek/plugin/Plugin.h"
|
||||
|
||||
#include "zeek/storage/Component.h"
|
||||
#include "zeek/storage/backend/redis/Redis.h"
|
||||
|
||||
namespace zeek::storage::backend::redis {
|
||||
|
||||
class Plugin : public plugin::Plugin {
|
||||
public:
|
||||
plugin::Configuration Configure() override {
|
||||
AddComponent(new storage::Component("REDIS", backend::redis::Redis::Instantiate));
|
||||
|
||||
plugin::Configuration config;
|
||||
config.name = "Zeek::Storage_Backend_Redis";
|
||||
config.description = "Redis backend for storage framework";
|
||||
return config;
|
||||
}
|
||||
} plugin;
|
||||
|
||||
} // namespace zeek::storage::backend::redis
|
532
src/storage/backend/redis/Redis.cc
Normal file
@@ -0,0 +1,532 @@
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#include "zeek/storage/backend/redis/Redis.h"
|
||||
|
||||
#include "zeek/DebugLogger.h"
|
||||
#include "zeek/Func.h"
|
||||
#include "zeek/RunState.h"
|
||||
#include "zeek/Val.h"
|
||||
#include "zeek/iosource/Manager.h"
|
||||
#include "zeek/storage/ReturnCode.h"
|
||||
|
||||
#include "hiredis/adapters/poll.h"
|
||||
#include "hiredis/async.h"
|
||||
#include "hiredis/hiredis.h"
|
||||
|
||||
// Anonymous callback handler methods for the hiredis async API.
|
||||
namespace {
|
||||
|
||||
class Tracer {
|
||||
public:
|
||||
Tracer(const std::string& where) : where(where) {} // DBG_LOG(zeek::DBG_STORAGE, "%s", where.c_str()); }
|
||||
~Tracer() {} // DBG_LOG(zeek::DBG_STORAGE, "%s done", where.c_str()); }
|
||||
std::string where;
|
||||
};
|
||||
|
||||
void redisOnConnect(const redisAsyncContext* ctx, int status) {
|
||||
auto t = Tracer("connect");
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(ctx->data);
|
||||
backend->OnConnect(status);
|
||||
}
|
||||
|
||||
void redisOnDisconnect(const redisAsyncContext* ctx, int status) {
|
||||
auto t = Tracer("disconnect");
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(ctx->data);
|
||||
backend->OnDisconnect(status);
|
||||
}
|
||||
|
||||
void redisPut(redisAsyncContext* ctx, void* reply, void* privdata) {
|
||||
auto t = Tracer("put");
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(ctx->data);
|
||||
auto callback = static_cast<zeek::storage::OperationResultCallback*>(privdata);
|
||||
backend->HandlePutResult(static_cast<redisReply*>(reply), callback);
|
||||
}
|
||||
|
||||
void redisGet(redisAsyncContext* ctx, void* reply, void* privdata) {
|
||||
auto t = Tracer("get");
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(ctx->data);
|
||||
auto callback = static_cast<zeek::storage::OperationResultCallback*>(privdata);
|
||||
backend->HandleGetResult(static_cast<redisReply*>(reply), callback);
|
||||
}
|
||||
|
||||
void redisErase(redisAsyncContext* ctx, void* reply, void* privdata) {
|
||||
auto t = Tracer("erase");
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(ctx->data);
|
||||
auto callback = static_cast<zeek::storage::OperationResultCallback*>(privdata);
|
||||
backend->HandleEraseResult(static_cast<redisReply*>(reply), callback);
|
||||
}
|
||||
|
||||
void redisZADD(redisAsyncContext* ctx, void* reply, void* privdata) {
|
||||
auto t = Tracer("generic");
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(ctx->data);
|
||||
|
||||
// We don't care about the reply from the ZADD, mostly because blocking to poll
|
||||
// for it adds a bunch of complication to DoPut() with having to handle the
|
||||
// reply from SET first.
|
||||
backend->HandleGeneric(nullptr);
|
||||
freeReplyObject(reply);
|
||||
}
|
||||
|
||||
void redisGeneric(redisAsyncContext* ctx, void* reply, void* privdata) {
|
||||
auto t = Tracer("generic");
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(ctx->data);
|
||||
backend->HandleGeneric(static_cast<redisReply*>(reply));
|
||||
}
|
||||
|
||||
// Because we called redisPollAttach in DoOpen(), privdata here is a
|
||||
// redisPollEvents object. We can go through that object to get the context's
|
||||
// data, which contains the backend. Because we overrode these callbacks in
// DoOpen(), we still need to mimic their original behavior so that
// redisPollTick() continues to function correctly.
|
||||
//
|
||||
// Additionally, if we're in the middle of running a manual Expire() because
|
||||
// we're reading a pcap, don't add the file descriptor into iosource_mgr. Manual
|
||||
// calls to Poll() during that will handle reading/writing any data, and we
|
||||
// don't want the contention with the main loop.
|
||||
void redisAddRead(void* privdata) {
|
||||
auto t = Tracer("addread");
|
||||
auto rpe = static_cast<redisPollEvents*>(privdata);
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(rpe->context->data);
|
||||
|
||||
if ( rpe->reading == 0 && ! backend->ExpireRunning() )
|
||||
zeek::iosource_mgr->RegisterFd(rpe->fd, backend, zeek::iosource::IOSource::READ);
|
||||
rpe->reading = 1;
|
||||
}
|
||||
|
||||
void redisDelRead(void* privdata) {
|
||||
auto t = Tracer("delread");
|
||||
auto rpe = static_cast<redisPollEvents*>(privdata);
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(rpe->context->data);
|
||||
|
||||
if ( rpe->reading == 1 && ! backend->ExpireRunning() )
|
||||
zeek::iosource_mgr->UnregisterFd(rpe->fd, backend, zeek::iosource::IOSource::READ);
|
||||
rpe->reading = 0;
|
||||
}
|
||||
|
||||
void redisAddWrite(void* privdata) {
|
||||
auto t = Tracer("addwrite");
|
||||
auto rpe = static_cast<redisPollEvents*>(privdata);
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(rpe->context->data);
|
||||
|
||||
if ( rpe->writing == 0 && ! backend->ExpireRunning() )
|
||||
zeek::iosource_mgr->RegisterFd(rpe->fd, backend, zeek::iosource::IOSource::WRITE);
|
||||
rpe->writing = 1;
|
||||
}
|
||||
|
||||
void redisDelWrite(void* privdata) {
|
||||
auto rpe = static_cast<redisPollEvents*>(privdata);
|
||||
auto t = Tracer("delwrite");
|
||||
auto backend = static_cast<zeek::storage::backend::redis::Redis*>(rpe->context->data);
|
||||
|
||||
if ( rpe->writing == 1 && ! backend->ExpireRunning() )
|
||||
zeek::iosource_mgr->UnregisterFd(rpe->fd, backend, zeek::iosource::IOSource::WRITE);
|
||||
rpe->writing = 0;
|
||||
}
|
||||
|
||||
// Creates a unique_lock based on a condition against a mutex. This is used to
|
||||
// conditionally lock the expire_mutex. We only need to do it while reading
|
||||
// pcaps. The only thread contention happens during Expire(), which only happens
|
||||
// when reading pcaps. It's not worth the cycles to lock the mutex otherwise,
|
||||
// and hiredis will deal with other cross-command contention correctly as long
|
||||
// as it's in a single thread.
|
||||
std::unique_lock<std::mutex> conditionally_lock(bool condition, std::mutex& mutex) {
|
||||
return condition ? std::unique_lock<std::mutex>(mutex) : std::unique_lock<std::mutex>();
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace zeek::storage::backend::redis {
|
||||
|
||||
storage::BackendPtr Redis::Instantiate(std::string_view tag) { return make_intrusive<Redis>(tag); }
|
||||
|
||||
/**
|
||||
* Called by the manager system to open the backend.
|
||||
*/
|
||||
OperationResult Redis::DoOpen(OpenResultCallback* cb, RecordValPtr options) {
|
||||
RecordValPtr backend_options = options->GetField<RecordVal>("redis");
|
||||
|
||||
key_prefix = backend_options->GetField<StringVal>("key_prefix")->ToStdString();
|
||||
|
||||
redisOptions opt = {0};
|
||||
|
||||
StringValPtr host = backend_options->GetField<StringVal>("server_host");
|
||||
if ( host ) {
|
||||
PortValPtr port = backend_options->GetField<PortVal>("server_port");
|
||||
server_addr = util::fmt("%s:%d", host->ToStdStringView().data(), port->Port());
|
||||
REDIS_OPTIONS_SET_TCP(&opt, host->ToStdStringView().data(), port->Port());
|
||||
}
|
||||
else {
|
||||
StringValPtr unix_sock = backend_options->GetField<StringVal>("server_unix_socket");
|
||||
if ( ! unix_sock ) {
|
||||
return {ReturnCode::CONNECTION_FAILED,
|
||||
"Either server_host/server_port or server_unix_socket must be set in Redis options record"};
|
||||
}
|
||||
|
||||
server_addr = unix_sock->ToStdString();
|
||||
REDIS_OPTIONS_SET_UNIX(&opt, server_addr.c_str());
|
||||
}
|
||||
|
||||
opt.options |= REDIS_OPT_PREFER_IPV4;
|
||||
opt.options |= REDIS_OPT_NOAUTOFREEREPLIES;
|
||||
|
||||
struct timeval timeout = {5, 0};
|
||||
opt.connect_timeout = &timeout;
|
||||
|
||||
// The connection request below should be operation #1.
|
||||
active_ops = 1;
|
||||
|
||||
async_ctx = redisAsyncConnectWithOptions(&opt);
|
||||
if ( async_ctx == nullptr || async_ctx->err ) {
|
||||
// This block doesn't necessarily mean the connection failed. It means
|
||||
// that hiredis failed to set up the async context. Connection failure
|
||||
// is returned later via the OnConnect callback.
|
||||
std::string errmsg = util::fmt("Failed to open connection to Redis server at %s", server_addr.c_str());
|
||||
if ( async_ctx ) {
|
||||
errmsg.append(": ");
|
||||
errmsg.append(async_ctx->errstr);
|
||||
}
|
||||
|
||||
redisAsyncFree(async_ctx);
|
||||
async_ctx = nullptr;
|
||||
return {ReturnCode::CONNECTION_FAILED, errmsg};
|
||||
}
|
||||
|
||||
// There's no way to pass privdata down to the connect handler like there is for
|
||||
// the other callbacks. Store the open callback so that it can be dealt with from
|
||||
// OnConnect().
|
||||
open_cb = cb;
|
||||
|
||||
// TODO: Sort out how to pass the zeek callbacks for both open/done to the async
|
||||
// callbacks from hiredis so they can return errors.
|
||||
|
||||
// The context is passed to the handler methods. Setting this data object
|
||||
// pointer allows us to look up the backend in the handlers.
|
||||
async_ctx->data = this;
|
||||
|
||||
redisPollAttach(async_ctx);
|
||||
redisAsyncSetConnectCallback(async_ctx, redisOnConnect);
|
||||
redisAsyncSetDisconnectCallback(async_ctx, redisOnDisconnect);
|
||||
|
||||
// redisAsyncSetConnectCallback sets the flag in the redisPollEvent for writing
|
||||
// so we can add this to our loop as well.
|
||||
zeek::iosource_mgr->RegisterFd(async_ctx->c.fd, this, zeek::iosource::IOSource::WRITE);
|
||||
|
||||
// These four callbacks handle the file descriptor coming and going for read
|
||||
// and write operations for hiredis. Their subsequent callbacks will
|
||||
// register/unregister with iosource_mgr as needed. I tried just registering
|
||||
// full time for both read and write but it leads to weird syncing issues
|
||||
// within the hiredis code. This is safer in regards to the library, even if
|
||||
// it results in waking up our IO loop more frequently.
|
||||
//
|
||||
// redisPollAttach sets these to functions internal to the poll attachment,
|
||||
// but we override them for our own uses. See the callbacks for more info
|
||||
// about why.
|
||||
async_ctx->ev.addRead = redisAddRead;
|
||||
async_ctx->ev.delRead = redisDelRead;
|
||||
async_ctx->ev.addWrite = redisAddWrite;
|
||||
async_ctx->ev.delWrite = redisDelWrite;
|
||||
|
||||
return {ReturnCode::IN_PROGRESS};
|
||||
}
|
||||
|
||||
/**
|
||||
* Finalizes the backend when it's being closed.
|
||||
*/
|
||||
OperationResult Redis::DoClose(OperationResultCallback* cb) {
|
||||
auto locked_scope = conditionally_lock(zeek::run_state::reading_traces, expire_mutex);
|
||||
|
||||
connected = false;
|
||||
close_cb = cb;
|
||||
|
||||
redisAsyncDisconnect(async_ctx);
|
||||
++active_ops;
|
||||
|
||||
return {ReturnCode::IN_PROGRESS};
|
||||
}
|
||||
|
||||
/**
|
||||
* The workhorse method for Put(). This must be implemented by plugins.
|
||||
*/
|
||||
OperationResult Redis::DoPut(OperationResultCallback* cb, ValPtr key, ValPtr value, bool overwrite,
|
||||
double expiration_time) {
|
||||
// The async context will queue operations until it's connected fully.
|
||||
if ( ! connected && ! async_ctx )
|
||||
return {ReturnCode::NOT_CONNECTED};
|
||||
|
||||
auto locked_scope = conditionally_lock(zeek::run_state::reading_traces, expire_mutex);
|
||||
|
||||
std::string format = "SET %s:%s %s";
|
||||
if ( ! overwrite )
|
||||
format.append(" NX");
|
||||
|
||||
auto json_key = key->ToJSON()->ToStdString();
|
||||
auto json_value = value->ToJSON()->ToStdString();
|
||||
|
||||
int status;
|
||||
// Use built-in expiration if reading live data, since time will move
// forward consistently. If reading pcaps, expiration is instead handled
// manually via the sorted set maintained below and processed in DoExpire().
|
||||
if ( expiration_time > 0.0 && ! zeek::run_state::reading_traces ) {
|
||||
format.append(" PXAT %" PRIu64);
|
||||
status = redisAsyncCommand(async_ctx, redisPut, cb, format.c_str(), key_prefix.data(), json_key.data(),
|
||||
json_value.data(), static_cast<uint64_t>(expiration_time * 1e3));
|
||||
}
|
||||
else
|
||||
status = redisAsyncCommand(async_ctx, redisPut, cb, format.c_str(), key_prefix.data(), json_key.data(),
|
||||
json_value.data());
|
||||
|
||||
if ( connected && status == REDIS_ERR )
|
||||
return {ReturnCode::OPERATION_FAILED, util::fmt("Failed to queue put operation: %s", async_ctx->errstr)};
|
||||
|
||||
++active_ops;
|
||||
|
||||
// If reading pcaps insert into a secondary set that's ordered by expiration
|
||||
// time that gets checked by Expire().
|
||||
if ( expiration_time > 0.0 && zeek::run_state::reading_traces ) {
|
||||
format = "ZADD %s_expire";
|
||||
if ( ! overwrite )
|
||||
format.append(" NX");
|
||||
format += " %f %s";
|
||||
|
||||
status = redisAsyncCommand(async_ctx, redisZADD, NULL, format.c_str(), key_prefix.data(), expiration_time,
|
||||
json_key.data());
|
||||
if ( connected && status == REDIS_ERR )
|
||||
return {ReturnCode::OPERATION_FAILED, util::fmt("ZADD operation failed: %s", async_ctx->errstr)};
|
||||
|
||||
++active_ops;
|
||||
}
|
||||
|
||||
return {ReturnCode::IN_PROGRESS};
|
||||
}
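To make the two paths above concrete, this is roughly what DoPut() sends for a
hypothetical key prefix "zeek", JSON-encoded key "k" and value "v", overwrite
disabled, and an expiration timestamp near 1742000000 (all values illustrative):

    // Live capture: Redis expires the key itself via PXAT (milliseconds).
    //   SET zeek:"k" "v" NX PXAT 1742000000000
    // Reading a pcap: no PXAT; the key is also tracked in a sorted set that
    // DoExpire() scans later.
    //   SET zeek:"k" "v" NX
    //   ZADD zeek_expire NX 1742000000.000000 "k"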
|
||||
|
||||
/**
|
||||
* The workhorse method for Get(). This must be implemented for plugins.
|
||||
*/
|
||||
OperationResult Redis::DoGet(OperationResultCallback* cb, ValPtr key) {
|
||||
// The async context will queue operations until it's connected fully.
|
||||
if ( ! connected && ! async_ctx )
|
||||
return {ReturnCode::NOT_CONNECTED};
|
||||
|
||||
auto locked_scope = conditionally_lock(zeek::run_state::reading_traces, expire_mutex);
|
||||
|
||||
int status = redisAsyncCommand(async_ctx, redisGet, cb, "GET %s:%s", key_prefix.data(),
|
||||
key->ToJSON()->ToStdStringView().data());
|
||||
|
||||
if ( connected && status == REDIS_ERR )
|
||||
return {ReturnCode::OPERATION_FAILED, util::fmt("Failed to queue get operation: %s", async_ctx->errstr)};
|
||||
|
||||
++active_ops;
|
||||
|
||||
// There isn't a result to return here. That happens in HandleGetResult for
|
||||
// async operations.
|
||||
return {ReturnCode::IN_PROGRESS};
|
||||
}
|
||||
|
||||
/**
|
||||
* The workhorse method for Erase(). This must be implemented for plugins.
|
||||
*/
|
||||
OperationResult Redis::DoErase(OperationResultCallback* cb, ValPtr key) {
|
||||
// The async context will queue operations until it's connected fully.
|
||||
if ( ! connected && ! async_ctx )
|
||||
return {ReturnCode::NOT_CONNECTED};
|
||||
|
||||
auto locked_scope = conditionally_lock(zeek::run_state::reading_traces, expire_mutex);
|
||||
|
||||
int status = redisAsyncCommand(async_ctx, redisErase, cb, "DEL %s:%s", key_prefix.data(),
|
||||
key->ToJSON()->ToStdStringView().data());
|
||||
|
||||
if ( connected && status == REDIS_ERR )
|
||||
return {ReturnCode::OPERATION_FAILED, async_ctx->errstr};
|
||||
|
||||
++active_ops;
|
||||
|
||||
return {ReturnCode::IN_PROGRESS};
|
||||
}
|
||||
|
||||
void Redis::DoExpire(double current_network_time) {
|
||||
// Expiration is handled natively by Redis if not reading traces.
|
||||
if ( ! connected || ! zeek::run_state::reading_traces )
|
||||
return;
|
||||
|
||||
auto locked_scope = conditionally_lock(zeek::run_state::reading_traces, expire_mutex);
|
||||
|
||||
expire_running = true;
|
||||
|
||||
int status = redisAsyncCommand(async_ctx, redisGeneric, NULL, "ZRANGEBYSCORE %s_expire -inf %f", key_prefix.data(),
|
||||
current_network_time);
|
||||
|
||||
if ( status == REDIS_ERR ) {
|
||||
// TODO: do something with the error?
|
||||
printf("ZRANGEBYSCORE command failed: %s\n", async_ctx->errstr);
|
||||
expire_running = false;
|
||||
return;
|
||||
}
|
||||
|
||||
++active_ops;
|
||||
|
||||
// Expire always happens in a synchronous fashion. Block here until we've received
|
||||
// a response.
|
||||
Poll();
|
||||
redisReply* reply = reply_queue.front();
|
||||
reply_queue.pop_front();
|
||||
|
||||
if ( reply->elements == 0 ) {
|
||||
freeReplyObject(reply);
|
||||
expire_running = false;
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<std::string> elements;
|
||||
for ( size_t i = 0; i < reply->elements; i++ )
|
||||
elements.emplace_back(reply->element[i]->str);
|
||||
|
||||
freeReplyObject(reply);
|
||||
|
||||
// TODO: it's possible to pass multiple keys to a DEL operation but it requires
|
||||
// building an array of the strings, building up the DEL command with entries,
|
||||
// and passing the array as a block somehow. There's no guarantee it'd be faster
|
||||
// anyways.
|
||||
for ( const auto& e : elements ) {
|
||||
status = redisAsyncCommand(async_ctx, redisGeneric, NULL, "DEL %s:%s", key_prefix.data(), e.c_str());
|
||||
++active_ops;
|
||||
Poll();
|
||||
|
||||
redisReply* reply = reply_queue.front();
|
||||
reply_queue.pop_front();
|
||||
freeReplyObject(reply);
|
||||
// TODO: do we care if this failed?
|
||||
}
|
||||
|
||||
// Remove all of the elements from the range-set that match the time range.
|
||||
redisAsyncCommand(async_ctx, redisGeneric, NULL, "ZREMRANGEBYSCORE %s_expire -inf %f", key_prefix.data(),
|
||||
current_network_time);
|
||||
|
||||
++active_ops;
|
||||
Poll();
|
||||
|
||||
reply = reply_queue.front();
|
||||
reply_queue.pop_front();
|
||||
freeReplyObject(reply);
|
||||
// TODO: do we care if this failed?
|
||||
}
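For reference, the pcap-mode expiration above boils down to this command
sequence, again for a hypothetical prefix "zeek" and a current network time of
1742000000.0:

    //   ZRANGEBYSCORE zeek_expire -inf 1742000000.0    -> list of expired keys
    //   DEL zeek:"k"                                     (one DEL per returned key)
    //   ZREMRANGEBYSCORE zeek_expire -inf 1742000000.0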
|
||||
|
||||
void Redis::HandlePutResult(redisReply* reply, OperationResultCallback* callback) {
|
||||
--active_ops;
|
||||
|
||||
OperationResult res{ReturnCode::SUCCESS};
|
||||
if ( ! connected )
|
||||
res = {ReturnCode::NOT_CONNECTED};
|
||||
else if ( ! reply )
|
||||
res = {ReturnCode::OPERATION_FAILED, "Async put operation returned null reply"};
|
||||
else if ( reply && reply->type == REDIS_REPLY_ERROR )
|
||||
res = {ReturnCode::OPERATION_FAILED, util::fmt("Async put operation failed: %s", reply->str)};
|
||||
|
||||
freeReplyObject(reply);
|
||||
CompleteCallback(callback, res);
|
||||
}
|
||||
|
||||
void Redis::HandleGetResult(redisReply* reply, OperationResultCallback* callback) {
|
||||
--active_ops;
|
||||
|
||||
OperationResult res;
|
||||
if ( ! connected )
|
||||
res = {ReturnCode::NOT_CONNECTED};
|
||||
else
|
||||
res = ParseGetReply(reply);
|
||||
|
||||
freeReplyObject(reply);
|
||||
CompleteCallback(callback, res);
|
||||
}
|
||||
|
||||
void Redis::HandleEraseResult(redisReply* reply, OperationResultCallback* callback) {
|
||||
--active_ops;
|
||||
|
||||
OperationResult res{ReturnCode::SUCCESS};
|
||||
|
||||
if ( ! connected )
|
||||
res = {ReturnCode::NOT_CONNECTED};
|
||||
else if ( ! reply )
|
||||
res = {ReturnCode::OPERATION_FAILED, "Async erase operation returned null reply"};
|
||||
else if ( reply && reply->type == REDIS_REPLY_ERROR )
|
||||
res = {ReturnCode::OPERATION_FAILED, util::fmt("Async erase operation failed: %s", reply->str)};
|
||||
|
||||
freeReplyObject(reply);
|
||||
CompleteCallback(callback, res);
|
||||
}
|
||||
|
||||
void Redis::HandleGeneric(redisReply* reply) {
|
||||
--active_ops;
|
||||
|
||||
if ( reply )
|
||||
reply_queue.push_back(reply);
|
||||
}
|
||||
|
||||
void Redis::OnConnect(int status) {
|
||||
DBG_LOG(DBG_STORAGE, "Redis backend: connection event");
|
||||
--active_ops;
|
||||
|
||||
if ( status == REDIS_OK ) {
|
||||
connected = true;
|
||||
CompleteCallback(open_cb, {ReturnCode::SUCCESS});
|
||||
// The connection_established event is sent via the open callback handler.
|
||||
return;
|
||||
}
|
||||
|
||||
connected = false;
|
||||
CompleteCallback(open_cb, {ReturnCode::CONNECTION_FAILED});
|
||||
|
||||
// TODO: we could attempt to reconnect here
|
||||
}
|
||||
|
||||
void Redis::OnDisconnect(int status) {
|
||||
DBG_LOG(DBG_STORAGE, "Redis backend: disconnection event");
|
||||
|
||||
connected = false;
|
||||
if ( status == REDIS_ERR ) {
|
||||
// An error status indicates that the connection was lost unexpectedly and not
|
||||
// via a request from backend.
|
||||
EnqueueBackendLost(async_ctx->errstr);
|
||||
}
|
||||
else {
|
||||
--active_ops;
|
||||
|
||||
EnqueueBackendLost("Client disconnected");
|
||||
CompleteCallback(close_cb, {ReturnCode::SUCCESS});
|
||||
}
|
||||
|
||||
redisAsyncFree(async_ctx);
|
||||
async_ctx = nullptr;
|
||||
}
|
||||
|
||||
void Redis::ProcessFd(int fd, int flags) {
|
||||
auto locked_scope = conditionally_lock(zeek::run_state::reading_traces, expire_mutex);
|
||||
|
||||
if ( (flags & IOSource::ProcessFlags::READ) != 0 )
|
||||
redisAsyncHandleRead(async_ctx);
|
||||
if ( (flags & IOSource::ProcessFlags::WRITE) != 0 )
|
||||
redisAsyncHandleWrite(async_ctx);
|
||||
}
|
||||
|
||||
OperationResult Redis::ParseGetReply(redisReply* reply) const {
|
||||
OperationResult res;
|
||||
|
||||
if ( ! reply )
|
||||
res = {ReturnCode::OPERATION_FAILED, "GET returned null reply"};
|
||||
else if ( ! reply->str )
|
||||
res = {ReturnCode::KEY_NOT_FOUND};
|
||||
else {
|
||||
auto val = zeek::detail::ValFromJSON(reply->str, val_type, Func::nil);
|
||||
if ( std::holds_alternative<ValPtr>(val) )
|
||||
res = {ReturnCode::SUCCESS, "", std::get<ValPtr>(val)};
|
||||
else
|
||||
res = {ReturnCode::OPERATION_FAILED, std::get<std::string>(val)};
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
void Redis::DoPoll() {
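// Drive the hiredis poll adapter until every outstanding async operation has
// been answered; the Handle*Result()/HandleGeneric() callbacks above decrement
// active_ops as replies arrive.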
|
||||
while ( active_ops > 0 )
|
||||
int status = redisPollTick(async_ctx, 0.5);
|
||||
}
|
||||
|
||||
} // namespace zeek::storage::backend::redis
|
83
src/storage/backend/redis/Redis.h
Normal file
@@ -0,0 +1,83 @@
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <mutex>
|
||||
|
||||
#include "zeek/iosource/IOSource.h"
|
||||
#include "zeek/storage/Backend.h"
|
||||
|
||||
// Forward declare some types from hiredis to avoid including the header
|
||||
struct redisAsyncContext;
|
||||
struct redisReply;
|
||||
struct redisPollEvents;
|
||||
|
||||
namespace zeek::storage::backend::redis {
|
||||
class Redis : public Backend, public iosource::IOSource {
|
||||
public:
|
||||
Redis(std::string_view tag) : Backend(SupportedModes::ASYNC, tag), IOSource(true) {}
|
||||
~Redis() override = default;
|
||||
|
||||
static BackendPtr Instantiate(std::string_view tag);
|
||||
|
||||
/**
|
||||
* Returns a descriptive tag representing the source for debugging.
|
||||
* This has to be overridden for Redis because IOSource requires it.
|
||||
*
|
||||
* @return The debugging name.
|
||||
*/
|
||||
const char* Tag() override { return tag.c_str(); }
|
||||
|
||||
// IOSource interface
|
||||
double GetNextTimeout() override { return -1; }
|
||||
void Process() override {}
|
||||
void ProcessFd(int fd, int flags) override;
|
||||
|
||||
// Hiredis async interface
|
||||
void OnConnect(int status);
|
||||
void OnDisconnect(int status);
|
||||
|
||||
void HandlePutResult(redisReply* reply, OperationResultCallback* callback);
|
||||
void HandleGetResult(redisReply* reply, OperationResultCallback* callback);
|
||||
void HandleEraseResult(redisReply* reply, OperationResultCallback* callback);
|
||||
void HandleGeneric(redisReply* reply);
|
||||
|
||||
/**
|
||||
* Returns whether the backend is opened.
|
||||
*/
|
||||
bool IsOpen() override { return connected; }
|
||||
|
||||
bool ExpireRunning() const { return expire_running.load(); }
|
||||
|
||||
private:
|
||||
OperationResult DoOpen(OpenResultCallback* cb, RecordValPtr options) override;
|
||||
OperationResult DoClose(OperationResultCallback* cb) override;
|
||||
OperationResult DoPut(OperationResultCallback* cb, ValPtr key, ValPtr value, bool overwrite,
|
||||
double expiration_time) override;
|
||||
OperationResult DoGet(OperationResultCallback* cb, ValPtr key) override;
|
||||
OperationResult DoErase(OperationResultCallback* cb, ValPtr key) override;
|
||||
void DoExpire(double current_network_time) override;
|
||||
void DoPoll() override;
|
||||
|
||||
OperationResult ParseGetReply(redisReply* reply) const;
|
||||
|
||||
redisAsyncContext* async_ctx = nullptr;
|
||||
|
||||
// When running in sync mode, this is used to keep a queue of replies as
|
||||
// responses come in from the remote calls until we run out of data to
|
||||
// poll.
|
||||
std::deque<redisReply*> reply_queue;
|
||||
|
||||
OpenResultCallback* open_cb;
|
||||
OperationResultCallback* close_cb;
|
||||
std::mutex expire_mutex;
|
||||
|
||||
std::string server_addr;
|
||||
std::string key_prefix;
|
||||
|
||||
std::atomic<bool> connected = false;
|
||||
std::atomic<bool> expire_running = false;
|
||||
std::atomic<int> active_ops = 0;
|
||||
};
|
||||
|
||||
} // namespace zeek::storage::backend::redis
|
42
src/storage/backend/redis/cmake/FindHiredis.cmake
Normal file
@@ -0,0 +1,42 @@
include(FindPackageHandleStandardArgs)
|
||||
|
||||
find_library(
|
||||
HIREDIS_LIBRARY NAMES "libhiredis${CMAKE_SHARED_LIBRARY_SUFFIX}"
|
||||
"libhiredis${CMAKE_STATIC_LIBRARY_SUFFIX}" HINTS ${HIREDIS_ROOT_DIR}/lib)
|
||||
|
||||
find_path(HIREDIS_INCLUDE_DIR NAMES hiredis/hiredis.h HINTS ${HIREDIS_ROOT_DIR}/include)
|
||||
|
||||
find_package_handle_standard_args(Hiredis FOUND_VAR HIREDIS_FOUND REQUIRED_VARS HIREDIS_LIBRARY
|
||||
HIREDIS_INCLUDE_DIR)
|
||||
|
||||
if (HIREDIS_FOUND)
|
||||
|
||||
# The hiredis library must be at least v1.0.0 to have all of the API bits that
|
||||
# we need. We can scrape that out of the header.
|
||||
file(STRINGS "${HIREDIS_INCLUDE_DIR}/hiredis/hiredis.h" HIREDIS_MAJOR_VERSION_H
|
||||
REGEX "^#define HIREDIS_MAJOR [0-9]+$")
|
||||
file(STRINGS "${HIREDIS_INCLUDE_DIR}/hiredis/hiredis.h" HIREDIS_MINOR_VERSION_H
|
||||
REGEX "^#define HIREDIS_MINOR [0-9]+$")
|
||||
file(STRINGS "${HIREDIS_INCLUDE_DIR}/hiredis/hiredis.h" HIREDIS_PATCH_VERSION_H
|
||||
REGEX "^#define HIREDIS_PATCH [0-9]+$")
|
||||
string(REGEX REPLACE "^.*MAJOR ([0-9]+)$" "\\1" HIREDIS_MAJOR_VERSION
|
||||
"${HIREDIS_MAJOR_VERSION_H}")
|
||||
string(REGEX REPLACE "^.*MINOR ([0-9]+)$" "\\1" HIREDIS_MINOR_VERSION
|
||||
"${HIREDIS_MINOR_VERSION_H}")
|
||||
string(REGEX REPLACE "^.*PATCH ([0-9]+)$" "\\1" HIREDIS_PATCH_VERSION
|
||||
"${HIREDIS_PATCH_VERSION_H}")
|
||||
|
||||
set(HIREDIS_VERSION
|
||||
"${HIREDIS_MAJOR_VERSION}.${HIREDIS_MINOR_VERSION}.${HIREDIS_PATCH_VERSION}")
|
||||
|
||||
if (HIREDIS_VERSION VERSION_LESS "1.0.0")
|
||||
message(
|
||||
STATUS "Hiredis library version ${HIREDIS_VERSION} is too old, need v1.0.0 or later.")
|
||||
unset(HIREDIS_FOUND)
|
||||
|
||||
else ()
|
||||
set(HIREDIS_LIBRARIES ${HIREDIS_LIBRARY})
|
||||
set(HIREDIS_INCLUDE_DIRS ${HIREDIS_INCLUDE_DIR})
|
||||
set(HIREDIS_FOUND ${HIREDIS_FOUND})
|
||||
endif ()
|
||||
endif ()
|
3
src/storage/backend/sqlite/CMakeLists.txt
Normal file
@@ -0,0 +1,3 @@
zeek_add_plugin(
|
||||
Zeek Storage_Backend_SQLite
|
||||
SOURCES SQLite.cc Plugin.cc)
|
22
src/storage/backend/sqlite/Plugin.cc
Normal file
@@ -0,0 +1,22 @@
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#include "zeek/plugin/Plugin.h"
|
||||
|
||||
#include "zeek/storage/Component.h"
|
||||
#include "zeek/storage/backend/sqlite/SQLite.h"
|
||||
|
||||
namespace zeek::storage::backend::sqlite {
|
||||
|
||||
class Plugin : public plugin::Plugin {
|
||||
public:
|
||||
plugin::Configuration Configure() override {
|
||||
AddComponent(new storage::Component("SQLITE", backend::sqlite::SQLite::Instantiate));
|
||||
|
||||
plugin::Configuration config;
|
||||
config.name = "Zeek::Storage_Backend_SQLite";
|
||||
config.description = "SQLite backend for storage framework";
|
||||
return config;
|
||||
}
|
||||
} plugin;
|
||||
|
||||
} // namespace zeek::storage::backend::sqlite
|
296
src/storage/backend/sqlite/SQLite.cc
Normal file
@@ -0,0 +1,296 @@
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#include "zeek/storage/backend/sqlite/SQLite.h"
|
||||
|
||||
#include "zeek/3rdparty/sqlite3.h"
|
||||
#include "zeek/Func.h"
|
||||
#include "zeek/Val.h"
|
||||
#include "zeek/storage/ReturnCode.h"
|
||||
|
||||
namespace zeek::storage::backend::sqlite {
|
||||
|
||||
storage::BackendPtr SQLite::Instantiate(std::string_view tag) { return make_intrusive<SQLite>(tag); }
|
||||
|
||||
/**
|
||||
* Called by the manager system to open the backend.
|
||||
*/
|
||||
OperationResult SQLite::DoOpen(OpenResultCallback* cb, RecordValPtr options) {
|
||||
if ( sqlite3_threadsafe() == 0 ) {
|
||||
std::string res =
|
||||
"SQLite reports that it is not threadsafe. Zeek needs a threadsafe version of "
|
||||
"SQLite. Aborting";
|
||||
Error(res.c_str());
|
||||
return {ReturnCode::INITIALIZATION_FAILED, res};
|
||||
}
|
||||
|
||||
// Allow connections to the same DB to use a single data/schema cache. This
// also allows simultaneous writes to one file.
|
||||
#ifndef ZEEK_TSAN
|
||||
sqlite3_enable_shared_cache(1);
|
||||
#endif
|
||||
|
||||
RecordValPtr backend_options = options->GetField<RecordVal>("sqlite");
|
||||
StringValPtr path = backend_options->GetField<StringVal>("database_path");
|
||||
full_path = zeek::filesystem::path(path->ToStdString()).string();
|
||||
table_name = backend_options->GetField<StringVal>("table_name")->ToStdString();
|
||||
|
||||
if ( auto open_res =
|
||||
CheckError(sqlite3_open_v2(full_path.c_str(), &db,
|
||||
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_FULLMUTEX, NULL));
|
||||
open_res.code != ReturnCode::SUCCESS ) {
|
||||
sqlite3_close_v2(db);
|
||||
db = nullptr;
|
||||
return open_res;
|
||||
}
|
||||
|
||||
std::string create = "create table if not exists " + table_name + " (";
|
||||
create.append("key_str text primary key, value_str text not null, expire_time real);");
|
||||
|
||||
char* errorMsg = nullptr;
|
||||
if ( int res = sqlite3_exec(db, create.c_str(), NULL, NULL, &errorMsg); res != SQLITE_OK ) {
|
||||
std::string err = util::fmt("Error executing table creation statement: %s", errorMsg);
|
||||
Error(err.c_str());
|
||||
sqlite3_free(errorMsg);
|
||||
Close(nullptr);
|
||||
return {ReturnCode::INITIALIZATION_FAILED, err};
|
||||
}
|
||||
|
||||
if ( int res = sqlite3_exec(db, "pragma integrity_check", NULL, NULL, &errorMsg); res != SQLITE_OK ) {
|
||||
std::string err = util::fmt("Error executing integrity check: %s", errorMsg);
|
||||
Error(err.c_str());
|
||||
sqlite3_free(errorMsg);
|
||||
Close(nullptr);
|
||||
return {ReturnCode::INITIALIZATION_FAILED, err};
|
||||
}
|
||||
|
||||
auto tuning_params = backend_options->GetField<TableVal>("tuning_params")->ToMap();
|
||||
for ( const auto& [k, v] : tuning_params ) {
|
||||
auto ks = k->AsListVal()->Idx(0)->AsStringVal();
|
||||
auto vs = v->AsStringVal();
|
||||
std::string cmd = util::fmt("pragma %s = %s", ks->ToStdStringView().data(), vs->ToStdStringView().data());
|
||||
|
||||
if ( int res = sqlite3_exec(db, cmd.c_str(), NULL, NULL, &errorMsg); res != SQLITE_OK ) {
|
||||
std::string err = util::fmt("Error executing tuning pragma statement: %s", errorMsg);
|
||||
Error(err.c_str());
|
||||
sqlite3_free(errorMsg);
|
||||
Close(nullptr);
|
||||
return {ReturnCode::INITIALIZATION_FAILED, err};
|
||||
}
|
||||
}
|
||||
|
||||
static std::array<std::string, 5> statements =
|
||||
{util::fmt("insert into %s (key_str, value_str, expire_time) values(?, ?, ?)", table_name.c_str()),
|
||||
util::fmt("insert into %s (key_str, value_str, expire_time) values(?, ?, ?) ON CONFLICT(key_str) "
|
||||
"DO UPDATE SET value_str=?",
|
||||
table_name.c_str()),
|
||||
util::fmt("select value_str from %s where key_str=?", table_name.c_str()),
|
||||
util::fmt("delete from %s where key_str=?", table_name.c_str()),
|
||||
util::fmt("delete from %s where expire_time > 0 and expire_time != 0 and expire_time <= ?",
|
||||
table_name.c_str())};
|
||||
|
||||
std::array<unique_stmt_ptr, 5> stmt_ptrs;
|
||||
int i = 0;
|
||||
for ( const auto& stmt : statements ) {
|
||||
sqlite3_stmt* ps;
|
||||
if ( auto prep_res = CheckError(sqlite3_prepare_v2(db, stmt.c_str(), stmt.size(), &ps, NULL));
|
||||
prep_res.code != ReturnCode::SUCCESS ) {
|
||||
Close(nullptr);
|
||||
return prep_res;
|
||||
}
|
||||
|
||||
stmt_ptrs[i++] = unique_stmt_ptr(ps, [](sqlite3_stmt* stmt) { sqlite3_finalize(stmt); });
|
||||
}
|
||||
|
||||
put_stmt = std::move(stmt_ptrs[0]);
|
||||
put_update_stmt = std::move(stmt_ptrs[1]);
|
||||
get_stmt = std::move(stmt_ptrs[2]);
|
||||
erase_stmt = std::move(stmt_ptrs[3]);
|
||||
expire_stmt = std::move(stmt_ptrs[4]);
|
||||
|
||||
sqlite3_busy_timeout(db, 5000);
|
||||
|
||||
return {ReturnCode::SUCCESS};
|
||||
}
|
||||
|
||||
/**
|
||||
* Finalizes the backend when it's being closed.
|
||||
*/
|
||||
OperationResult SQLite::DoClose(OperationResultCallback* cb) {
|
||||
OperationResult op_res{ReturnCode::SUCCESS};
|
||||
|
||||
if ( db ) {
|
||||
put_stmt.reset();
|
||||
put_update_stmt.reset();
|
||||
get_stmt.reset();
|
||||
erase_stmt.reset();
|
||||
expire_stmt.reset();
|
||||
|
||||
char* errmsg;
|
||||
if ( int res = sqlite3_exec(db, "pragma optimize", NULL, NULL, &errmsg); res != SQLITE_OK ) {
|
||||
op_res = {ReturnCode::DISCONNECTION_FAILED, util::fmt("Sqlite failed to optimize at shutdown: %s", errmsg)};
|
||||
sqlite3_free(errmsg);
|
||||
// TODO: we're shutting down. does this error matter other than being informational?
|
||||
}
|
||||
|
||||
if ( int res = sqlite3_close_v2(db); res != SQLITE_OK ) {
|
||||
if ( op_res.err_str.empty() )
|
||||
op_res.err_str = "Sqlite could not close connection";
|
||||
}
|
||||
|
||||
db = nullptr;
|
||||
}
|
||||
|
||||
return op_res;
|
||||
}
|
||||
|
||||
/**
|
||||
* The workhorse method for Put(). This must be implemented by plugins.
|
||||
*/
|
||||
OperationResult SQLite::DoPut(OperationResultCallback* cb, ValPtr key, ValPtr value, bool overwrite,
|
||||
double expiration_time) {
|
||||
if ( ! db )
|
||||
return {ReturnCode::NOT_CONNECTED};
|
||||
|
||||
auto json_key = key->ToJSON();
|
||||
auto json_value = value->ToJSON();
|
||||
|
||||
sqlite3_stmt* stmt;
|
||||
if ( ! overwrite )
|
||||
stmt = put_stmt.get();
|
||||
else
|
||||
stmt = put_update_stmt.get();
|
||||
|
||||
auto key_str = json_key->ToStdStringView();
|
||||
if ( auto res = CheckError(sqlite3_bind_text(stmt, 1, key_str.data(), key_str.size(), SQLITE_STATIC));
|
||||
res.code != ReturnCode::SUCCESS ) {
|
||||
sqlite3_reset(stmt);
|
||||
return res;
|
||||
}
|
||||
|
||||
auto value_str = json_value->ToStdStringView();
|
||||
if ( auto res = CheckError(sqlite3_bind_text(stmt, 2, value_str.data(), value_str.size(), SQLITE_STATIC));
|
||||
res.code != ReturnCode::SUCCESS ) {
|
||||
sqlite3_reset(stmt);
|
||||
return res;
|
||||
}
|
||||
|
||||
if ( auto res = CheckError(sqlite3_bind_double(stmt, 3, expiration_time)); res.code != ReturnCode::SUCCESS ) {
|
||||
sqlite3_reset(stmt);
|
||||
return res;
|
||||
}
|
||||
|
||||
if ( overwrite ) {
|
||||
if ( auto res = CheckError(sqlite3_bind_text(stmt, 4, value_str.data(), value_str.size(), SQLITE_STATIC));
|
||||
res.code != ReturnCode::SUCCESS ) {
|
||||
sqlite3_reset(stmt);
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
return Step(stmt, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* The workhorse method for Get(). This must be implemented for plugins.
|
||||
*/
|
||||
OperationResult SQLite::DoGet(OperationResultCallback* cb, ValPtr key) {
|
||||
if ( ! db )
|
||||
return {ReturnCode::NOT_CONNECTED};
|
||||
|
||||
auto json_key = key->ToJSON();
|
||||
auto stmt = get_stmt.get();
|
||||
|
||||
auto key_str = json_key->ToStdStringView();
|
||||
if ( auto res = CheckError(sqlite3_bind_text(stmt, 1, key_str.data(), key_str.size(), SQLITE_STATIC));
|
||||
res.code != ReturnCode::SUCCESS ) {
|
||||
sqlite3_reset(stmt);
|
||||
return res;
|
||||
}
|
||||
|
||||
return Step(stmt, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* The workhorse method for Erase(). This must be implemented for plugins.
|
||||
*/
|
||||
OperationResult SQLite::DoErase(OperationResultCallback* cb, ValPtr key) {
|
||||
if ( ! db )
|
||||
return {ReturnCode::NOT_CONNECTED};
|
||||
|
||||
auto json_key = key->ToJSON();
|
||||
auto stmt = erase_stmt.get();
|
||||
|
||||
auto key_str = json_key->ToStdStringView();
|
||||
if ( auto res = CheckError(sqlite3_bind_text(stmt, 1, key_str.data(), key_str.size(), SQLITE_STATIC));
|
||||
res.code != ReturnCode::SUCCESS ) {
|
||||
sqlite3_reset(stmt);
|
||||
return res;
|
||||
}
|
||||
|
||||
return Step(stmt, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes any entries in the backend that have expired. Can be overridden by
|
||||
* derived classes.
|
||||
*/
|
||||
void SQLite::DoExpire(double current_network_time) {
|
||||
auto stmt = expire_stmt.get();
|
||||
|
||||
if ( auto res = CheckError(sqlite3_bind_double(stmt, 1, current_network_time)); res.code != ReturnCode::SUCCESS ) {
|
||||
sqlite3_reset(stmt);
|
||||
// TODO: do something with the error here?
|
||||
}
|
||||
|
||||
Step(stmt, false);
|
||||
}
|
||||
|
||||
// Returns OPERATION_FAILED plus the SQLite error message if the status code indicates an error, SUCCESS otherwise.
|
||||
OperationResult SQLite::CheckError(int code) {
|
||||
if ( code != SQLITE_OK && code != SQLITE_DONE ) {
|
||||
return {ReturnCode::OPERATION_FAILED, util::fmt("SQLite call failed: %s", sqlite3_errmsg(db)), nullptr};
|
||||
}
|
||||
|
||||
return {ReturnCode::SUCCESS};
|
||||
}
|
||||
|
||||
OperationResult SQLite::Step(sqlite3_stmt* stmt, bool parse_value) {
|
||||
OperationResult ret;
|
||||
|
||||
int step_status = sqlite3_step(stmt);
|
||||
if ( step_status == SQLITE_ROW ) {
|
||||
if ( parse_value ) {
|
||||
// The first result column (index 0) holds the value string.
|
||||
const char* text = (const char*)sqlite3_column_text(stmt, 0);
|
||||
auto val = zeek::detail::ValFromJSON(text, val_type, Func::nil);
|
||||
sqlite3_reset(stmt);
|
||||
if ( std::holds_alternative<ValPtr>(val) ) {
|
||||
ValPtr val_v = std::get<ValPtr>(val);
|
||||
ret = {ReturnCode::SUCCESS, "", val_v};
|
||||
}
|
||||
else {
|
||||
ret = {ReturnCode::OPERATION_FAILED, std::get<std::string>(val)};
|
||||
}
|
||||
}
|
||||
else {
|
||||
ret = {ReturnCode::OPERATION_FAILED, "sqlite3_step should not have returned a value"};
|
||||
}
|
||||
}
|
||||
else if ( step_status == SQLITE_DONE ) {
|
||||
if ( parse_value )
|
||||
ret = {ReturnCode::KEY_NOT_FOUND};
|
||||
else
|
||||
ret = {ReturnCode::SUCCESS};
|
||||
}
|
||||
else if ( step_status == SQLITE_BUSY )
|
||||
// TODO: this could retry a number of times instead of just failing
|
||||
ret = {ReturnCode::TIMEOUT};
|
||||
else
|
||||
ret = {ReturnCode::OPERATION_FAILED};
|
||||
|
||||
sqlite3_reset(stmt);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
} // namespace zeek::storage::backend::sqlite
|
62
src/storage/backend/sqlite/SQLite.h
Normal file
@@ -0,0 +1,62 @@
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "zeek/storage/Backend.h"
|
||||
|
||||
// Forward declare these to avoid including sqlite3.h here
|
||||
struct sqlite3;
|
||||
struct sqlite3_stmt;
|
||||
|
||||
namespace zeek::storage::backend::sqlite {
|
||||
|
||||
class SQLite : public Backend {
|
||||
public:
|
||||
SQLite(std::string_view tag) : Backend(SupportedModes::SYNC, tag) {}
|
||||
~SQLite() override = default;
|
||||
|
||||
static BackendPtr Instantiate(std::string_view tag);
|
||||
|
||||
/**
|
||||
* Returns whether the backend is opened.
|
||||
*/
|
||||
bool IsOpen() override { return db != nullptr; }
|
||||
|
||||
private:
|
||||
OperationResult DoOpen(OpenResultCallback* cb, RecordValPtr options) override;
|
||||
OperationResult DoClose(OperationResultCallback* cb) override;
|
||||
OperationResult DoPut(OperationResultCallback* cb, ValPtr key, ValPtr value, bool overwrite,
|
||||
double expiration_time) override;
|
||||
OperationResult DoGet(OperationResultCallback* cb, ValPtr key) override;
|
||||
OperationResult DoErase(OperationResultCallback* cb, ValPtr key) override;
|
||||
void DoExpire(double current_network_time) override;
|
||||
|
||||
/**
|
||||
* Checks whether a status code returned by an sqlite call is a success.
|
||||
*
|
||||
* @return A result structure containing a result code and an optional error
|
||||
* string based on the status code.
|
||||
*/
|
||||
OperationResult CheckError(int code);
|
||||
|
||||
/**
|
||||
* Abstracts calls to sqlite3_step to properly create an OperationResult
|
||||
* structure based on the result.
|
||||
*/
|
||||
OperationResult Step(sqlite3_stmt* stmt, bool parse_value = false);
|
||||
|
||||
sqlite3* db = nullptr;
|
||||
|
||||
using stmt_deleter = std::function<void(sqlite3_stmt*)>;
|
||||
using unique_stmt_ptr = std::unique_ptr<sqlite3_stmt, stmt_deleter>;
|
||||
unique_stmt_ptr put_stmt;
|
||||
unique_stmt_ptr put_update_stmt;
|
||||
unique_stmt_ptr get_stmt;
|
||||
unique_stmt_ptr erase_stmt;
|
||||
unique_stmt_ptr expire_stmt;
|
||||
|
||||
std::string full_path;
|
||||
std::string table_name;
|
||||
};
|
||||
|
||||
} // namespace zeek::storage::backend::sqlite
|
190
src/storage/storage-async.bif
Normal file
@@ -0,0 +1,190 @@
##! Functions related to asynchronous storage operations.
|
||||
|
||||
%%{
|
||||
#include "zeek/Frame.h"
|
||||
#include "zeek/Trigger.h"
|
||||
#include "zeek/storage/Backend.h"
|
||||
#include "zeek/storage/Manager.h"
|
||||
#include "zeek/storage/ReturnCode.h"
|
||||
|
||||
using namespace zeek;
|
||||
using namespace zeek::storage;
|
||||
|
||||
// Utility method for initializing a trigger from a Frame passed into a BIF. This is
|
||||
// used by the asynchronous methods to make sure the trigger is setup before starting
|
||||
// the operations. It also does some sanity checking to ensure the trigger is valid.
|
||||
|
||||
static zeek::detail::trigger::TriggerPtr init_trigger(zeek::detail::Frame* frame) {
|
||||
auto trigger = frame->GetTrigger();
|
||||
|
||||
if ( ! trigger ) {
|
||||
emit_builtin_error("Asynchronous storage operations must be called via a when-condition");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if ( auto timeout = trigger->TimeoutValue(); timeout < 0 ) {
|
||||
emit_builtin_error("Asynchronous storage operations must specify a timeout block");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
frame->SetDelayed();
|
||||
trigger->Hold();
|
||||
|
||||
return {NewRef{}, trigger};
|
||||
}
|
||||
|
||||
// Utility method to cast the handle val passed into BIF methods into a form that can
|
||||
// be used to start storage operations. The method is also used by the BIFs in sync.bif.
|
||||
static zeek::expected<storage::detail::BackendHandleVal*, OperationResult> cast_handle(Val* handle) {
|
||||
auto b = static_cast<storage::detail::BackendHandleVal*>(handle);
|
||||
|
||||
if ( ! b )
|
||||
return zeek::unexpected<OperationResult>(
|
||||
OperationResult{ReturnCode::OPERATION_FAILED, "Invalid storage handle"});
|
||||
else if ( ! b->backend->IsOpen() )
|
||||
return zeek::unexpected<OperationResult>(OperationResult{ReturnCode::NOT_CONNECTED, "Backend is closed"});
|
||||
|
||||
return b;
|
||||
}
|
||||
|
||||
static void handle_async_result(const IntrusivePtr<Backend>& backend, ResultCallback* cb,
|
||||
const OperationResult& op_result) {
|
||||
if ( op_result.code != ReturnCode::IN_PROGRESS || ! backend->SupportsAsync() ) {
|
||||
// We need to complete the callback early if:
|
||||
// 1. The operation didn't start up successfully. For async operations, this means
|
||||
// it didn't report back IN_PROGRESS.
|
||||
// 2. The backend doesn't support async. This means we already blocked in order
// to get here.
|
||||
cb->Complete(op_result);
|
||||
delete cb;
|
||||
}
|
||||
else if ( run_state::reading_traces ) {
|
||||
// If the backend is truly async and we're reading traces, we need to fake being
|
||||
// in sync mode because otherwise time doesn't move forward correctly.
|
||||
backend->Poll();
|
||||
}
|
||||
}
|
||||
|
||||
%%}
|
||||
|
||||
module Storage::Async;
|
||||
|
||||
function Storage::Async::__open_backend%(btype: Storage::Backend, options: any, key_type: any, val_type: any%): Storage::OperationResult
    %{
    auto trigger = init_trigger(frame);
    if ( ! trigger )
        return nullptr;

    auto btype_val = IntrusivePtr<EnumVal>{NewRef{}, btype->AsEnumVal()};
    Tag tag{btype_val};

    auto b = storage_mgr->Instantiate(tag);

    if ( ! b.has_value() ) {
        trigger->Cache(frame->GetTriggerAssoc(),
                       new StringVal(util::fmt("Failed to instantiate backend: %s", b.error().c_str())));
        trigger->Release();
        return nullptr;
    }

    auto bh = make_intrusive<storage::detail::BackendHandleVal>(b.value());

    auto cb = new OpenResultCallback(trigger, frame->GetTriggerAssoc(), bh);
    auto kt = key_type->AsTypeVal()->GetType()->AsTypeType()->GetType();
    auto vt = val_type->AsTypeVal()->GetType()->AsTypeType()->GetType();
    auto options_val = IntrusivePtr<RecordVal>{NewRef{}, options->AsRecordVal()};
    auto op_result = storage_mgr->OpenBackend(b.value(), cb, options_val, kt, vt);

    handle_async_result(b.value(), cb, op_result);

    return nullptr;
    %}

function Storage::Async::__close_backend%(backend: opaque of Storage::BackendHandle%) : Storage::OperationResult
    %{
    auto trigger = init_trigger(frame);
    if ( ! trigger )
        return nullptr;

    auto cb = new OperationResultCallback(trigger, frame->GetTriggerAssoc());
    auto b = cast_handle(backend);
    if ( ! b ) {
        cb->Complete(b.error());
        delete cb;
        return nullptr;
    }

    auto op_result = storage_mgr->CloseBackend((*b)->backend, cb);
    handle_async_result((*b)->backend, cb, op_result);

    return nullptr;
    %}

function Storage::Async::__put%(backend: opaque of Storage::BackendHandle, key: any, value: any,
                                overwrite: bool, expire_time: interval%): Storage::OperationResult
    %{
    auto trigger = init_trigger(frame);
    if ( ! trigger )
        return nullptr;

    auto cb = new OperationResultCallback(trigger, frame->GetTriggerAssoc());
    auto b = cast_handle(backend);
    if ( ! b ) {
        cb->Complete(b.error());
        delete cb;
        return nullptr;
    }

    if ( expire_time > 0.0 )
        expire_time += run_state::network_time;

    auto key_v = IntrusivePtr<Val>{NewRef{}, key};
    auto val_v = IntrusivePtr<Val>{NewRef{}, value};
    auto op_result = (*b)->backend->Put(cb, key_v, val_v, overwrite, expire_time);
    handle_async_result((*b)->backend, cb, op_result);

    return nullptr;
    %}

function Storage::Async::__get%(backend: opaque of Storage::BackendHandle, key: any%): Storage::OperationResult
    %{
    auto trigger = init_trigger(frame);
    if ( ! trigger )
        return nullptr;

    auto cb = new OperationResultCallback(trigger, frame->GetTriggerAssoc());
    auto b = cast_handle(backend);
    if ( ! b ) {
        cb->Complete(b.error());
        delete cb;
        return nullptr;
    }

    auto key_v = IntrusivePtr<Val>{NewRef{}, key};
    auto op_result = (*b)->backend->Get(cb, key_v);
    handle_async_result((*b)->backend, cb, op_result);

    return nullptr;
    %}

function Storage::Async::__erase%(backend: opaque of Storage::BackendHandle, key: any%): Storage::OperationResult
    %{
    auto trigger = init_trigger(frame);
    if ( ! trigger )
        return nullptr;

    auto cb = new OperationResultCallback(trigger, frame->GetTriggerAssoc());
    auto b = cast_handle(backend);
    if ( ! b ) {
        cb->Complete(b.error());
        delete cb;
        return nullptr;
    }

    auto key_v = IntrusivePtr<Val>{NewRef{}, key};
    auto op_result = (*b)->backend->Erase(cb, key_v);
    handle_async_result((*b)->backend, cb, op_result);

    return nullptr;
    %}
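The __-prefixed functions above are internal entry points; scripts are expected to call the Storage::Async wrappers from "when" conditions, since each call completes through a trigger rather than returning a result directly. A minimal, hypothetical sketch of that pattern, modeled on the Redis/SQLite btests included in this change (the database path and key/value strings below are only illustrative):

@load base/frameworks/storage/async
@load policy/frameworks/storage/backend/sqlite

type str: string;

event zeek_init()
    {
    local opts: Storage::BackendOptions;
    # Illustrative options; any configured backend is used the same way.
    opts$sqlite = [ $database_path="example.sqlite", $table_name="testing" ];

    when [opts] ( local open_res = Storage::Async::open_backend(Storage::SQLITE, opts, str, str) )
        {
        local b = open_res$value;

        when [b] ( local put_res = Storage::Async::put(b, [ $key="key1234", $value="value5678" ]) )
            {
            print "put result", put_res$code;
            }
        timeout 5sec
            {
            print "put request timed out";
            }
        }
    timeout 5sec
        {
        print "open request timed out";
        }
    }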
31  src/storage/storage-events.bif  Normal file
@ -0,0 +1,31 @@
##! Events related to storage operations.

module Storage;

## Generated automatically when a new backend connection is opened successfully.
##
## tag: A string describing the backend that enqueued this event. This is typically
##      generated by the ``Tag()`` method in the backend plugin.
##
## options: A copy of the configuration options passed to
##          :zeek:see:`Storage::Async::open_backend` or
##          :zeek:see:`Storage::Sync::open_backend` when the backend was initially opened.
##
## .. zeek:see:: Storage::backend_lost
event Storage::backend_opened%(tag: string, options: any%);

## May be generated when a backend connection is lost, both normally and
## unexpectedly. This event depends on the backends implementing handling for
## it, and is not generated automatically by the storage framework.
##
## tag: A string describing the backend that enqueued this event. This is typically
##      generated by the ``Tag()`` method in the backend plugin.
##
## options: A copy of the configuration options passed to
##          :zeek:see:`Storage::Async::open_backend` or
##          :zeek:see:`Storage::Sync::open_backend` when the backend was initially opened.
##
## reason: A string describing why the connection was lost.
##
## .. zeek:see:: Storage::backend_opened
event Storage::backend_lost%(tag: string, options: any, reason: string%);
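Both events carry the backend's tag and the original options record, so a handler can key its reaction off either one. A brief, hypothetical sketch of consuming them (the framework only generates backend_opened automatically; backend_lost depends on the individual backend, as noted above):

@load base/frameworks/storage/sync

event Storage::backend_opened(tag: string, options: any)
    {
    print fmt("storage backend %s opened", tag);
    }

event Storage::backend_lost(tag: string, options: any, reason: string)
    {
    # Reconnection handling is backend-specific; this handler only logs the loss.
    print fmt("storage backend %s lost: %s", tag, reason);
    }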
165  src/storage/storage-sync.bif  Normal file
@ -0,0 +1,165 @@
##! Functions related to synchronous storage operations.

%%{
#include "zeek/storage/Backend.h"
#include "zeek/storage/Manager.h"
#include "zeek/storage/ReturnCode.h"

using namespace zeek;
using namespace zeek::storage;

// Utility method to cast the handle val passed into BIF methods into a form that can
// be used to start storage operations. This is a duplicate of the method in async.bif
// due to how utility methods are built by bifcl.
/*
static zeek::expected<storage::detail::BackendHandleVal*, OperationResult> cast_handle(Val* handle) {
    auto b = static_cast<storage::detail::BackendHandleVal*>(handle);

    if ( ! b )
        return zeek::unexpected<OperationResult>(
            OperationResult{ReturnCode::OPERATION_FAILED, "Invalid storage handle"});
    else if ( ! b->backend->IsOpen() )
        return zeek::unexpected<OperationResult>(OperationResult{ReturnCode::NOT_CONNECTED, "Backend is closed"});

    return b;
}
*/
%%}

module Storage::Sync;

function Storage::Sync::__open_backend%(btype: Storage::Backend, options: any, key_type: any, val_type: any%): Storage::OperationResult
    %{
    auto btype_val = IntrusivePtr<EnumVal>{NewRef{}, btype->AsEnumVal()};
    Tag tag{btype_val};

    auto b = storage_mgr->Instantiate(tag);

    if ( ! b.has_value() ) {
        emit_builtin_error(b.error().c_str());
        return val_mgr->Bool(false);
    }

    auto bh = make_intrusive<storage::detail::BackendHandleVal>(b.value());

    auto cb = new OpenResultCallback(bh);
    auto kt = key_type->AsTypeVal()->GetType()->AsTypeType()->GetType();
    auto vt = val_type->AsTypeVal()->GetType()->AsTypeType()->GetType();
    auto options_val = IntrusivePtr<RecordVal>{NewRef{}, options->AsRecordVal()};
    auto op_result = storage_mgr->OpenBackend(b.value(), cb, options_val, kt, vt);

    // If the backend only supports async, block until it's ready and then pull the result out of
    // the callback.
    if ( ! b.value()->SupportsSync() ) {
        b.value()->Poll();
        op_result = cb->Result();
    }

    delete cb;

    return op_result.BuildVal();
    %}

function Storage::Sync::__close_backend%(backend: opaque of Storage::BackendHandle%) : Storage::OperationResult
    %{
    OperationResult op_result;

    auto b = cast_handle(backend);
    if ( ! b )
        op_result = b.error();
    else {
        auto cb = new OperationResultCallback();
        op_result = storage_mgr->CloseBackend((*b)->backend, cb);

        // If the backend only supports async, block until it's ready and then pull the result out of
        // the callback.
        if ( ! (*b)->backend->SupportsSync() ) {
            (*b)->backend->Poll();
            op_result = cb->Result();
        }

        delete cb;
    }

    return op_result.BuildVal();
    %}

function Storage::Sync::__put%(backend: opaque of Storage::BackendHandle, key: any, value: any,
                               overwrite: bool, expire_time: interval%): Storage::OperationResult
    %{
    OperationResult op_result;

    auto b = cast_handle(backend);
    if ( ! b )
        op_result = b.error();
    else {
        if ( expire_time > 0.0 )
            expire_time += run_state::network_time;

        auto cb = new OperationResultCallback();
        auto key_v = IntrusivePtr<Val>{NewRef{}, key};
        auto val_v = IntrusivePtr<Val>{NewRef{}, value};
        op_result = (*b)->backend->Put(cb, key_v, val_v, overwrite, expire_time);

        // If the backend only supports async, block until it's ready and then pull the result out of
        // the callback.
        if ( ! (*b)->backend->SupportsSync() ) {
            (*b)->backend->Poll();
            op_result = cb->Result();
        }

        delete cb;
    }

    return op_result.BuildVal();
    %}

function Storage::Sync::__get%(backend: opaque of Storage::BackendHandle, key: any%): Storage::OperationResult
    %{
    OperationResult op_result;

    auto b = cast_handle(backend);
    if ( ! b )
        op_result = b.error();
    else {
        auto cb = new OperationResultCallback();
        auto key_v = IntrusivePtr<Val>{NewRef{}, key};
        op_result = (*b)->backend->Get(cb, key_v);

        // If the backend only supports async, block until it's ready and then pull the result out of
        // the callback.
        if ( ! (*b)->backend->SupportsSync() ) {
            (*b)->backend->Poll();
            op_result = cb->Result();
        }

        delete cb;
    }

    return op_result.BuildVal();
    %}

function Storage::Sync::__erase%(backend: opaque of Storage::BackendHandle, key: any%): Storage::OperationResult
    %{
    OperationResult op_result;

    auto b = cast_handle(backend);
    if ( ! b )
        op_result = b.error();
    else {
        auto cb = new OperationResultCallback();
        auto key_v = IntrusivePtr<Val>{NewRef{}, key};
        op_result = (*b)->backend->Erase(cb, key_v);

        // If the backend only supports async, block until it's ready and then pull the result out of
        // the callback.
        if ( ! (*b)->backend->SupportsSync() ) {
            (*b)->backend->Poll();
            op_result = cb->Result();
        }

        delete cb;
    }

    return op_result.BuildVal();
    %}
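Unlike the async variants, these return a completed Storage::OperationResult directly (polling the backend first when it is async-only), so they can be used inline, e.g. in zeek_init. A minimal sketch mirroring the SQLite btests later in this change; the database path and key/value strings are illustrative only:

@load base/frameworks/storage/sync
@load policy/frameworks/storage/backend/sqlite

type str: string;

event zeek_init()
    {
    local opts: Storage::BackendOptions;
    opts$sqlite = [ $database_path="example.sqlite", $table_name="testing" ];

    local open_res = Storage::Sync::open_backend(Storage::SQLITE, opts, str, str);
    if ( open_res$code != Storage::SUCCESS )
        return;

    local b = open_res$value;
    Storage::Sync::put(b, [ $key="key1234", $value="value5678" ]);

    local get_res = Storage::Sync::get(b, "key1234");
    if ( get_res$code == Storage::SUCCESS && get_res?$value )
        print "stored value", get_res$value as string;

    Storage::Sync::close_backend(b);
    }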
@ -97,6 +97,15 @@ namespace filesystem = ghc::filesystem;
inline constexpr std::string_view path_list_separator = ":";
#endif

#include "zeek/3rdparty/nonstd/expected.hpp"
namespace zeek {
template<typename T, typename E>
using expected = nonstd::expected<T, E>;

template<typename E>
using unexpected = nonstd::unexpected<E>;
} // namespace zeek

using zeek_int_t = int64_t;
using zeek_uint_t = uint64_t;
@ -65,6 +65,7 @@
#ifdef HAVE_SPICY
#include "zeek/spicy/manager.h"
#endif
#include "zeek/storage/Manager.h"
#include "zeek/supervisor/Supervisor.h"
#include "zeek/telemetry/Manager.h"
#include "zeek/threading/Manager.h"

@ -178,6 +179,7 @@ zeek::detail::trigger::Manager* zeek::detail::trigger_mgr = nullptr;
#ifdef HAVE_SPICY
zeek::spicy::Manager* zeek::spicy_mgr = nullptr;
#endif
zeek::storage::Manager* zeek::storage_mgr = nullptr;

zeek::cluster::Manager* zeek::cluster::manager = nullptr;
zeek::cluster::Backend* zeek::cluster::backend = nullptr;

@ -414,6 +416,7 @@ static void terminate_zeek() {
#ifdef HAVE_SPICY
    delete spicy_mgr;
#endif
    delete storage_mgr;

    // free the global scope
    pop_scope();

@ -686,6 +689,7 @@ SetupResult setup(int argc, char** argv, Options* zopts) {
#ifdef HAVE_SPICY
    spicy_mgr = new spicy::Manager(); // registers as plugin with the plugin manager
#endif
    storage_mgr = new storage::Manager();

    plugin_mgr->InitPreScript();
    file_mgr->InitPreScript();

@ -873,6 +877,7 @@ SetupResult setup(int argc, char** argv, Options* zopts) {

    timer_mgr->InitPostScript();
    event_mgr.InitPostScript();
    storage_mgr->InitPostScript();

    if ( supervisor_mgr )
        supervisor_mgr->InitPostScript();
@ -321,6 +321,10 @@ void ScriptInfo::DoInitPostScript() {
        const auto& log_serializer_id = zeek::detail::global_scope()->Find("Cluster::LogSerializerTag");
        types.push_back(new IdentifierInfo(log_serializer_id, this));
    }
    else if ( name == "base/frameworks/storage/main.zeek" ) {
        const auto& backend_id = zeek::detail::global_scope()->Find("Storage::Backend");
        types.push_back(new IdentifierInfo(backend_id, this));
    }
}

vector<string> ScriptInfo::GetComments() const { return comments; }
@ -8,6 +8,6 @@
|
|||
#fields ts host port_num port_proto service
|
||||
#types time addr port enum set[string]
|
||||
XXXXXXXXXX.XXXXXX 192.168.2.1 53 udp DNS
|
||||
XXXXXXXXXX.XXXXXX 192.168.2.16 1577 tcp (empty)
|
||||
XXXXXXXXXX.XXXXXX 192.168.2.16 1576 tcp (empty)
|
||||
XXXXXXXXXX.XXXXXX 192.168.2.16 1577 tcp (empty)
|
||||
#close XXXX-XX-XX-XX-XX-XX
|
||||
|
|
|
@ -160,6 +160,9 @@ scripts/base/init-frameworks-and-bifs.zeek
|
|||
build/scripts/base/bif/bloom-filter.bif.zeek
|
||||
build/scripts/base/bif/cardinality-counter.bif.zeek
|
||||
build/scripts/base/bif/top-k.bif.zeek
|
||||
build/scripts/base/bif/storage-async.bif.zeek
|
||||
build/scripts/base/bif/storage-events.bif.zeek
|
||||
build/scripts/base/bif/storage-sync.bif.zeek
|
||||
build/scripts/base/bif/spicy.bif.zeek
|
||||
build/scripts/base/bif/plugins/__load__.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_BitTorrent.events.bif.zeek
|
||||
|
|
|
@ -160,6 +160,9 @@ scripts/base/init-frameworks-and-bifs.zeek
|
|||
build/scripts/base/bif/bloom-filter.bif.zeek
|
||||
build/scripts/base/bif/cardinality-counter.bif.zeek
|
||||
build/scripts/base/bif/top-k.bif.zeek
|
||||
build/scripts/base/bif/storage-async.bif.zeek
|
||||
build/scripts/base/bif/storage-events.bif.zeek
|
||||
build/scripts/base/bif/storage-sync.bif.zeek
|
||||
build/scripts/base/bif/spicy.bif.zeek
|
||||
build/scripts/base/bif/plugins/__load__.zeek
|
||||
build/scripts/base/bif/plugins/Zeek_BitTorrent.events.bif.zeek
|
||||
|
@ -367,6 +370,10 @@ scripts/base/init-default.zeek
|
|||
scripts/base/frameworks/telemetry/__load__.zeek
|
||||
scripts/base/frameworks/telemetry/main.zeek
|
||||
scripts/base/misc/version.zeek
|
||||
scripts/base/frameworks/storage/__load__.zeek
|
||||
scripts/base/frameworks/storage/async.zeek
|
||||
scripts/base/frameworks/storage/main.zeek
|
||||
scripts/base/frameworks/storage/sync.zeek
|
||||
scripts/base/frameworks/spicy/__load__.zeek
|
||||
scripts/base/frameworks/spicy/main.zeek
|
||||
scripts/base/protocols/conn/__load__.zeek
|
||||
|
|
|
@ -1,5 +1,14 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
@XXXXXXXXXX.XXXXXX expired a
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp, proto=6]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp, proto=6]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=17500/udp, resp_h=172.16.238.255, resp_p=17500/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp, proto=6]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.131, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired a
|
||||
@XXXXXXXXXX.XXXXXX expired copy [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired copy [orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired copy [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp, proto=6]
|
||||
|
@ -9,19 +18,10 @@
|
|||
@XXXXXXXXXX.XXXXXX expired copy [orig_h=172.16.238.131, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired copy [orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp, proto=6]
|
||||
@XXXXXXXXXX.XXXXXX expired copy [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired a
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=49656/tcp, resp_h=172.16.238.131, resp_p=22/tcp, proto=6]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=49658/tcp, resp_h=172.16.238.131, resp_p=80/tcp, proto=6]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=17500/udp, resp_h=172.16.238.255, resp_p=17500/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=49657/tcp, resp_h=172.16.238.131, resp_p=80/tcp, proto=6]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.131, orig_p=37975/udp, resp_h=172.16.238.2, resp_p=53/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.131, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=fe80::20c:29ff:febd:6f01, orig_p=5353/udp, resp_h=ff02::fb, resp_p=5353/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired copy [orig_h=172.16.238.1, orig_p=49659/tcp, resp_h=172.16.238.131, resp_p=21/tcp, proto=6]
|
||||
@XXXXXXXXXX.XXXXXX expired copy [orig_h=172.16.238.131, orig_p=45126/udp, resp_h=172.16.238.2, resp_p=53/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.131, orig_p=45126/udp, resp_h=172.16.238.2, resp_p=53/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.1, orig_p=49659/tcp, resp_h=172.16.238.131, resp_p=21/tcp, proto=6]
|
||||
@XXXXXXXXXX.XXXXXX expired copy [orig_h=172.16.238.1, orig_p=49659/tcp, resp_h=172.16.238.131, resp_p=21/tcp, proto=6]
|
||||
@XXXXXXXXXX.XXXXXX expired copy [orig_h=172.16.238.131, orig_p=45126/udp, resp_h=172.16.238.2, resp_p=53/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.131, orig_p=55368/udp, resp_h=172.16.238.2, resp_p=53/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.131, orig_p=33818/udp, resp_h=172.16.238.2, resp_p=53/udp, proto=17]
|
||||
@XXXXXXXXXX.XXXXXX expired [orig_h=172.16.238.131, orig_p=50205/udp, resp_h=172.16.238.2, resp_p=53/udp, proto=17]
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
546 seen BiFs, 0 unseen BiFs (), 0 new BiFs ()
|
||||
556 seen BiFs, 0 unseen BiFs (), 0 new BiFs ()
|
||||
|
|
|
@ -505,6 +505,9 @@
|
|||
0.000000 MetaHookPost LoadFile(0, ./sftp, <...>/sftp.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./spicy.bif.zeek, <...>/spicy.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./stats.bif.zeek, <...>/stats.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./storage-async.bif.zeek, <...>/storage-async.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./storage-events.bif.zeek, <...>/storage-events.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./storage-sync.bif.zeek, <...>/storage-sync.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./store, <...>/store.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./store.bif.zeek, <...>/store.bif.zeek) -> -1
|
||||
0.000000 MetaHookPost LoadFile(0, ./strings.bif.zeek, <...>/strings.bif.zeek) -> -1
|
||||
|
@ -815,6 +818,9 @@
|
|||
0.000000 MetaHookPost LoadFileExtended(0, ./sftp, <...>/sftp.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./spicy.bif.zeek, <...>/spicy.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./stats.bif.zeek, <...>/stats.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./storage-async.bif.zeek, <...>/storage-async.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./storage-events.bif.zeek, <...>/storage-events.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./storage-sync.bif.zeek, <...>/storage-sync.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./store, <...>/store.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./store.bif.zeek, <...>/store.bif.zeek) -> (-1, <no content>)
|
||||
0.000000 MetaHookPost LoadFileExtended(0, ./strings.bif.zeek, <...>/strings.bif.zeek) -> (-1, <no content>)
|
||||
|
@ -1458,6 +1464,9 @@
|
|||
0.000000 MetaHookPre LoadFile(0, ./sftp, <...>/sftp.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./spicy.bif.zeek, <...>/spicy.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./stats.bif.zeek, <...>/stats.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./storage-async.bif.zeek, <...>/storage-async.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./storage-events.bif.zeek, <...>/storage-events.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./storage-sync.bif.zeek, <...>/storage-sync.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./store, <...>/store.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./store.bif.zeek, <...>/store.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFile(0, ./strings.bif.zeek, <...>/strings.bif.zeek)
|
||||
|
@ -1768,6 +1777,9 @@
|
|||
0.000000 MetaHookPre LoadFileExtended(0, ./sftp, <...>/sftp.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./spicy.bif.zeek, <...>/spicy.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./stats.bif.zeek, <...>/stats.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./storage-async.bif.zeek, <...>/storage-async.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./storage-events.bif.zeek, <...>/storage-events.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./storage-sync.bif.zeek, <...>/storage-sync.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./store, <...>/store.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./store.bif.zeek, <...>/store.bif.zeek)
|
||||
0.000000 MetaHookPre LoadFileExtended(0, ./strings.bif.zeek, <...>/strings.bif.zeek)
|
||||
|
@ -2422,6 +2434,9 @@
|
|||
0.000000 | HookLoadFile ./sftp <...>/sftp.zeek
|
||||
0.000000 | HookLoadFile ./spicy.bif.zeek <...>/spicy.bif.zeek
|
||||
0.000000 | HookLoadFile ./stats.bif.zeek <...>/stats.bif.zeek
|
||||
0.000000 | HookLoadFile ./storage-async.bif.zeek <...>/storage-async.bif.zeek
|
||||
0.000000 | HookLoadFile ./storage-events.bif.zeek <...>/storage-events.bif.zeek
|
||||
0.000000 | HookLoadFile ./storage-sync.bif.zeek <...>/storage-sync.bif.zeek
|
||||
0.000000 | HookLoadFile ./store <...>/store.zeek
|
||||
0.000000 | HookLoadFile ./store.bif.zeek <...>/store.bif.zeek
|
||||
0.000000 | HookLoadFile ./strings.bif.zeek <...>/strings.bif.zeek
|
||||
|
@ -2732,6 +2747,9 @@
|
|||
0.000000 | HookLoadFileExtended ./sftp <...>/sftp.zeek
|
||||
0.000000 | HookLoadFileExtended ./spicy.bif.zeek <...>/spicy.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./stats.bif.zeek <...>/stats.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./storage-async.bif.zeek <...>/storage-async.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./storage-events.bif.zeek <...>/storage-events.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./storage-sync.bif.zeek <...>/storage-sync.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./store <...>/store.zeek
|
||||
0.000000 | HookLoadFileExtended ./store.bif.zeek <...>/store.bif.zeek
|
||||
0.000000 | HookLoadFileExtended ./strings.bif.zeek <...>/strings.bif.zeek
|
||||
|
|
5  testing/btest/Baseline/plugins.storage/output  Normal file
@ -0,0 +1,5 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
results of trying to use closed handle: get: Storage::NOT_CONNECTED, put: Storage::NOT_CONNECTED, erase: Storage::NOT_CONNECTED
|
||||
open result 2, [code=Storage::OPERATION_FAILED, error_str=Failed to open backend Storage::STORAGEDUMMY: open_fail was set to true, returning error, value=<opaque of BackendHandleVal>]
|
||||
close result of closed handle, [code=Storage::NOT_CONNECTED, error_str=Backend is closed, value=<uninitialized>]
|
1  testing/btest/Baseline/plugins.storage/zeek-stderr  Normal file
@ -0,0 +1 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
|
@ -0,0 +1 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
|
@ -0,0 +1,8 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value={
|
||||
[2] = b,
|
||||
[1] = a,
|
||||
[3] = c
|
||||
}]
|
|
@ -0,0 +1 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
|
@ -0,0 +1,4 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
erase result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::KEY_NOT_FOUND, error_str=<uninitialized>, value=<uninitialized>]
|
|
@ -0,0 +1,2 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
1627225025.686472 received termination signal
|
|
@ -0,0 +1,10 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result 1, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
put result 2, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value1234]
|
||||
get result same as inserted, T
|
||||
get result 2, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value2345]
|
||||
get result 2 same as inserted, T
|
||||
get result 1 after expiration, [code=Storage::KEY_NOT_FOUND, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result 2 after expiration, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value2345]
|
|
@ -0,0 +1 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
|
@ -0,0 +1,5 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value7890]
|
||||
get result same as inserted, T
|
|
@ -0,0 +1,5 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value5678]
|
||||
get result same as inserted, T
|
|
@ -0,0 +1,6 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value5678]
|
||||
get result same as inserted, T
|
||||
close result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
|
@ -0,0 +1,4 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
worker-1, put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
redis_data_written
|
||||
worker-1, [code=Storage::SUCCESS, error_str=<uninitialized>, value=5678]
|
|
@ -0,0 +1,3 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
redis_data_written
|
||||
worker-2, [code=Storage::SUCCESS, error_str=<uninitialized>, value=5678]
|
|
@ -0,0 +1,4 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open_result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
Storage::backend_opened, Storage::REDIS, [redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=<uninitialized>, key_prefix=testing]]
|
||||
Storage::backend_lost, Storage::REDIS, [redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=<uninitialized>, key_prefix=testing]], Server closed the connection
|
|
@ -0,0 +1,7 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open_result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value1234]
|
||||
get result same as inserted, T
|
||||
erase result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result 2, [code=Storage::KEY_NOT_FOUND, error_str=<uninitialized>, value=<uninitialized>]
|
|
@ -0,0 +1,10 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result 1, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
put result 2, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value1234]
|
||||
get result same as inserted, T
|
||||
get result 2, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value2345]
|
||||
get result 2 same as inserted, T
|
||||
get result 1 after expiration, [code=Storage::KEY_NOT_FOUND, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result 2 after expiration, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value2345]
|
|
@ -0,0 +1,10 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result 1, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
put result 2, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value1234]
|
||||
get result same as inserted, T
|
||||
get result 2, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value2345]
|
||||
get result 2 same as inserted, T
|
||||
get result 1 after expiration, [code=Storage::KEY_NOT_FOUND, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result 2 after expiration, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value2345]
|
|
@ -0,0 +1,10 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open_result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value1234]
|
||||
get result same as inserted, T
|
||||
overwrite put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value5678]
|
||||
get result same as inserted, T
|
||||
Storage::backend_opened, Storage::REDIS, [redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=<uninitialized>, key_prefix=testing]]
|
||||
Storage::backend_lost, Storage::REDIS, [redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=<uninitialized>, key_prefix=testing]], Client disconnected
|
|
@ -0,0 +1,3 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
1362692526.869344 received termination signal
|
||||
1362692526.869344 warning in <...>/find-filtered-trace.zeek, line 69: The analyzed trace file was determined to contain only TCP control packets, which may indicate it's been pre-filtered. By default, Zeek reports the missing segments for this type of trace, but the 'detect_filtered_trace' option may be toggled if that's not desired.
|
|
@ -0,0 +1,5 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value5678]
|
||||
get result same as inserted, T
|
|
@ -0,0 +1,2 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
received termination signal
|
|
@ -0,0 +1,6 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value5678]
|
||||
get result same as inserted, T
|
||||
closed succesfully
|
|
@ -0,0 +1,2 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
received termination signal
|
|
@ -0,0 +1,11 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
Storage::backend_opened, Storage::SQLITE, [sqlite=[database_path=test.sqlite, table_name=testing, tuning_params={
|
||||
[synchronous] = normal,
|
||||
[temp_store] = memory,
|
||||
[journal_mode] = WAL
|
||||
}]]
|
||||
open result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
put result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<uninitialized>]
|
||||
get result, [code=Storage::SUCCESS, error_str=<uninitialized>, value=value5678]
|
||||
get result same as inserted, T
|
||||
closed succesfully
|
|
@ -0,0 +1 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
|
@ -0,0 +1,5 @@
|
|||
### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
|
||||
Open result, [code=Storage::OPERATION_FAILED, error_str=Failed to open backend Storage::SQLITE: SQLite call failed: unable to open database file, value=<opaque of BackendHandleVal>]
|
||||
Open result 2, [code=Storage::SUCCESS, error_str=<uninitialized>, value=<opaque of BackendHandleVal>]
|
||||
Put result with bad key type, [code=Storage::KEY_TYPE_MISMATCH, error_str=<uninitialized>, value=<uninitialized>]
|
||||
Put result on closed handle, [code=Storage::NOT_CONNECTED, error_str=Backend is closed, value=<uninitialized>]
|
BIN  testing/btest/Files/storage-test.sqlite  Binary file not shown.
|
@ -177,6 +177,16 @@ global known_BiFs = set(
|
|||
"Reporter::warning",
|
||||
"Spicy::__resource_usage",
|
||||
"Spicy::__toggle_analyzer",
|
||||
"Storage::Async::__close_backend",
|
||||
"Storage::Async::__erase",
|
||||
"Storage::Async::__get",
|
||||
"Storage::Async::__open_backend",
|
||||
"Storage::Async::__put",
|
||||
"Storage::Sync::__close_backend",
|
||||
"Storage::Sync::__erase",
|
||||
"Storage::Sync::__get",
|
||||
"Storage::Sync::__open_backend",
|
||||
"Storage::Sync::__put",
|
||||
"Supervisor::__create",
|
||||
"Supervisor::__destroy",
|
||||
"Supervisor::__is_supervised",
|
||||
|
|
0  testing/btest/plugins/storage-plugin/.btest-ignore  Normal file
16  testing/btest/plugins/storage-plugin/CMakeLists.txt  Normal file
@ -0,0 +1,16 @@
|
|||
cmake_minimum_required(VERSION 3.15)
|
||||
|
||||
project(Zeek-Plugin-Storage-Demo)
|
||||
|
||||
if (NOT ZEEK_DIST)
|
||||
message(FATAL_ERROR "ZEEK_DIST not set")
|
||||
endif ()
|
||||
|
||||
set(CMAKE_MODULE_PATH ${ZEEK_DIST}/cmake)
|
||||
|
||||
include(ZeekPlugin)
|
||||
|
||||
zeek_plugin_begin(Testing StorageDummy)
|
||||
zeek_plugin_cc(src/Plugin.cc)
|
||||
zeek_plugin_cc(src/StorageDummy.cc)
|
||||
zeek_plugin_end()
|
23  testing/btest/plugins/storage-plugin/src/Plugin.cc  Normal file
@ -0,0 +1,23 @@
|
|||
#include "Plugin.h"
|
||||
|
||||
#include "zeek/storage/Component.h"
|
||||
|
||||
#include "StorageDummy.h"
|
||||
|
||||
namespace btest::plugin::Testing_StorageDummy {
|
||||
Plugin plugin;
|
||||
}
|
||||
|
||||
using namespace btest::plugin::Testing_StorageDummy;
|
||||
|
||||
zeek::plugin::Configuration Plugin::Configure() {
|
||||
AddComponent(new zeek::storage::Component("StorageDummy", btest::storage::backend::StorageDummy::Instantiate));
|
||||
|
||||
zeek::plugin::Configuration config;
|
||||
config.name = "Testing::StorageDummy";
|
||||
config.description = "A dummy storage plugin";
|
||||
config.version.major = 1;
|
||||
config.version.minor = 0;
|
||||
config.version.patch = 0;
|
||||
return config;
|
||||
}
|
16  testing/btest/plugins/storage-plugin/src/Plugin.h  Normal file
@ -0,0 +1,16 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <plugin/Plugin.h>
|
||||
|
||||
namespace btest::plugin::Testing_StorageDummy {
|
||||
|
||||
class Plugin : public zeek::plugin::Plugin {
|
||||
protected:
|
||||
// Overridden from plugin::Plugin.
|
||||
virtual zeek::plugin::Configuration Configure();
|
||||
};
|
||||
|
||||
extern Plugin plugin;
|
||||
|
||||
} // namespace btest::plugin::Testing_StorageDummy
|
84  testing/btest/plugins/storage-plugin/src/StorageDummy.cc  Normal file
@ -0,0 +1,84 @@
|
|||
// See the file "COPYING" in the main distribution directory for copyright.
|
||||
|
||||
#include "StorageDummy.h"
|
||||
|
||||
#include "zeek/Func.h"
|
||||
#include "zeek/Val.h"
|
||||
#include "zeek/storage/ReturnCode.h"
|
||||
|
||||
using namespace zeek;
|
||||
using namespace zeek::storage;
|
||||
|
||||
namespace btest::storage::backend {
|
||||
|
||||
BackendPtr StorageDummy::Instantiate(std::string_view tag) { return make_intrusive<StorageDummy>(tag); }
|
||||
|
||||
/**
|
||||
* Called by the manager system to open the backend.
|
||||
*
|
||||
* Derived classes must implement this method. If successful, the
|
||||
* implementation must call \a Opened(); if not, it must call Error()
|
||||
* with a corresponding message.
|
||||
*/
|
||||
OperationResult StorageDummy::DoOpen(OpenResultCallback* cb, RecordValPtr options) {
|
||||
RecordValPtr backend_options = options->GetField<RecordVal>("dummy");
|
||||
bool open_fail = backend_options->GetField<BoolVal>("open_fail")->Get();
|
||||
if ( open_fail )
|
||||
return {ReturnCode::OPERATION_FAILED, "open_fail was set to true, returning error"};
|
||||
|
||||
open = true;
|
||||
|
||||
return {ReturnCode::SUCCESS};
|
||||
}
|
||||
|
||||
/**
|
||||
* Finalizes the backend when it's being closed.
|
||||
*/
|
||||
OperationResult StorageDummy::DoClose(OperationResultCallback* cb) {
|
||||
open = false;
|
||||
return {ReturnCode::SUCCESS};
|
||||
}
|
||||
|
||||
/**
|
||||
* The workhorse method for Put(). This must be implemented by plugins.
|
||||
*/
|
||||
OperationResult StorageDummy::DoPut(OperationResultCallback* cb, ValPtr key, ValPtr value, bool overwrite,
|
||||
double expiration_time) {
|
||||
auto json_key = key->ToJSON()->ToStdString();
|
||||
auto json_value = value->ToJSON()->ToStdString();
|
||||
data[json_key] = json_value;
|
||||
return {ReturnCode::SUCCESS};
|
||||
}
|
||||
|
||||
/**
|
||||
* The workhorse method for Get(). This must be implemented for plugins.
|
||||
*/
|
||||
OperationResult StorageDummy::DoGet(OperationResultCallback* cb, ValPtr key) {
|
||||
auto json_key = key->ToJSON();
|
||||
auto it = data.find(json_key->ToStdString());
|
||||
if ( it == data.end() )
|
||||
return {ReturnCode::KEY_NOT_FOUND};
|
||||
|
||||
auto val = zeek::detail::ValFromJSON(it->second.c_str(), val_type, Func::nil);
|
||||
if ( std::holds_alternative<ValPtr>(val) ) {
|
||||
ValPtr val_v = std::get<ValPtr>(val);
|
||||
return {ReturnCode::SUCCESS, "", val_v};
|
||||
}
|
||||
|
||||
return {ReturnCode::OPERATION_FAILED, std::get<std::string>(val)};
|
||||
}
|
||||
|
||||
/**
|
||||
* The workhorse method for Erase(). This must be implemented for plugins.
|
||||
*/
|
||||
OperationResult StorageDummy::DoErase(OperationResultCallback* cb, ValPtr key) {
|
||||
auto json_key = key->ToJSON();
|
||||
auto it = data.find(json_key->ToStdString());
|
||||
if ( it == data.end() )
|
||||
return {ReturnCode::KEY_NOT_FOUND};
|
||||
|
||||
data.erase(it);
|
||||
return {ReturnCode::SUCCESS};
|
||||
}
|
||||
|
||||
} // namespace btest::storage::backend
|
58  testing/btest/plugins/storage-plugin/src/StorageDummy.h  Normal file
@ -0,0 +1,58 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <string>
|
||||
|
||||
#include "zeek/storage/Backend.h"
|
||||
|
||||
namespace btest::storage::backend {
|
||||
|
||||
/**
|
||||
* A dummy in-memory storage backend used for testing the storage framework.
|
||||
*/
|
||||
class StorageDummy : public zeek::storage::Backend {
|
||||
public:
|
||||
StorageDummy(std::string_view tag) : Backend(zeek::storage::SupportedModes::SYNC, tag) {}
|
||||
~StorageDummy() override = default;
|
||||
|
||||
static zeek::storage::BackendPtr Instantiate(std::string_view tag);
|
||||
|
||||
/**
|
||||
* Called by the manager system to open the backend.
|
||||
*/
|
||||
zeek::storage::OperationResult DoOpen(zeek::storage::OpenResultCallback* cb, zeek::RecordValPtr options) override;
|
||||
|
||||
/**
|
||||
* Finalizes the backend when it's being closed.
|
||||
*/
|
||||
zeek::storage::OperationResult DoClose(zeek::storage::OperationResultCallback* cb = nullptr) override;
|
||||
|
||||
/**
|
||||
* Returns whether the backend is opened.
|
||||
*/
|
||||
bool IsOpen() override { return open; }
|
||||
|
||||
/**
|
||||
* The workhorse method for Put().
|
||||
*/
|
||||
zeek::storage::OperationResult DoPut(zeek::storage::OperationResultCallback* cb, zeek::ValPtr key,
|
||||
zeek::ValPtr value, bool overwrite = true,
|
||||
double expiration_time = 0) override;
|
||||
|
||||
/**
|
||||
* The workhorse method for Get().
|
||||
*/
|
||||
zeek::storage::OperationResult DoGet(zeek::storage::OperationResultCallback* cb, zeek::ValPtr key) override;
|
||||
|
||||
/**
|
||||
* The workhorse method for Erase().
|
||||
*/
|
||||
zeek::storage::OperationResult DoErase(zeek::storage::OperationResultCallback* cb, zeek::ValPtr key) override;
|
||||
|
||||
private:
|
||||
std::map<std::string, std::string> data;
|
||||
bool open = false;
|
||||
};
|
||||
|
||||
} // namespace btest::storage::backend
|
63  testing/btest/plugins/storage.zeek  Normal file
@ -0,0 +1,63 @@
|
|||
# @TEST-DOC: Basic test of a plugin implementing a backend for the storage framework
|
||||
# @TEST-REQUIRES: test "${ZEEK_ZAM}" != "1"
|
||||
|
||||
# @TEST-EXEC: ${DIST}/auxil/zeek-aux/plugin-support/init-plugin -u . Testing StorageDummy
|
||||
# @TEST-EXEC: cp -r %DIR/storage-plugin/* .
|
||||
# @TEST-EXEC: ./configure --zeek-dist=${DIST} && make
|
||||
# @TEST-EXEC: ZEEK_PLUGIN_PATH=$(pwd) zeek -b Testing::StorageDummy %INPUT >> output 2>zeek-stderr
|
||||
# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output
|
||||
# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff zeek-stderr
|
||||
|
||||
@load base/frameworks/storage/sync
|
||||
|
||||
# Create a typename here that can be passed down into get().
|
||||
type str: string;
|
||||
|
||||
type StorageDummyOpts : record {
|
||||
open_fail: bool;
|
||||
};
|
||||
|
||||
redef record Storage::BackendOptions += {
|
||||
dummy: StorageDummyOpts &optional;
|
||||
};
|
||||
|
||||
event zeek_init() {
|
||||
local opts : Storage::BackendOptions;
|
||||
opts$dummy = [$open_fail = F];
|
||||
|
||||
local key = "key1234";
|
||||
local value = "value5678";
|
||||
|
||||
# Test basic operation. The second get() should return an error
|
||||
# as the key should have been erased.
|
||||
local open_res = Storage::Sync::open_backend(Storage::STORAGEDUMMY, opts, str, str);
|
||||
print "open result", open_res;
|
||||
local b = open_res$value;
|
||||
local put_res = Storage::Sync::put(b, [$key=key, $value=value, $overwrite=F]);
|
||||
local get_res = Storage::Sync::get(b, key);
|
||||
if ( get_res$code != Storage::SUCCESS ) {
|
||||
print("Got an invalid value in response!");
|
||||
}
|
||||
|
||||
local erase_res = Storage::Sync::erase(b, key);
|
||||
get_res = Storage::Sync::get(b, key);
|
||||
Storage::Sync::close_backend(b);
|
||||
|
||||
if ( get_res$code != Storage::SUCCESS && get_res?$error_str )
|
||||
Reporter::error(get_res$error_str);
|
||||
|
||||
# Test attempting to use the closed handle.
|
||||
put_res = Storage::Sync::put(b, [$key="a", $value="b", $overwrite=F]);
|
||||
get_res = Storage::Sync::get(b, "a");
|
||||
erase_res = Storage::Sync::erase(b, "a");
|
||||
|
||||
print(fmt("results of trying to use closed handle: get: %s, put: %s, erase: %s",
|
||||
get_res$code, put_res$code, erase_res$code));
|
||||
|
||||
# Test failing to open the handle and test closing an invalid handle.
|
||||
opts$dummy$open_fail = T;
|
||||
open_res = Storage::Sync::open_backend(Storage::STORAGEDUMMY, opts, str, str);
|
||||
print "open result 2", open_res;
|
||||
local close_res = Storage::Sync::close_backend(open_res$value);
|
||||
print "close result of closed handle", close_res;
|
||||
}
|
|
@ -0,0 +1,75 @@
|
|||
# @TEST-DOC: Test operations using more complicated types
|
||||
# @TEST-EXEC: zeek -b %INPUT > out
|
||||
# @TEST-EXEC: btest-diff out
|
||||
# @TEST-EXEC: btest-diff .stderr
|
||||
|
||||
@load base/frameworks/storage/sync
|
||||
@load policy/frameworks/storage/backend/sqlite
|
||||
|
||||
type Color: enum {
|
||||
Red = 10,
|
||||
White = 20,
|
||||
Blue = 30
|
||||
};
|
||||
|
||||
type Rec: record
|
||||
{
|
||||
hello: string;
|
||||
t: bool;
|
||||
f: bool;
|
||||
n: count &optional;
|
||||
m: count &optional; # not in input
|
||||
def: count &default = 123;
|
||||
i: int;
|
||||
pi: double;
|
||||
a: string_vec;
|
||||
c1: Color;
|
||||
p: port;
|
||||
ti: time;
|
||||
it: interval;
|
||||
ad: addr;
|
||||
s: subnet;
|
||||
re: pattern;
|
||||
su: subnet_set;
|
||||
};
|
||||
|
||||
type tbl: table[count] of string;
|
||||
|
||||
event zeek_init() {
|
||||
# Create a database file in the .tmp directory with a 'testing' table
|
||||
local opts : Storage::BackendOptions;
|
||||
opts$sqlite = [$database_path = "types_test.sqlite", $table_name = "types_testing"];
|
||||
|
||||
local key : Rec;
|
||||
key$hello = "hello";
|
||||
key$t = T;
|
||||
key$f = F;
|
||||
key$n = 1234;
|
||||
key$m = 5678;
|
||||
key$i = -2345;
|
||||
key$pi = 345.0;
|
||||
key$a = ["a","b","c"];
|
||||
key$c1 = Red;
|
||||
key$p = 1234/tcp;
|
||||
key$ti = current_time();
|
||||
key$it = 15sec;
|
||||
key$ad = 1.2.3.4;
|
||||
key$s = 255.255.255.0/24;
|
||||
key$re = /.*/;
|
||||
key$su = [255.255.255.0/24];
|
||||
|
||||
local value : tbl;
|
||||
value[1] = "a";
|
||||
value[2] = "b";
|
||||
value[3] = "c";
|
||||
|
||||
local open_res = Storage::Sync::open_backend(Storage::SQLITE, opts, Rec, tbl);
|
||||
print "open result", open_res;
|
||||
local b = open_res$value;
|
||||
|
||||
local res = Storage::Sync::put(b, [$key=key, $value=value]);
|
||||
print "put result", res;
|
||||
|
||||
local res2 = Storage::Sync::get(b, key);
|
||||
print "get result", res2;
|
||||
}
|
35  testing/btest/scripts/base/frameworks/storage/erase.zeek  Normal file
@ -0,0 +1,35 @@
|
|||
# @TEST-DOC: Erase existing data in a SQLite backend
|
||||
# @TEST-EXEC: cp $FILES/storage-test.sqlite ./storage-test.sqlite
|
||||
# @TEST-EXEC: zeek -b %INPUT > out
|
||||
# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out
|
||||
# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr
|
||||
|
||||
@load base/frameworks/storage/sync
|
||||
@load policy/frameworks/storage/backend/sqlite
|
||||
|
||||
# Create a typename here that can be passed down into get().
|
||||
type str: string;
|
||||
|
||||
event zeek_init()
|
||||
{
|
||||
# Create a database file in the .tmp directory with a 'testing' table
|
||||
local opts: Storage::BackendOptions;
|
||||
opts$sqlite = [ $database_path="storage-test.sqlite", $table_name="testing" ];
|
||||
|
||||
local key = "key1234";
|
||||
|
||||
# Test inserting/retrieving a key/value pair that we know won't be in
|
||||
# the backend yet.
|
||||
local open_res = Storage::Sync::open_backend(Storage::SQLITE, opts, str, str);
|
||||
print "open result", open_res;
|
||||
local b = open_res$value;
|
||||
|
||||
local res = Storage::Sync::erase(b, key);
|
||||
print "erase result", res;
|
||||
|
||||
local res2 = Storage::Sync::get(b, key);
|
||||
if ( res2$code != Storage::SUCCESS )
|
||||
print "get result", res2;
|
||||
|
||||
Storage::Sync::close_backend(b);
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
# @TEST-DOC: Automatic expiration of stored data
|
||||
# @TEST-EXEC: zcat <$TRACES/echo-connections.pcap.gz | zeek -b -Cr - %INPUT > out
|
||||
# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff out
|
||||
# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff .stderr
|
||||
|
||||
@load base/frameworks/storage/sync
|
||||
@load policy/frameworks/storage/backend/sqlite
|
||||
|
||||
redef Storage::expire_interval = 2 secs;
|
||||
redef exit_only_after_terminate = T;
|
||||
|
||||
# Create a typename here that can be passed down into get().
|
||||
type str: string;
|
||||
|
||||
global b: opaque of Storage::BackendHandle;
|
||||
global key1: string = "key1234";
|
||||
global value1: string = "value1234";
|
||||
|
||||
global key2: string = "key2345";
|
||||
global value2: string = "value2345";
|
||||
|
||||
event check_removed()
|
||||
{
|
||||
local res = Storage::Sync::get(b, key1);
|
||||
print "get result 1 after expiration", res;
|
||||
|
||||
res = Storage::Sync::get(b, key2);
|
||||
print "get result 2 after expiration", res;
|
||||
|
||||
Storage::Sync::close_backend(b);
|
||||
terminate();
|
||||
}
|
||||
|
||||
event setup_test()
|
||||
{
|
||||
local opts : Storage::BackendOptions;
|
||||
opts$sqlite = [$database_path = "storage-test.sqlite", $table_name = "testing"];
|
||||
|
||||
local open_res = Storage::Sync::open_backend(Storage::SQLITE, opts, str, str);
|
||||
print "open result", open_res;
|
||||
|
||||
b = open_res$value;
|
||||
|
||||
# Insert a key that will expire in the time allotted
|
||||
local res = Storage::Sync::put(b, [ $key=key1, $value=value1, $expire_time=2secs ]);
|
||||
print "put result 1", res;
|
||||
|
||||
# Insert a key that won't expire
|
||||
res = Storage::Sync::put(b, [ $key=key2, $value=value2, $expire_time=20secs ]);
|
||||
print "put result 2", res;
|
||||
|
||||
res = Storage::Sync::get(b, key1);
|
||||
print "get result", res;
|
||||
if ( res$code == Storage::SUCCESS && res?$value )
|
||||
print "get result same as inserted", value1 == ( res$value as string );
|
||||
|
||||
res = Storage::Sync::get(b, key2);
|
||||
print "get result 2", res;
|
||||
if ( res$code == Storage::SUCCESS && res?$value )
|
||||
print "get result 2 same as inserted", value2 == ( res$value as string );
|
||||
|
||||
schedule 5secs { check_removed() };
|
||||
}
|
||||
|
||||
event zeek_init()
|
||||
{
|
||||
# We need network time to be set to something other than zero for the
|
||||
# expiration time to be set correctly. Schedule an event on a short
|
||||
# timer so packets start getting read and do the setup there.
|
||||
schedule 100msecs { setup_test() };
|
||||
}
|
|
@ -0,0 +1,33 @@
|
|||
# @TEST-DOC: Overwriting existing data in a SQLite backend
|
||||
# @TEST-EXEC: cp $FILES/storage-test.sqlite ./storage-test.sqlite
|
||||
# @TEST-EXEC: zeek -b %INPUT > out
|
||||
# @TEST-EXEC: btest-diff out
|
||||
# @TEST-EXEC: btest-diff .stderr
|
||||
|
||||
@load base/frameworks/storage/sync
|
||||
@load policy/frameworks/storage/backend/sqlite
|
||||
|
||||
# Create a typename here that can be passed down into get().
|
||||
type str: string;
|
||||
|
||||
event zeek_init() {
|
||||
local opts : Storage::BackendOptions;
|
||||
opts$sqlite = [$database_path = "storage-test.sqlite", $table_name = "testing"];
|
||||
|
||||
local key = "key1234";
|
||||
local value = "value7890";
|
||||
|
||||
local open_res = Storage::Sync::open_backend(Storage::SQLITE, opts, str, str);
|
||||
print "open result", open_res;
|
||||
local b = open_res$value;
|
||||
|
||||
local res = Storage::Sync::put(b, [$key=key, $value=value]);
|
||||
print "put result", res;
|
||||
|
||||
local res2 = Storage::Sync::get(b, key);
|
||||
print "get result", res2;
|
||||
if ( res2$code == Storage::SUCCESS && res2?$value )
|
||||
print "get result same as inserted", value == (res2$value as string);
|
||||
|
||||
Storage::Sync::close_backend(b);
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
# @TEST-DOC: Tests that Redis storage backend defaults back to sync mode reading pcaps
|
||||
|
||||
# @TEST-REQUIRES: have-redis
|
||||
# @TEST-PORT: REDIS_PORT
|
||||
|
||||
# @TEST-EXEC: btest-bg-run redis-server run-redis-server ${REDIS_PORT%/tcp}
|
||||
# @TEST-EXEC: zeek -r $TRACES/http/get.trace -b %INPUT > out
|
||||
# @TEST-EXEC: btest-bg-wait -k 0
|
||||
|
||||
# @TEST-EXEC: btest-diff out
|
||||
|
||||
@load base/frameworks/storage/sync
|
||||
@load base/frameworks/storage/async
|
||||
@load policy/frameworks/storage/backend/redis
|
||||
|
||||
# Create a typename here that can be passed down into open_backend()
|
||||
type str: string;
|
||||
|
||||
event zeek_init()
|
||||
{
|
||||
local opts: Storage::BackendOptions;
|
||||
opts$redis = [ $server_host="127.0.0.1", $server_port=to_port(getenv(
|
||||
"REDIS_PORT")), $key_prefix="testing" ];
|
||||
|
||||
local key = "key1234";
|
||||
local value = "value5678";
|
||||
|
||||
local open_res = Storage::Sync::open_backend(Storage::REDIS, opts, str, str);
|
||||
print "open result", open_res;
|
||||
local b = open_res$value;
|
||||
|
||||
when [b, key, value] ( local res = Storage::Async::put(b, [ $key=key,
|
||||
$value=value ]) )
|
||||
{
|
||||
print "put result", res;
|
||||
|
||||
when [b, key, value] ( local res2 = Storage::Async::get(b, key) )
|
||||
{
|
||||
print "get result", res2;
|
||||
if ( res2$code == Storage::SUCCESS && res2?$value )
|
||||
print "get result same as inserted", value == ( res2$value as string );
|
||||
|
||||
Storage::Sync::close_backend(b);
|
||||
}
|
||||
timeout 5sec
|
||||
{
|
||||
print "get request timed out";
|
||||
}
|
||||
}
|
||||
timeout 5sec
|
||||
{
|
||||
print "put request timed out";
|
||||
}
|
||||
}
|
|
@ -0,0 +1,75 @@
|
|||
# @TEST-DOC: Tests basic Redis storage backend functions in async mode
|
||||
|
||||
# @TEST-REQUIRES: have-redis
|
||||
# @TEST-PORT: REDIS_PORT
|
||||
|
||||
# @TEST-EXEC: btest-bg-run redis-server run-redis-server ${REDIS_PORT%/tcp}
|
||||
# @TEST-EXEC: zeek -b %INPUT > out
|
||||
# @TEST-EXEC: btest-bg-wait -k 0
|
||||
|
||||
# @TEST-EXEC: btest-diff out
|
||||
|
||||
@load base/frameworks/storage/async
|
||||
@load base/frameworks/storage/sync
|
||||
@load policy/frameworks/storage/backend/redis
|
||||
|
||||
redef exit_only_after_terminate = T;
|
||||
|
||||
# Create a typename here that can be passed down into open_backend()
|
||||
type str: string;
|
||||
|
||||
event zeek_init()
|
||||
{
|
||||
local opts: Storage::BackendOptions;
|
||||
opts$redis = [ $server_host="127.0.0.1", $server_port=to_port(getenv(
|
||||
"REDIS_PORT")), $key_prefix="testing" ];
|
||||
|
||||
local key = "key1234";
|
||||
local value = "value5678";
|
||||
|
||||
when [opts, key, value] ( local open_res = Storage::Async::open_backend(
|
||||
Storage::REDIS, opts, str, str) )
|
||||
{
|
||||
print "open result", open_res;
|
||||
local b = open_res$value;
|
||||
|
||||
when [b, key, value] ( local put_res = Storage::Async::put(b, [ $key=key,
|
||||
$value=value ]) )
|
||||
{
|
||||
print "put result", put_res;
|
||||
|
||||
when [b, key, value] ( local get_res = Storage::Async::get(b, key) )
|
||||
{
|
||||
print "get result", get_res;
|
||||
if ( get_res$code == Storage::SUCCESS && get_res?$value )
|
||||
print "get result same as inserted", value == ( get_res$value as string );
|
||||
|
||||
when [b] ( local close_res = Storage::Async::close_backend(b) )
|
||||
{
|
||||
print "close result", close_res;
|
||||
terminate();
|
||||
}
|
||||
timeout 5sec
|
||||
{
|
||||
print "close request timed out";
|
||||
terminate();
|
||||
}
|
||||
}
|
||||
timeout 5sec
|
||||
{
|
||||
print "get request timed out";
|
||||
terminate();
|
||||
}
|
||||
}
|
||||
timeout 5sec
|
||||
{
|
||||
print "put request timed out";
|
||||
terminate();
|
||||
}
|
||||
}
|
||||
timeout 5sec
|
||||
{
|
||||
print "open request timed out";
|
||||
terminate();
|
||||
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff.