diff --git a/src/storage/backend/redis/Redis.cc b/src/storage/backend/redis/Redis.cc
index 3e5a4f30d5..d5e96d7dfe 100644
--- a/src/storage/backend/redis/Redis.cc
+++ b/src/storage/backend/redis/Redis.cc
@@ -254,24 +254,30 @@ OperationResult Redis::DoPut(ResultCallback* cb, ValPtr key, ValPtr value, bool
     auto locked_scope = conditionally_lock(zeek::run_state::reading_traces, expire_mutex);
 
-    std::string format = "SET %s:%s %s";
+    std::string format = "SET %s:%b %b";
     if ( ! overwrite )
         format.append(" NX");
 
-    auto json_key = key->ToJSON()->ToStdString();
-    auto json_value = value->ToJSON()->ToStdString();
+    auto key_data = serializer->Serialize(key);
+    if ( ! key_data )
+        return {ReturnCode::SERIALIZATION_FAILED, "Failed to serialize key"};
+
+    auto val_data = serializer->Serialize(value);
+    if ( ! val_data )
+        return {ReturnCode::SERIALIZATION_FAILED, "Failed to serialize value"};
 
     int status;
 
     // Use built-in expiration if reading live data, since time will move
     // forward consistently. If reading pcaps, we'll do something else.
     if ( expiration_time > 0.0 && ! zeek::run_state::reading_traces ) {
         format.append(" PXAT %" PRIu64);
-        status = redisAsyncCommand(async_ctx, redisPut, cb, format.c_str(), key_prefix.data(), json_key.data(),
-                                   json_value.data(), static_cast<uint64_t>(expiration_time * 1e3));
+        status = redisAsyncCommand(async_ctx, redisPut, cb, format.c_str(), key_prefix.data(), key_data->data(),
+                                   key_data->size(), val_data->data(), val_data->size(),
+                                   static_cast<uint64_t>(expiration_time * 1e3));
     }
     else
-        status = redisAsyncCommand(async_ctx, redisPut, cb, format.c_str(), key_prefix.data(), json_key.data(),
-                                   json_value.data());
+        status = redisAsyncCommand(async_ctx, redisPut, cb, format.c_str(), key_prefix.data(), key_data->data(),
+                                   key_data->size(), val_data->data(), val_data->size());
 
     if ( connected && status == REDIS_ERR )
         return {ReturnCode::OPERATION_FAILED, util::fmt("Failed to queue put operation: %s", async_ctx->errstr)};
@@ -284,10 +290,10 @@ OperationResult Redis::DoPut(ResultCallback* cb, ValPtr key, ValPtr value, bool
         format = "ZADD %s_expire";
         if ( ! overwrite )
             format.append(" NX");
-        format += " %f %s";
+        format += " %f %b";
 
         status = redisAsyncCommand(async_ctx, redisZADD, NULL, format.c_str(), key_prefix.data(), expiration_time,
-                                   json_key.data());
+                                   key_data->data(), key_data->size());
 
         if ( connected && status == REDIS_ERR )
             return {ReturnCode::OPERATION_FAILED, util::fmt("ZADD operation failed: %s", async_ctx->errstr)};
@@ -307,8 +313,12 @@ OperationResult Redis::DoGet(ResultCallback* cb, ValPtr key) {
     auto locked_scope = conditionally_lock(zeek::run_state::reading_traces, expire_mutex);
 
-    int status = redisAsyncCommand(async_ctx, redisGet, cb, "GET %s:%s", key_prefix.data(),
-                                   key->ToJSON()->ToStdStringView().data());
+    auto key_data = serializer->Serialize(key);
+    if ( ! key_data )
+        return {ReturnCode::SERIALIZATION_FAILED, "Failed to serialize key"};
+
+    int status =
+        redisAsyncCommand(async_ctx, redisGet, cb, "GET %s:%b", key_prefix.data(), key_data->data(), key_data->size());
 
     if ( connected && status == REDIS_ERR )
         return {ReturnCode::OPERATION_FAILED, util::fmt("Failed to queue get operation: %s", async_ctx->errstr)};
@@ -330,8 +340,12 @@ OperationResult Redis::DoErase(ResultCallback* cb, ValPtr key) {
     auto locked_scope = conditionally_lock(zeek::run_state::reading_traces, expire_mutex);
 
-    int status = redisAsyncCommand(async_ctx, redisErase, cb, "DEL %s:%s", key_prefix.data(),
-                                   key->ToJSON()->ToStdStringView().data());
+    auto key_data = serializer->Serialize(key);
+    if ( ! key_data )
+        return {ReturnCode::SERIALIZATION_FAILED, "Failed to serialize key"};
+
+    int status = redisAsyncCommand(async_ctx, redisErase, cb, "DEL %s:%b", key_prefix.data(), key_data->data(),
+                                   key_data->size());
 
     if ( connected && status == REDIS_ERR )
         return {ReturnCode::OPERATION_FAILED, async_ctx->errstr};
@@ -439,7 +453,7 @@ void Redis::HandleGetResult(redisReply* reply, ResultCallback* callback) {
     else if ( reply->type == REDIS_REPLY_ERROR )
         res = ParseReplyError("get", reply->str);
     else {
-        auto val = zeek::detail::ValFromJSON(reply->str, val_type, Func::nil);
+        auto val = serializer->Unserialize({(std::byte*)reply->str, reply->len}, val_type);
         if ( val )
             res = {ReturnCode::SUCCESS, "", val.value()};
         else
diff --git a/src/storage/backend/sqlite/SQLite.cc b/src/storage/backend/sqlite/SQLite.cc
index a51c2f21bf..e5ddbd15b5 100644
--- a/src/storage/backend/sqlite/SQLite.cc
+++ b/src/storage/backend/sqlite/SQLite.cc
@@ -44,7 +44,7 @@ OperationResult SQLite::DoOpen(OpenResultCallback* cb, RecordValPtr options) {
     }
 
     std::string create = "create table if not exists " + table_name + " (";
-    create.append("key_str text primary key, value_str text not null, expire_time real);");
+    create.append("key_str blob primary key, value_str blob not null, expire_time real);");
 
     char* errorMsg = nullptr;
     if ( int res = sqlite3_exec(db, create.c_str(), NULL, NULL, &errorMsg); res != SQLITE_OK ) {
@@ -151,8 +151,9 @@ OperationResult SQLite::DoPut(ResultCallback* cb, ValPtr key, ValPtr value, bool
     if ( ! db )
         return {ReturnCode::NOT_CONNECTED};
 
-    auto json_key = key->ToJSON();
-    auto json_value = value->ToJSON();
+    auto key_data = serializer->Serialize(key);
+    if ( ! key_data )
+        return {ReturnCode::SERIALIZATION_FAILED, "Failed to serialize key"};
 
     sqlite3_stmt* stmt;
     if ( ! overwrite )
@@ -160,15 +161,17 @@ OperationResult SQLite::DoPut(ResultCallback* cb, ValPtr key, ValPtr value, bool
     else
         stmt = put_update_stmt.get();
 
-    auto key_str = json_key->ToStdStringView();
-    if ( auto res = CheckError(sqlite3_bind_text(stmt, 1, key_str.data(), key_str.size(), SQLITE_STATIC));
+    if ( auto res = CheckError(sqlite3_bind_blob(stmt, 1, key_data->data(), key_data->size(), SQLITE_STATIC));
         res.code != ReturnCode::SUCCESS ) {
         sqlite3_reset(stmt);
         return res;
     }
-    auto value_str = json_value->ToStdStringView();
-    if ( auto res = CheckError(sqlite3_bind_text(stmt, 2, value_str.data(), value_str.size(), SQLITE_STATIC));
+    auto val_data = serializer->Serialize(value);
+    if ( ! val_data )
+        return {ReturnCode::SERIALIZATION_FAILED, "Failed to serialize value"};
+
+    if ( auto res = CheckError(sqlite3_bind_blob(stmt, 2, val_data->data(), val_data->size(), SQLITE_STATIC));
         res.code != ReturnCode::SUCCESS ) {
         sqlite3_reset(stmt);
         return res;
     }
@@ -180,7 +183,7 @@ OperationResult SQLite::DoPut(ResultCallback* cb, ValPtr key, ValPtr value, bool
     }
 
     if ( overwrite ) {
-        if ( auto res = CheckError(sqlite3_bind_text(stmt, 4, value_str.data(), value_str.size(), SQLITE_STATIC));
+        if ( auto res = CheckError(sqlite3_bind_blob(stmt, 4, val_data->data(), val_data->size(), SQLITE_STATIC));
            res.code != ReturnCode::SUCCESS ) {
            sqlite3_reset(stmt);
            return res;
@@ -197,11 +200,13 @@ OperationResult SQLite::DoGet(ResultCallback* cb, ValPtr key) {
     if ( ! db )
         return {ReturnCode::NOT_CONNECTED};
 
-    auto json_key = key->ToJSON();
+    auto key_data = serializer->Serialize(key);
+    if ( ! key_data )
+        return {ReturnCode::SERIALIZATION_FAILED, "Failed to serialize key"};
+
     auto stmt = get_stmt.get();
 
-    auto key_str = json_key->ToStdStringView();
-    if ( auto res = CheckError(sqlite3_bind_text(stmt, 1, key_str.data(), key_str.size(), SQLITE_STATIC));
+    if ( auto res = CheckError(sqlite3_bind_blob(stmt, 1, key_data->data(), key_data->size(), SQLITE_STATIC));
         res.code != ReturnCode::SUCCESS ) {
         sqlite3_reset(stmt);
         return res;
@@ -217,11 +222,13 @@ OperationResult SQLite::DoErase(ResultCallback* cb, ValPtr key) {
     if ( ! db )
         return {ReturnCode::NOT_CONNECTED};
 
-    auto json_key = key->ToJSON();
+    auto key_data = serializer->Serialize(key);
+    if ( ! key_data )
+        return {ReturnCode::SERIALIZATION_FAILED, "Failed to serialize key"};
+
     auto stmt = erase_stmt.get();
 
-    auto key_str = json_key->ToStdStringView();
-    if ( auto res = CheckError(sqlite3_bind_text(stmt, 1, key_str.data(), key_str.size(), SQLITE_STATIC));
+    if ( auto res = CheckError(sqlite3_bind_blob(stmt, 1, key_data->data(), key_data->size(), SQLITE_STATIC));
         res.code != ReturnCode::SUCCESS ) {
         sqlite3_reset(stmt);
         return res;
@@ -266,9 +273,10 @@ OperationResult SQLite::Step(sqlite3_stmt* stmt, bool parse_value) {
     int step_status = sqlite3_step(stmt);
     if ( step_status == SQLITE_ROW ) {
         if ( parse_value ) {
-            // Column 1 is the value
-            const char* text = (const char*)sqlite3_column_text(stmt, 0);
-            auto val = zeek::detail::ValFromJSON(text, val_type, Func::nil);
+            auto blob = static_cast<const std::byte*>(sqlite3_column_blob(stmt, 0));
+            size_t blob_size = sqlite3_column_bytes(stmt, 0);
+
+            auto val = serializer->Unserialize({blob, blob_size}, val_type);
             sqlite3_reset(stmt);
 
             if ( val )
diff --git a/src/storage/serializer/CMakeLists.txt b/src/storage/serializer/CMakeLists.txt
index e69de29bb2..7a340d53da 100644
--- a/src/storage/serializer/CMakeLists.txt
+++ b/src/storage/serializer/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(json)
diff --git a/src/storage/serializer/json/CMakeLists.txt b/src/storage/serializer/json/CMakeLists.txt
new file mode 100644
index 0000000000..2c9a16f9d7
--- /dev/null
+++ b/src/storage/serializer/json/CMakeLists.txt
@@ -0,0 +1,3 @@
+zeek_add_plugin(
+    Zeek Storage_Serializer_JSON
+    SOURCES JSON.cc Plugin.cc)
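
Both backends now hand serialized keys and values around as detail::byte_buffer / detail::byte_buffer_span objects instead of JSON strings. Those aliases are defined elsewhere in the storage framework rather than in this diff; judging from the data()/size() calls and the pointer-plus-length construction above, they presumably boil down to something like the sketch below (the exact definitions are an assumption here, shown only for orientation).

// Assumed shape of the storage framework's byte-buffer aliases -- not part of
// this diff; shown so the data()/size() usage above is self-explanatory.
#include <cstddef>
#include <span>
#include <vector>

namespace zeek::storage::detail {

using byte_buffer = std::vector<std::byte>;           // owning buffer, as returned by Serialize()
using byte_buffer_span = std::span<const std::byte>;  // non-owning view, as consumed by Unserialize()

} // namespace zeek::storage::detail
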
diff --git a/src/storage/serializer/json/JSON.cc b/src/storage/serializer/json/JSON.cc
new file mode 100644
index 0000000000..8625eaa0be
--- /dev/null
+++ b/src/storage/serializer/json/JSON.cc
@@ -0,0 +1,29 @@
+// See the file "COPYING" in the main distribution directory for copyright.
+
+#include "zeek/storage/serializer/json/JSON.h"
+
+#include "zeek/Func.h"
+
+namespace zeek::storage::serializer::json {
+
+std::unique_ptr<Serializer> JSON::Instantiate() { return std::make_unique<JSON>(); }
+
+JSON::JSON() : Serializer("JSON") {}
+
+std::optional<detail::byte_buffer> JSON::Serialize(ValPtr val) {
+    detail::byte_buffer buf;
+    auto json = val->ToJSON();
+    buf.reserve(json->Len());
+
+    std::transform(json->Bytes(), json->Bytes() + json->Len(), std::back_inserter(buf),
+                   [](u_char c) { return std::byte(c); });
+
+    return buf;
+}
+
+zeek::expected<ValPtr, std::string> JSON::Unserialize(detail::byte_buffer_span buf, TypePtr type) {
+    std::string_view text{reinterpret_cast<const char*>(buf.data()), buf.size()};
+    return zeek::detail::ValFromJSON(text, type, Func::nil);
+}
+
+} // namespace zeek::storage::serializer::json
diff --git a/src/storage/serializer/json/JSON.h b/src/storage/serializer/json/JSON.h
new file mode 100644
index 0000000000..2b5475936a
--- /dev/null
+++ b/src/storage/serializer/json/JSON.h
@@ -0,0 +1,20 @@
+// See the file "COPYING" in the main distribution directory for copyright.
+
+#pragma once
+
+#include "zeek/storage/Serializer.h"
+
+namespace zeek::storage::serializer::json {
+
+class JSON final : public Serializer {
+public:
+    static std::unique_ptr<Serializer> Instantiate();
+
+    JSON();
+    ~JSON() override = default;
+
+    std::optional<detail::byte_buffer> Serialize(ValPtr val) override;
+    zeek::expected<ValPtr, std::string> Unserialize(detail::byte_buffer_span buf, TypePtr type) override;
+};
+
+} // namespace zeek::storage::serializer::json
diff --git a/src/storage/serializer/json/Plugin.cc b/src/storage/serializer/json/Plugin.cc
new file mode 100644
index 0000000000..7f4055b354
--- /dev/null
+++ b/src/storage/serializer/json/Plugin.cc
@@ -0,0 +1,22 @@
+// See the file "COPYING" in the main distribution directory for copyright.
+
+#include "zeek/plugin/Plugin.h"
+
+#include "zeek/storage/Component.h"
+#include "zeek/storage/serializer/json/JSON.h"
+
+namespace zeek::storage::serializer::json {
+
+class Plugin final : public plugin::Plugin {
+public:
+    plugin::Configuration Configure() override {
+        AddComponent(new storage::SerializerComponent("JSON", serializer::json::JSON::Instantiate));
+
+        plugin::Configuration config;
+        config.name = "Zeek::Storage_Serializer_JSON";
+        config.description = "JSON serializer for storage framework";
+        return config;
+    }
+} plugin;
+
+} // namespace zeek::storage::serializer::json
diff --git a/src/storage/storage-async.bif b/src/storage/storage-async.bif
index a705569236..fecfcfe341 100644
--- a/src/storage/storage-async.bif
+++ b/src/storage/storage-async.bif
@@ -80,9 +80,9 @@ function Storage::Async::__open_backend%(btype: Storage::Backend, options: any,
         return nullptr;
 
     auto btype_val = IntrusivePtr{NewRef{}, btype->AsEnumVal()};
-    Tag btag{btype_val};
+    Tag tag{btype_val};
 
-    auto b = storage_mgr->InstantiateBackend(btag);
+    auto b = storage_mgr->InstantiateBackend(tag);
 
     if ( ! b.has_value() ) {
         trigger->Cache(
diff --git a/src/storage/storage-sync.bif b/src/storage/storage-sync.bif
index 38c977bd3b..8c20ff4d3b 100644
--- a/src/storage/storage-sync.bif
+++ b/src/storage/storage-sync.bif
@@ -31,9 +31,9 @@ module Storage::Sync;
 function Storage::Sync::__open_backend%(btype: Storage::Backend, options: any, key_type: any, val_type: any%): Storage::OperationResult
     %{
     auto btype_val = IntrusivePtr{NewRef{}, btype->AsEnumVal()};
-    Tag btag{btype_val};
+    Tag tag{btype_val};
 
-    auto b = storage_mgr->InstantiateBackend(btag);
+    auto b = storage_mgr->InstantiateBackend(tag);
 
     if ( ! b.has_value() ) {
         emit_builtin_error(b.error().c_str());
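
The JSON plugin above overrides two virtuals of the storage framework's Serializer base class, and the backends reach it through a serializer member; neither the base class nor that member is part of this diff. Inferred from the overrides in JSON.h and the SerializerComponent registration in Plugin.cc, the base class presumably looks roughly like the sketch below (an assumption for illustration, not the actual zeek/storage/Serializer.h).

// Assumed interface, inferred from JSON.h and Plugin.cc above; ValPtr, TypePtr,
// zeek::expected and the byte-buffer aliases come from Zeek's own headers.
#include <optional>
#include <string>

namespace zeek::storage {

class Serializer {
public:
    explicit Serializer(std::string name) : name(std::move(name)) {}
    virtual ~Serializer() = default;

    // Returns std::nullopt when the value cannot be represented in this format.
    virtual std::optional<detail::byte_buffer> Serialize(ValPtr val) = 0;

    // Rebuilds a value of the given type, or returns an error message.
    virtual zeek::expected<ValPtr, std::string> Unserialize(detail::byte_buffer_span buf, TypePtr type) = 0;

private:
    std::string name;
};

} // namespace zeek::storage
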
diff --git a/testing/btest/Baseline/scripts.base.frameworks.storage.redis-disconnect/out b/testing/btest/Baseline/scripts.base.frameworks.storage.redis-disconnect/out
index 3308aef14e..d87e524a8b 100644
--- a/testing/btest/Baseline/scripts.base.frameworks.storage.redis-disconnect/out
+++ b/testing/btest/Baseline/scripts.base.frameworks.storage.redis-disconnect/out
@@ -1,4 +1,4 @@
 ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
 open_result, [code=Storage::SUCCESS, error_str=, value=]
-Storage::backend_opened, Storage::REDIS, [redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=, key_prefix=testing]]
-Storage::backend_lost, Storage::REDIS, [redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=, key_prefix=testing]], Server closed the connection
+Storage::backend_opened, Storage::REDIS, [serializer=Storage::JSON, redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=, key_prefix=testing]]
+Storage::backend_lost, Storage::REDIS, [serializer=Storage::JSON, redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=, key_prefix=testing]], Server closed the connection
diff --git a/testing/btest/Baseline/scripts.base.frameworks.storage.redis-sync/out b/testing/btest/Baseline/scripts.base.frameworks.storage.redis-sync/out
index 0053e1555d..52cc495f62 100644
--- a/testing/btest/Baseline/scripts.base.frameworks.storage.redis-sync/out
+++ b/testing/btest/Baseline/scripts.base.frameworks.storage.redis-sync/out
@@ -9,5 +9,5 @@ get result same as originally inserted, T
 put result, [code=Storage::SUCCESS, error_str=, value=]
 get result, [code=Storage::SUCCESS, error_str=, value=value2345]
 get result same as overwritten, T
-Storage::backend_opened, Storage::REDIS, [redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=, key_prefix=testing]]
-Storage::backend_lost, Storage::REDIS, [redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=, key_prefix=testing]], Client disconnected
+Storage::backend_opened, Storage::REDIS, [serializer=Storage::JSON, redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=, key_prefix=testing]]
+Storage::backend_lost, Storage::REDIS, [serializer=Storage::JSON, redis=[server_host=127.0.0.1, server_port=xxxx/tcp, server_unix_socket=, key_prefix=testing]], Client disconnected
diff --git a/testing/btest/Baseline/scripts.base.frameworks.storage.sqlite-basic/out b/testing/btest/Baseline/scripts.base.frameworks.storage.sqlite-basic/out
index 781e32a333..8ad2a12b65 100644
--- a/testing/btest/Baseline/scripts.base.frameworks.storage.sqlite-basic/out
+++ b/testing/btest/Baseline/scripts.base.frameworks.storage.sqlite-basic/out
@@ -1,5 +1,5 @@
 ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
-Storage::backend_opened, Storage::SQLITE, [sqlite=[database_path=test.sqlite, table_name=testing, tuning_params={
+Storage::backend_opened, Storage::SQLITE, [serializer=Storage::JSON, sqlite=[database_path=test.sqlite, table_name=testing, tuning_params={
 [synchronous] = normal,
 [temp_store] = memory,
 [journal_mode] = WAL
diff --git a/testing/btest/plugins/storage-plugin/src/Plugin.cc b/testing/btest/plugins/storage-plugin/src/Plugin.cc
index ca7e42f4e6..e73eb1fdfb 100644
--- a/testing/btest/plugins/storage-plugin/src/Plugin.cc
+++ b/testing/btest/plugins/storage-plugin/src/Plugin.cc
@@ -11,7 +11,8 @@ Plugin plugin;
 using namespace btest::plugin::Testing_StorageDummy;
 
 zeek::plugin::Configuration Plugin::Configure() {
-    AddComponent(new zeek::storage::Component("StorageDummy", btest::storage::backend::StorageDummy::Instantiate));
+    AddComponent(
+        new zeek::storage::BackendComponent("StorageDummy", btest::storage::backend::StorageDummy::Instantiate));
 
     zeek::plugin::Configuration config;
     config.name = "Testing::StorageDummy";
diff --git a/testing/btest/plugins/storage-plugin/src/StorageDummy.cc b/testing/btest/plugins/storage-plugin/src/StorageDummy.cc
index f05f3ed403..59471dac18 100644
--- a/testing/btest/plugins/storage-plugin/src/StorageDummy.cc
+++ b/testing/btest/plugins/storage-plugin/src/StorageDummy.cc
@@ -49,9 +49,10 @@ OperationResult StorageDummy::DoPut(ResultCallback* cb, ValPtr key, ValPtr value
     if ( timeout_put )
         return {ReturnCode::TIMEOUT};
 
-    auto json_key = key->ToJSON()->ToStdString();
-    auto json_value = value->ToJSON()->ToStdString();
-    data[json_key] = json_value;
+    auto key_data = serializer->Serialize(key);
+    auto val_data = serializer->Serialize(value);
+
+    data[*key_data] = *val_data;
 
     return {ReturnCode::SUCCESS};
 }
@@ -59,31 +60,31 @@ OperationResult StorageDummy::DoPut(ResultCallback* cb, ValPtr value
  * The workhorse method for Get(). This must be implemented for plugins.
  */
 OperationResult StorageDummy::DoGet(ResultCallback* cb, ValPtr key) {
-    auto json_key = key->ToJSON();
-    auto it = data.find(json_key->ToStdString());
+    auto key_data = serializer->Serialize(key);
+
+    auto it = data.find(*key_data);
     if ( it == data.end() )
         return {ReturnCode::KEY_NOT_FOUND};
 
-    auto val = zeek::detail::ValFromJSON(it->second.c_str(), val_type, Func::nil);
-    if ( std::holds_alternative<ValPtr>(val) ) {
-        ValPtr val_v = std::get<ValPtr>(val);
-        return {ReturnCode::SUCCESS, "", val_v};
-    }
+    auto val = serializer->Unserialize(it->second, val_type);
+    if ( val )
+        return {ReturnCode::SUCCESS, "", val.value()};
 
-    return {ReturnCode::OPERATION_FAILED, std::get<std::string>(val)};
+    return {ReturnCode::UNSERIALIZATION_FAILED, val.error()};
 }
 
 /**
  * The workhorse method for Erase(). This must be implemented for plugins.
  */
 OperationResult StorageDummy::DoErase(ResultCallback* cb, ValPtr key) {
-    auto json_key = key->ToJSON();
-    auto it = data.find(json_key->ToStdString());
-    if ( it == data.end() )
-        return {ReturnCode::KEY_NOT_FOUND};
+    auto key_data = serializer->Serialize(key);
 
-    data.erase(it);
-    return {ReturnCode::SUCCESS};
+    if ( auto it = data.find(*key_data); it != data.end() ) {
+        data.erase(it);
+        return {ReturnCode::SUCCESS};
+    }
+
+    return {ReturnCode::KEY_NOT_FOUND};
 }
 
 } // namespace btest::storage::backend
diff --git a/testing/btest/plugins/storage-plugin/src/StorageDummy.h b/testing/btest/plugins/storage-plugin/src/StorageDummy.h
index 0fa718fc4c..c295aee82e 100644
--- a/testing/btest/plugins/storage-plugin/src/StorageDummy.h
+++ b/testing/btest/plugins/storage-plugin/src/StorageDummy.h
@@ -50,7 +50,7 @@ public:
     zeek::storage::OperationResult DoErase(zeek::storage::ResultCallback* cb, zeek::ValPtr key) override;
 
 private:
-    std::map<std::string, std::string> data;
+    std::map<zeek::storage::detail::byte_buffer, zeek::storage::detail::byte_buffer> data;
 
     bool open = false;
 };
diff --git a/testing/btest/scripts/base/frameworks/storage/redis-disconnect.zeek b/testing/btest/scripts/base/frameworks/storage/redis-disconnect.zeek
index 33f66b1d43..55a8f2c320 100644
--- a/testing/btest/scripts/base/frameworks/storage/redis-disconnect.zeek
+++ b/testing/btest/scripts/base/frameworks/storage/redis-disconnect.zeek
@@ -26,6 +26,7 @@ event Storage::backend_lost(tag: Storage::Backend, config: any, reason: string)
 
 event zeek_init()
 	{
 	local opts: Storage::BackendOptions;
+	opts$serializer = Storage::JSON;
 	opts$redis = [ $server_host="127.0.0.1", $server_port=to_port(getenv(
 	    "REDIS_PORT")), $key_prefix="testing" ];
diff --git a/testing/btest/scripts/base/frameworks/storage/redis-sync.zeek b/testing/btest/scripts/base/frameworks/storage/redis-sync.zeek
index ffaf42f4c8..e19503df5d 100644
--- a/testing/btest/scripts/base/frameworks/storage/redis-sync.zeek
+++ b/testing/btest/scripts/base/frameworks/storage/redis-sync.zeek
@@ -24,6 +24,7 @@ event Storage::backend_lost(tag: Storage::Backend, config: any, reason: string)
 
 event zeek_init()
 	{
 	local opts: Storage::BackendOptions;
+	opts$serializer = Storage::JSON;
 	opts$redis = [ $server_host="127.0.0.1", $server_port=to_port(getenv(
 	    "REDIS_PORT")), $key_prefix="testing" ];
diff --git a/testing/btest/scripts/base/frameworks/storage/sqlite-basic.zeek b/testing/btest/scripts/base/frameworks/storage/sqlite-basic.zeek
index c910353b0e..2e6a815f97 100644
--- a/testing/btest/scripts/base/frameworks/storage/sqlite-basic.zeek
+++ b/testing/btest/scripts/base/frameworks/storage/sqlite-basic.zeek
@@ -16,6 +16,7 @@ event zeek_init()
 	{
 	# Create a database file in the .tmp directory with a 'testing' table
 	local opts: Storage::BackendOptions;
+	opts$serializer = Storage::JSON;
 	opts$sqlite = [ $database_path="test.sqlite", $table_name="testing" ];
 
 	local key = "key1234";
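
Because serializers are registered as ordinary components, another format can be plugged in the same way the JSON plugin does it above, and scripts would then select it through opts$serializer just as the tests select Storage::JSON. The sketch below is purely hypothetical (the MyFormat class, the plugin name, and the implied Storage::MYFORMAT enum value are all made up for illustration); it only mirrors the structure of JSON.h and Plugin.cc from this diff, with the actual Serialize()/Unserialize() bodies left to a matching MyFormat.cc.

// Hypothetical example of registering a second serializer -- not part of this
// change; it mirrors the JSON plugin above.
#include <memory>
#include <optional>
#include <string>

#include "zeek/plugin/Plugin.h"
#include "zeek/storage/Component.h"
#include "zeek/storage/Serializer.h"

namespace btest::storage::serializer::myformat {

class MyFormat final : public zeek::storage::Serializer {
public:
    static std::unique_ptr<zeek::storage::Serializer> Instantiate() { return std::make_unique<MyFormat>(); }

    MyFormat() : Serializer("MYFORMAT") {}

    // Definitions would live in MyFormat.cc; declared here to mirror JSON.h.
    std::optional<zeek::storage::detail::byte_buffer> Serialize(zeek::ValPtr val) override;
    zeek::expected<zeek::ValPtr, std::string> Unserialize(zeek::storage::detail::byte_buffer_span buf,
                                                          zeek::TypePtr type) override;
};

class Plugin final : public zeek::plugin::Plugin {
public:
    zeek::plugin::Configuration Configure() override {
        AddComponent(new zeek::storage::SerializerComponent("MYFORMAT", MyFormat::Instantiate));

        zeek::plugin::Configuration config;
        config.name = "Testing::Storage_Serializer_MyFormat";
        config.description = "Hypothetical serializer registration example";
        return config;
    }
} plugin;

} // namespace btest::storage::serializer::myformat
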