diff --git a/CHANGES b/CHANGES
index e40dc49085..a8e742f1bf 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,4 +1,120 @@
+1.6-dev-1095 | 2011-08-13 11:59:07 -0700
+
+  * A large number of script documentation updates. Closes #543. (Jon Siwek)
+
+  * Workaround for FreeBSD CMake port missing debug flags. (Jon Siwek)
+
+  * piped_exec() can now deal with null bytes. (Seth Hall)
+
+  * Fix vector initialization for lists of records with optional types. Closes #485. (Jon Siwek)
+
+  * Fix redef'ing records with &default empty set fields. Closes #460. (Jon Siwek)
+
+  * Fix ConnSize_Analyzer when used in conjunction with the connection compressor. (Gregor Maier)
+
+  * Fix reporter using part of the actual message as a format string. (Jon Siwek)
+
+  * Fixing reporter's location tracking. Closes #492. (Robin Sommer)
+
+  * Turning DNS errors into warnings. Closes #255. (Robin Sommer)
+
+  * Logging's path_func now receives the log record as argument (see the sketch below). Closes #555. (Robin Sommer)
+
+  * Functions can now be logged; their full body gets recorded. Closes #506. (Robin Sommer)
+
+  * Bugfix for hostname notice email extension. (Seth Hall)
+
+  * Updates for notice framework. (Seth Hall)
+
+    - New ACTION_ADD_GEODATA to add geodata to notices in an extension field named remote_location.
+
+    - Loading extend-email/hostnames by default now that it only does anything when the ACTION_EMAIL action is applied (finally).
+
+  * Updates to local.bro. (Seth Hall)
+
+  * Added the profiling script. (Seth Hall)
+
+  * Updates for SSH scripts. (Seth Hall)
+
+  * ConnSize analyzer is turned on by default now. (Seth Hall)
+
+  * Updates for the build system and site local scripts for cluster. (Seth Hall)
+
+  * HTTP now uses the extract_filename_from_content_disposition function. (Seth Hall)
+
+  * Major SMTP script refactor. Closes #509. (Jon Siwek and Seth Hall)
+
+  * New variable Site::local_nets_table in utils/site for mapping an address to its defined local subnet.
+
+  * Metrics framework updates, more to come (see the sketch below). (Seth Hall)
+
+
+1.6-dev-1061 | 2011-08-08 18:25:27 -0700
+
+  * A set of new/changed tests regarding the new policy script organisation. (Robin Sommer)
+
+1.6-dev-1058 | 2011-08-08 16:15:18 -0700
+
+  * Reorganisation of the scripts that Bro loads by default. (Seth Hall)
+
+    - policy/ renamed to scripts/
+
+    - By default, BROPATH now contains:
+      - scripts/
+      - scripts/policy
+      - scripts/site
+
+    - The scripts in scripts/base/protocols/ only do logging and state building.
+
+    - All of scripts/base/ is loaded by default. This can, however, be disabled by switching Bro into "bare mode" using the new command-line option --bare-mode (or -b). The scripts in scripts/base/ don't use relative path loading, which makes them easier to reuse in bare mode (e.g., by copying and pasting an individual script).
+
+    - The scripts in scripts/base/frameworks/ add functionality without causing any additional overhead.
+
+    - All "detection" activity happens through scripts in scripts/policy/.
+
+    - bro.init was renamed to base/init-bare.bro, and base/all.bro was renamed to base/init-default.bro.
+
+    - local.bro now loads more functionality from policy/ and adds more documentation. (Seth Hall)
+
+  * Adding default_path_func() to the logging framework, which makes the default naming scheme controllable at the script level. (Robin Sommer)
+
+  * Reworking logging's postprocessor logic so that postprocessor commands are no longer run by the log writers themselves, but instead by a script-level function. (Robin Sommer)
+
+  * The communication subsystem is now off by default and must be enabled explicitly with a new BiF, enable_communication(). Closes #540. (Robin Sommer)
+
+  * The hostname notice email extension now only adds hostnames for emailed notices. (Seth Hall)
+
+  * Cleaning up doc generation. (Seth Hall)
+
 1.6-dev-1044 | 2011-08-05 19:07:32 -0700
 
   * Fixing memory (and CPU) leak in log writer.
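The path_func change above means that a log filter's path function now receives the record being written, so output paths can depend on log content. The following is a minimal sketch, not part of this commit: it assumes an HTTP log stream ID of HTTP::HTTP and the HTTP::Info record with its optional status_code field, and uses the new three-argument signature declared in base/frameworks/logging/main.bro further down in this diff.

function http_path_by_status(id: Log::ID, path: string, rec: HTTP::Info): string
	{
	# Split the HTTP log by response status class, e.g. "http_2xx", "http_4xx".
	if ( rec?$status_code )
		return fmt("%s_%dxx", path, rec$status_code / 100);

	# Fall back to the suggested path if no status code was seen.
	return path;
	}

event bro_init()
	{
	Log::add_filter(HTTP::HTTP, [$name="http-by-status", $path_func=http_path_by_status]);
	}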
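The metrics framework entries above refer to the new filter-based API defined in base/frameworks/metrics/main.bro later in this diff. Below is a hedged usage sketch modeled on the conn-example.bro that moved to policy/frameworks/metrics/; the CONNS_ORIGINATED ID is invented for the example.

@load base/frameworks/metrics

redef enum Metrics::ID += {
	CONNS_ORIGINATED,
};

event bro_init()
	{
	# Aggregate originating hosts into /24 networks and flush the counts
	# to the metrics log every 5 minutes.
	Metrics::add_filter(CONNS_ORIGINATED, [$aggregation_mask=24, $break_interval=5mins]);
	}

event connection_established(c: connection)
	{
	Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h], 1);
	}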
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f1a5e0b772..4a4d48c6eb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -65,7 +65,8 @@ set(EXTRA_COMPILE_FLAGS "-Wall -Wno-unused")
 if (ENABLE_DEBUG)
     set(CMAKE_BUILD_TYPE Debug)
-    set(EXTRA_COMPILE_FLAGS "${EXTRA_COMPILE_FLAGS} -DDEBUG")
+    # manual add of -g works around its omission in FreeBSD's CMake port
+    set(EXTRA_COMPILE_FLAGS "${EXTRA_COMPILE_FLAGS} -g -DDEBUG")
 else ()
     set(CMAKE_BUILD_TYPE RelWithDebInfo)
 endif ()
diff --git a/VERSION b/VERSION
index 59b89dec9a..c8e03864f4 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.6-dev-1044
+1.6-dev-1095
diff --git a/aux/binpac b/aux/binpac
index 4fc13f7c69..a3a9410ded 160000
--- a/aux/binpac
+++ b/aux/binpac
@@ -1 +1 @@
-Subproject commit 4fc13f7c6987b4163609e3df7a31f38501411cb7
+Subproject commit a3a9410dedc842f6bb9859642f334ed354633b57
diff --git a/aux/bro-aux b/aux/bro-aux
index 86990f1640..d68b98bb99 160000
--- a/aux/bro-aux
+++ b/aux/bro-aux
@@ -1 +1 @@
-Subproject commit 86990f1640d986e39d5bb1287dbeb03b59a464f0
+Subproject commit d68b98bb995a105b257f805ec4ff22c4929c7476
diff --git a/aux/broccoli b/aux/broccoli
index 2455dbebc1..03e6d398ed 160000
--- a/aux/broccoli
+++ b/aux/broccoli
@@ -1 +1 @@
-Subproject commit 2455dbebc15f06b3cc5eccb701727baf5472cf24
+Subproject commit 03e6d398edf422140ba9f50e6fabbec33ee2f3cb
diff --git a/aux/broctl b/aux/broctl
index 870ee782bf..c39622855e 160000
--- a/aux/broctl
+++ b/aux/broctl
@@ -1 +1 @@
-Subproject commit 870ee782bfeb3a60bac40fce4273436e5f2d280b
+Subproject commit c39622855e3c3a5cc94c7376f86184ed1db1939a
diff --git a/aux/btest b/aux/btest
index ab78a66dd7..d1c620d98c 160000
--- a/aux/btest
+++ b/aux/btest
@@ -1 +1 @@
-Subproject commit ab78a66dd782f165ddf921faaf1f065b2f987481
+Subproject commit d1c620d98ce9d9c0b203314108b413784965d2ed
diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake
index fbf93ce869..37f4b41279 100644
--- a/doc/scripts/DocSourcesList.cmake
+++ b/doc/scripts/DocSourcesList.cmake
@@ -34,9 +34,8 @@ rest_target(${psd} base/frameworks/dpd/main.bro)
 rest_target(${psd} base/frameworks/intel/main.bro)
 rest_target(${psd} base/frameworks/logging/main.bro)
 rest_target(${psd} base/frameworks/logging/writers/ascii.bro)
-rest_target(${psd} base/frameworks/metrics/conn-example.bro)
-rest_target(${psd} base/frameworks/metrics/http-example.bro)
 rest_target(${psd} base/frameworks/metrics/main.bro)
+rest_target(${psd} base/frameworks/notice/actions/add-geodata.bro)
 rest_target(${psd} base/frameworks/notice/actions/drop.bro)
 rest_target(${psd} base/frameworks/notice/actions/email_admin.bro)
 rest_target(${psd} base/frameworks/notice/actions/page.bro)
@@ -70,6 +69,8 @@ rest_target(${psd} base/protocols/mime/file-extract.bro)
 rest_target(${psd} base/protocols/mime/file-hash.bro)
 rest_target(${psd} base/protocols/mime/file-ident.bro)
 rest_target(${psd} base/protocols/rpc/base.bro)
+rest_target(${psd} base/protocols/smtp/entities-excerpt.bro)
+rest_target(${psd} base/protocols/smtp/entities.bro)
 rest_target(${psd} base/protocols/smtp/main.bro)
 rest_target(${psd} base/protocols/ssh/main.bro)
 rest_target(${psd}
base/protocols/ssl/consts.bro) @@ -93,6 +94,9 @@ rest_target(${psd} policy/frameworks/control/controllee.bro) rest_target(${psd} policy/frameworks/control/controller.bro) rest_target(${psd} policy/frameworks/dpd/detect-protocols.bro) rest_target(${psd} policy/frameworks/dpd/packet-segment-logging.bro) +rest_target(${psd} policy/frameworks/metrics/conn-example.bro) +rest_target(${psd} policy/frameworks/metrics/http-example.bro) +rest_target(${psd} policy/frameworks/metrics/ssl-example.bro) rest_target(${psd} policy/frameworks/software/version-changes.bro) rest_target(${psd} policy/frameworks/software/vulnerable.bro) rest_target(${psd} policy/frameworks/tunnel.bro) @@ -101,6 +105,7 @@ rest_target(${psd} policy/integration/barnyard2/event.bro) rest_target(${psd} policy/integration/barnyard2/types.bro) rest_target(${psd} policy/misc/analysis-groups.bro) rest_target(${psd} policy/misc/loaded-scripts.bro) +rest_target(${psd} policy/misc/profiling.bro) rest_target(${psd} policy/misc/trim-trace-file.bro) rest_target(${psd} policy/protocols/conn/known-hosts.bro) rest_target(${psd} policy/protocols/conn/known-services.bro) @@ -116,8 +121,12 @@ rest_target(${psd} policy/protocols/http/headers.bro) rest_target(${psd} policy/protocols/http/software.bro) rest_target(${psd} policy/protocols/http/var-extraction-cookies.bro) rest_target(${psd} policy/protocols/http/var-extraction-uri.bro) +rest_target(${psd} policy/protocols/smtp/blocklists.bro) rest_target(${psd} policy/protocols/smtp/detect-suspicious-orig.bro) rest_target(${psd} policy/protocols/smtp/software.bro) +rest_target(${psd} policy/protocols/ssh/detect-bruteforcing.bro) +rest_target(${psd} policy/protocols/ssh/geo-data.bro) +rest_target(${psd} policy/protocols/ssh/interesting-hostnames.bro) rest_target(${psd} policy/protocols/ssh/software.bro) rest_target(${psd} policy/protocols/ssl/known-certs.bro) rest_target(${psd} policy/protocols/ssl/validate-certs.bro) @@ -125,4 +134,8 @@ rest_target(${psd} policy/tuning/defaults/packet-fragments.bro) rest_target(${psd} policy/tuning/defaults/remove-high-volume-notices.bro) rest_target(${psd} policy/tuning/defaults/warnings.bro) rest_target(${psd} policy/tuning/track-all-assets.bro) +rest_target(${psd} site/local-manager.bro) +rest_target(${psd} site/local-proxy.bro) +rest_target(${psd} site/local-worker.bro) rest_target(${psd} site/local.bro) +rest_target(${psd} test-all-policy.bro) diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index d7a321627f..d834c08053 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -1,17 +1,27 @@ include(InstallPackageConfigFile) install(DIRECTORY ./ DESTINATION ${BRO_SCRIPT_INSTALL_PATH} FILES_MATCHING - PATTERN "all.bro" EXCLUDE - PATTERN "site/local.bro" EXCLUDE + PATTERN "site/local*" EXCLUDE PATTERN "*.bro" PATTERN "*.sig" - PATTERN "*.osf" + PATTERN "*.fp" ) -# Install as a config file since the local.bro script is meant to be +# Install all local* scripts as config files since they are meant to be # user modify-able. 
InstallPackageConfigFile( ${CMAKE_CURRENT_SOURCE_DIR}/site/local.bro ${BRO_SCRIPT_INSTALL_PATH}/site local.bro) - +InstallPackageConfigFile( + ${CMAKE_CURRENT_SOURCE_DIR}/site/local-manager.bro + ${BRO_SCRIPT_INSTALL_PATH}/site + local-manager.bro) +InstallPackageConfigFile( + ${CMAKE_CURRENT_SOURCE_DIR}/site/local-proxy.bro + ${BRO_SCRIPT_INSTALL_PATH}/site + local-proxy.bro) +InstallPackageConfigFile( + ${CMAKE_CURRENT_SOURCE_DIR}/site/local-worker.bro + ${BRO_SCRIPT_INSTALL_PATH}/site + local-worker.bro) diff --git a/scripts/base/frameworks/cluster/__load__.bro b/scripts/base/frameworks/cluster/__load__.bro index c2ff70144c..03262d3d75 100644 --- a/scripts/base/frameworks/cluster/__load__.bro +++ b/scripts/base/frameworks/cluster/__load__.bro @@ -9,10 +9,6 @@ redef peer_description = Cluster::node; # Add a cluster prefix. @prefixes += cluster -# Make this a controllable node since all cluster nodes are inherently -# controllable. -@load frameworks/control/controllee - ## If this script isn't found anywhere, the cluster bombs out. ## Loading the cluster framework requires that a script by this name exists ## somewhere in the BROPATH. The only thing in the file should be the @@ -23,7 +19,7 @@ redef peer_description = Cluster::node; @load ./setup-connections -# Don't start the listening process until we're a bit more sure that the +# Don't load the listening script until we're a bit more sure that the # cluster framework is actually being enabled. @load frameworks/communication/listen-clear diff --git a/scripts/base/frameworks/cluster/main.bro b/scripts/base/frameworks/cluster/main.bro index 0fc793e7f5..f6066e5800 100644 --- a/scripts/base/frameworks/cluster/main.bro +++ b/scripts/base/frameworks/cluster/main.bro @@ -47,6 +47,25 @@ export { time_machine: string &optional; }; + ## This function can be called at any time to determine if the cluster + ## framework is being enabled for this run. + global is_enabled: function(): bool; + + ## This function can be called at any time to determine what type of + ## cluster node the current Bro instance is going to be acting as. + ## :bro:id:`is_enabled` should be called first to find out if this is + ## actually going to be a cluster node. + global local_node_type: function(): NodeType; + + ## This gives the value for the number of workers currently connected to, + ## and it's maintained internally by the cluster framework. It's + ## primarily intended for use by managers to find out how many workers + ## should be responding to requests. + global worker_count: count = 0; + + ## The cluster layout definition. This should be placed into a filter + ## named cluster-layout.bro somewhere in the BROPATH. It will be + ## automatically loaded if the CLUSTER_NODE environment variable is set. const nodes: table[string] of Node = {} &redef; ## This is usually supplied on the command line for each instance @@ -54,7 +73,29 @@ export { const node = getenv("CLUSTER_NODE") &redef; } -event bro_init() +function is_enabled(): bool + { + return (node != ""); + } + +function local_node_type(): NodeType + { + return nodes[node]$node_type; + } + + +event remote_connection_handshake_done(p: event_peer) + { + if ( nodes[p$descr]$node_type == WORKER ) + ++worker_count; + } +event remote_connection_closed(p: event_peer) + { + if ( nodes[p$descr]$node_type == WORKER ) + --worker_count; + } + +event bro_init() &priority=5 { # If a node is given, but it's an unknown name we need to fail. 
if ( node != "" && node !in nodes ) diff --git a/scripts/base/frameworks/cluster/nodes/manager.bro b/scripts/base/frameworks/cluster/nodes/manager.bro index f4db54fc50..78b9fb7788 100644 --- a/scripts/base/frameworks/cluster/nodes/manager.bro +++ b/scripts/base/frameworks/cluster/nodes/manager.bro @@ -10,11 +10,14 @@ @prefixes += cluster-manager +# Load the script for local site configuration for the manager node. +@load site/local-manager + ## Turn off remote logging since this is the manager and should only log here. redef Log::enable_remote_logging = F; ## Use the cluster's archive logging script. -redef Log::default_rotation_postprocessor = "archive-log"; +redef Log::default_rotation_postprocessor_cmd = "archive-log"; ## We're processing essentially *only* remote events. redef max_remote_events_processed = 10000; diff --git a/scripts/base/frameworks/cluster/nodes/proxy.bro b/scripts/base/frameworks/cluster/nodes/proxy.bro index c1af918842..8340bf1be8 100644 --- a/scripts/base/frameworks/cluster/nodes/proxy.bro +++ b/scripts/base/frameworks/cluster/nodes/proxy.bro @@ -1,6 +1,9 @@ @prefixes += cluster-proxy +# Load the script for local site configuration for proxy nodes. +@load site/local-proxy + ## The proxy only syncs state; does not forward events. redef forward_remote_events = F; redef forward_remote_state_changes = T; @@ -12,5 +15,5 @@ redef Log::enable_local_logging = F; redef Log::enable_remote_logging = T; ## Use the cluster's delete-log script. -redef Log::default_rotation_postprocessor = "delete-log"; +redef Log::default_rotation_postprocessor_cmd = "delete-log"; diff --git a/scripts/base/frameworks/cluster/nodes/worker.bro b/scripts/base/frameworks/cluster/nodes/worker.bro index f8aae6a23c..f534e0aecc 100644 --- a/scripts/base/frameworks/cluster/nodes/worker.bro +++ b/scripts/base/frameworks/cluster/nodes/worker.bro @@ -1,6 +1,9 @@ @prefixes += cluster-worker +# Load the script for local site configuration for the worker nodes. +@load site/local-worker + ## Don't do any local logging. redef Log::enable_local_logging = F; @@ -8,7 +11,7 @@ redef Log::enable_local_logging = F; redef Log::enable_remote_logging = T; ## Use the cluster's delete-log script. -redef Log::default_rotation_postprocessor = "delete-log"; +redef Log::default_rotation_postprocessor_cmd = "delete-log"; ## Record all packets into trace file. # TODO: should we really be setting this to T? 
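The cluster framework above expects its layout in a cluster-layout.bro found somewhere in BROPATH; it is loaded automatically when the CLUSTER_NODE environment variable is set, at which point Cluster::is_enabled() returns T and Cluster::local_node_type() reports the local node's role. A hypothetical minimal layout is sketched below; only the Node fields visible in this diff ($node_type, $ip, $p, $manager, $proxy) are used, so treat the exact field set as an assumption.

# Hypothetical cluster-layout.bro -- not part of this commit. Field names are
# inferred from the setup-connections.bro changes that follow and may be incomplete.
redef Cluster::nodes = {
	["manager"]  = [$node_type=Cluster::MANAGER, $ip=192.168.1.1, $p=47761/tcp],
	["proxy-1"]  = [$node_type=Cluster::PROXY,   $ip=192.168.1.2, $p=47762/tcp, $manager="manager"],
	["worker-1"] = [$node_type=Cluster::WORKER,  $ip=192.168.1.3, $p=47763/tcp,
	                $manager="manager", $proxy="proxy-1"],
};

# With CLUSTER_NODE=worker-1 set in the environment, Cluster::is_enabled()
# returns T and Cluster::local_node_type() returns Cluster::WORKER.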
diff --git a/scripts/base/frameworks/cluster/setup-connections.bro b/scripts/base/frameworks/cluster/setup-connections.bro index c51b59f70f..7928d0c6ec 100644 --- a/scripts/base/frameworks/cluster/setup-connections.bro +++ b/scripts/base/frameworks/cluster/setup-connections.bro @@ -1,4 +1,5 @@ -@load base/frameworks/communication +@load ./main +@load base/frameworks/communication/main module Cluster; @@ -59,13 +60,12 @@ event bro_init() &priority=9 $connect=T, $retry=1mins, $class=node]; } - else if ( me$node_type == WORKER ) { if ( n$node_type == MANAGER && me$manager == i ) Communication::nodes["manager"] = [$host=nodes[i]$ip, $p=nodes[i]$p, $connect=T, $retry=1mins, - $class=node]; + $class=node, $events=manager_events]; if ( n$node_type == PROXY && me$proxy == i ) Communication::nodes["proxy"] = [$host=nodes[i]$ip, $p=nodes[i]$p, diff --git a/scripts/base/frameworks/communication/__load__.bro b/scripts/base/frameworks/communication/__load__.bro index 3b18043db1..a10fe855df 100644 --- a/scripts/base/frameworks/communication/__load__.bro +++ b/scripts/base/frameworks/communication/__load__.bro @@ -1,5 +1 @@ - -# TODO: get rid of this as soon as the Expr.cc hack is changed. -@if ( getenv("ENABLE_COMMUNICATION") != "" ) @load ./main -@endif diff --git a/scripts/base/frameworks/communication/main.bro b/scripts/base/frameworks/communication/main.bro index 6cc9812b47..73e6086f97 100644 --- a/scripts/base/frameworks/communication/main.bro +++ b/scripts/base/frameworks/communication/main.bro @@ -108,6 +108,9 @@ const src_names = { event bro_init() { Log::create_stream(COMMUNICATION, [$columns=Info]); + + if ( |nodes| > 0 ) + enable_communication(); } function do_script_log_common(level: count, src: count, msg: string) diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index c7ffea84cc..a90dd21984 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -27,6 +27,19 @@ export { ev: any &optional; }; + ## Default function for building the path values for log filters if not + ## speficied otherwise by a filter. The default implementation uses ``id`` + ## to derive a name. + ## + ## id: The log stream. + ## path: A suggested path value, which may be either the filter's ``path`` + ## if defined or a fall-back generated internally. + ## rec: An instance of the streams's ``columns`` type with its + ## fields set to the values to logged. + ## + ## Returns: The path to be used for the filter. + global default_path_func: function(id: ID, path: string, rec: any) : string &redef; + ## Filter customizing logging. type Filter: record { ## Descriptive name to reference this filter. @@ -50,7 +63,7 @@ export { ## The specific interpretation of the string is up to ## the used writer, and may for example be the destination ## file name. Generally, filenames are expected to given - ## without any extensions; writers will add appropiate + ## without any extensions; writers will add appropiate ## extensions automatically. path: string &optional; @@ -60,7 +73,15 @@ export { ## different strings for separate calls, but be careful: it's ## easy to flood the disk by returning a new string for each ## connection ... - path_func: function(id: ID, path: string): string &optional; + ## + ## id: The log stream. + ## path: A suggested path value, which may be either the filter's ``path`` + ## if defined or a fall-back generated internally. 
+ ## rec: An instance of the streams's ``columns`` type with its + ## fields set to the values to logged. + ## + ## Returns: The path to be used for the filter. + path_func: function(id: ID, path: string, rec: any): string &optional; ## Subset of column names to record. If not given, all ## columns are recorded. @@ -81,36 +102,34 @@ export { ## Information passed into rotation callback functions. type RotationInfo: record { - writer: Writer; ##< Writer. - path: string; ##< Original path value. - open: time; ##< Time when opened. - close: time; ##< Time when closed. + writer: Writer; ##< Writer. + fname: string; ##< Full name of the rotated file. + path: string; ##< Original path value. + open: time; ##< Time when opened. + close: time; ##< Time when closed. + terminating: bool; ##< True if rotation occured due to Bro shutting down. }; ## Default rotation interval. Zero disables rotation. const default_rotation_interval = 0secs &redef; - ## Default naming suffix format. Uses a strftime() style. - const default_rotation_date_format = "%y-%m-%d_%H.%M.%S" &redef; + ## Default naming format for timestamps embedded into filenames. Uses a strftime() style. + const default_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef; - ## Default postprocessor for writers outputting into files. - const default_rotation_postprocessor = "" &redef; + ## Default shell command to run on rotated files. Empty for none. + const default_rotation_postprocessor_cmd = "" &redef; - ## Default function to construct the name of a rotated output file. - ## The default implementation appends info$date_fmt to the original - ## file name. - ## - ## info: Meta-data about the file to be rotated. - global default_rotation_path_func: function(info: RotationInfo) : string &redef; + ## Specifies the default postprocessor function per writer type. Entries in this + ## table are initialized by each writer type. + const default_rotation_postprocessors: table[Writer] of function(info: RotationInfo) : bool &redef; ## Type for controlling file rotation. type RotationControl: record { ## Rotation interval. interv: interval &default=default_rotation_interval; - ## Format for timestamps embedded into rotated file names. - date_fmt: string &default=default_rotation_date_format; - ## Postprocessor process to run on rotate file. - postprocessor: string &default=default_rotation_postprocessor; + ## Callback function to trigger for rotated files. If not set, the default + ## comes out of default_rotation_postprocessors. + postprocessor: function(info: RotationInfo) : bool &optional; }; ## Specifies rotation parameters per ``(id, path)`` tuple. @@ -133,6 +152,8 @@ export { global flush: function(id: ID): bool; global add_default_filter: function(id: ID) : bool; global remove_default_filter: function(id: ID) : bool; + + global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool; } # We keep a script-level copy of all filters so that we can manipulate them. @@ -140,10 +161,39 @@ global filters: table[ID, string] of Filter; @load logging.bif.bro # Needs Filter and Stream defined. -function default_rotation_path_func(info: RotationInfo) : string +module Log; + +# Used internally by the log manager. 
+function __default_rotation_postprocessor(info: RotationInfo) : bool { - local date_fmt = rotation_control[info$writer, info$path]$date_fmt; - return fmt("%s-%s", info$path, strftime(date_fmt, info$open)); + if ( info$writer in default_rotation_postprocessors ) + return default_rotation_postprocessors[info$writer](info); + } + +function default_path_func(id: ID, path: string, rec: any) : string + { + # TODO for Seth: Do what you want. :) + return path; + } + +# Run post-processor on file. If there isn't any postprocessor defined, +# we move the file to a nicer name. +function run_rotation_postprocessor_cmd(info: RotationInfo, npath: string) : bool + { + local pp_cmd = default_rotation_postprocessor_cmd; + + if ( pp_cmd == "" ) + return T; + + # The date format is hard-coded here to provide a standardized + # script interface. + system(fmt("%s %s %s %s %s %d", + pp_cmd, npath, info$path, + strftime("%y-%m-%d_%H.%M.%S", info$open), + strftime("%y-%m-%d_%H.%M.%S", info$close), + info$terminating)); + + return T; } function create_stream(id: ID, stream: Stream) : bool @@ -159,9 +209,15 @@ function disable_stream(id: ID) : bool if ( ! __disable_stream(id) ) return F; } - + function add_filter(id: ID, filter: Filter) : bool { + # This is a work-around for the fact that we can't forward-declare + # the default_path_func and then use it as &default in the record + # definition. + if ( ! filter?$path_func ) + filter$path_func = default_path_func; + filters[id, filter$name] = filter; return __add_filter(id, filter); } diff --git a/scripts/base/frameworks/logging/writers/ascii.bro b/scripts/base/frameworks/logging/writers/ascii.bro index bf9fb84d01..1b5b1be33d 100644 --- a/scripts/base/frameworks/logging/writers/ascii.bro +++ b/scripts/base/frameworks/logging/writers/ascii.bro @@ -26,4 +26,19 @@ export { const unset_field = "-" &redef; } +# Default function to postprocess a rotated ASCII log file. It moves the rotated +# file to a new name that includes a timestamp with the opening time, and then +# runs the writer's default postprocessor command on it. +function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool + { + # Move file to name including both opening and closing time. + local dst = fmt("%s.%s.log", info$path, + strftime(Log::default_rotation_date_format, info$open)); + system(fmt("/bin/mv %s %s", info$fname, dst)); + + # Run default postprocessor. + return Log::run_rotation_postprocessor_cmd(info, dst); + } + +redef Log::default_rotation_postprocessors += { [Log::WRITER_ASCII] = default_rotation_postprocessor_func }; diff --git a/scripts/base/frameworks/metrics/__load__.bro b/scripts/base/frameworks/metrics/__load__.bro index a10fe855df..35f6b30fb5 100644 --- a/scripts/base/frameworks/metrics/__load__.bro +++ b/scripts/base/frameworks/metrics/__load__.bro @@ -1 +1,11 @@ @load ./main + +# The cluster framework must be loaded first. +@load base/frameworks/cluster + +# Load either the cluster support script or the non-cluster support script. +@if ( Cluster::is_enabled() ) +@load ./cluster +@else +@load ./non-cluster +@endif \ No newline at end of file diff --git a/scripts/base/frameworks/metrics/cluster.bro b/scripts/base/frameworks/metrics/cluster.bro new file mode 100644 index 0000000000..94281eb883 --- /dev/null +++ b/scripts/base/frameworks/metrics/cluster.bro @@ -0,0 +1,146 @@ +##! This implements transparent cluster support for the metrics framework. +##! Do not load this file directly. It's only meant to be loaded automatically +##! 
and will be depending on if the cluster framework has been enabled. +##! The goal of this script is to make metric calculation completely and +##! transparently automated when running on a cluster. + +@load base/frameworks/cluster + +module Metrics; + +export { + ## This event is sent by the manager in a cluster to initiate the 3 + ## collection of metrics values + global cluster_collect: event(uid: string, id: ID, filter_name: string); + + ## This event is sent by nodes that are collecting metrics after receiving + ## a request for the metric filter from the manager. + global cluster_results: event(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool); + + ## This event is used internally by workers to send result chunks. + global send_data: event(uid: string, id: ID, filter_name: string, data: MetricTable); + + ## This value allows a user to decide how large of result groups the + ## workers should transmit values. + const cluster_send_in_groups_of = 50 &redef; +} + +# This is maintained by managers so they can know what data they requested and +# when they requested it. +global requested_results: table[string] of time = table() &create_expire=5mins; + +# TODO: Both of the next variables make the assumption that a value never +# takes longer than 5 minutes to transmit from workers to manager. This needs to +# be tunable or self-tuning. These should also be restructured to be +# maintained within a single variable. +# This variable is maintained by manager nodes as they collect and aggregate +# results. +global collecting_results: table[string, ID, string] of MetricTable &create_expire=5mins; + +# This variable is maintained by manager nodes to track how many "dones" they +# collected per collection unique id. Once the number of results for a uid +# matches the number of peer nodes that results should be coming from, the +# result is written out and deleted from here. +# TODO: add an &expire_func in case not all results are received. +global done_with: table[string] of count &create_expire=5mins &default=0; + +# Add events to the cluster framework to make this work. +redef Cluster::manager_events += /Metrics::cluster_collect/; +redef Cluster::worker_events += /Metrics::cluster_results/; + +# The metrics collection process can only be done by a manager. +@if ( Cluster::local_node_type() == Cluster::MANAGER ) +event Metrics::log_it(filter: Filter) + { + local uid = unique_id(""); + + # Set some tracking variables. + requested_results[uid] = network_time(); + collecting_results[uid, filter$id, filter$name] = table(); + + # Request data from peers. + event Metrics::cluster_collect(uid, filter$id, filter$name); + # Schedule the log_it event for the next break period. + schedule filter$break_interval { Metrics::log_it(filter) }; + } +@endif + +@if ( Cluster::local_node_type() == Cluster::WORKER ) + +event Metrics::send_data(uid: string, id: ID, filter_name: string, data: MetricTable) + { + #print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid); + + local local_data: MetricTable; + local num_added = 0; + for ( index in data ) + { + local_data[index] = data[index]; + delete data[index]; + + # Only send cluster_send_in_groups_of at a time. Queue another + # event to send the next group. + if ( cluster_send_in_groups_of == ++num_added ) + break; + } + + local done = F; + # If data is empty, this metric is done. + if ( |data| == 0 ) + done = T; + + event Metrics::cluster_results(uid, id, filter_name, local_data, done); + if ( ! 
done ) + event Metrics::send_data(uid, id, filter_name, data); + } + +event Metrics::cluster_collect(uid: string, id: ID, filter_name: string) + { + #print fmt("WORKER %s: received the cluster_collect event.", Cluster::node); + + event Metrics::send_data(uid, id, filter_name, store[id, filter_name]); + + # Lookup the actual filter and reset it, the reference to the data + # currently stored will be maintained interally by the send_data event. + reset(filter_store[id, filter_name]); + } +@endif + + +@if ( Cluster::local_node_type() == Cluster::MANAGER ) + +event Metrics::cluster_results(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool) + { + #print fmt("MANAGER: receiving results from %s", get_event_peer()$descr); + + local local_data = collecting_results[uid, id, filter_name]; + for ( index in data ) + { + if ( index !in local_data ) + local_data[index] = 0; + local_data[index] += data[index]; + } + + # Mark another worker as being "done" for this uid. + if ( done ) + ++done_with[uid]; + + # If the data has been collected from all peers, we are done and ready to log. + if ( Cluster::worker_count == done_with[uid] ) + { + local ts = network_time(); + # Log the time this was initially requested if it's available. + if ( uid in requested_results ) + ts = requested_results[uid]; + + write_log(ts, filter_store[id, filter_name], local_data); + if ( [uid, id, filter_name] in collecting_results ) + delete collecting_results[uid, id, filter_name]; + if ( uid in done_with ) + delete done_with[uid]; + if ( uid in requested_results ) + delete requested_results[uid]; + } + } + +@endif \ No newline at end of file diff --git a/scripts/base/frameworks/metrics/conn-example.bro b/scripts/base/frameworks/metrics/conn-example.bro deleted file mode 100644 index f4e8e71d86..0000000000 --- a/scripts/base/frameworks/metrics/conn-example.bro +++ /dev/null @@ -1,19 +0,0 @@ -@load base/frameworks/metrics - -redef enum Metrics::ID += { - CONNS_ORIGINATED, - CONNS_RESPONDED -}; - -event bro_init() - { - Metrics::configure(CONNS_ORIGINATED, [$aggregation_mask=24, $break_interval=5mins]); - Metrics::configure(CONNS_RESPONDED, [$aggregation_mask=24, $break_interval=5mins]); - } - -event connection_established(c: connection) - { - Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h], 1); - Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h], 1); - } - diff --git a/scripts/base/frameworks/metrics/http-example.bro b/scripts/base/frameworks/metrics/http-example.bro deleted file mode 100644 index 6af4031fd2..0000000000 --- a/scripts/base/frameworks/metrics/http-example.bro +++ /dev/null @@ -1,20 +0,0 @@ -@load base/frameworks/metrics - -redef enum Metrics::ID += { - HTTP_REQUESTS_BY_STATUS_CODE, - HTTP_REQUESTS_BY_HOST, -}; - -event bro_init() - { - Metrics::configure(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_mask=24, $break_interval=10secs]); - Metrics::configure(HTTP_REQUESTS_BY_HOST, [$break_interval=10secs]); - } - -event HTTP::log_http(rec: HTTP::Info) - { - if ( rec?$host ) - Metrics::add_data(HTTP_REQUESTS_BY_HOST, [$index=rec$host], 1); - if ( rec?$status_code ) - Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $index=fmt("%d", rec$status_code)], 1); - } diff --git a/scripts/base/frameworks/metrics/main.bro b/scripts/base/frameworks/metrics/main.bro index b603add6de..38ed17b36f 100644 --- a/scripts/base/frameworks/metrics/main.bro +++ b/scripts/base/frameworks/metrics/main.bro @@ -1,28 +1,19 @@ -##! This is the implementation of the metrics framework +##! 
This is the implementation of the metrics framework. + +@load base/frameworks/notice module Metrics; export { redef enum Log::ID += { METRICS }; - + type ID: enum { - ALL, + NOTHING, }; - const default_aggregation_mask = 24 &redef; - const default_break_interval = 5mins &redef; - - # TODO: configure a metrics filter logging stream to log the current - # metrics configuration in case someone is looking through - # old logs and the configuration has changed since then. - type Filter: record { - name: ID &optional; - ## Global mask by which you'd like to aggregate traffic. - aggregation_mask: count &optional; - ## This is essentially applying names to various subnets. - aggregation_table: table[subnet] of string &optional; - break_interval: interval &default=default_break_interval; - }; + ## The default interval used for "breaking" metrics and writing the + ## current value to the logging stream. + const default_break_interval = 15mins &redef; type Index: record { ## Host is the value to which this metric applies. @@ -35,108 +26,190 @@ export { ## value in a Host header. This is an example of a non-host based ## metric since multiple IP addresses could respond for the same Host ## header value. - index: string &default=""; - }; + str: string &optional; + + ## The CIDR block that this metric applies to. This is typically + ## only used internally for host based aggregation. + network: subnet &optional; + } &log; type Info: record { ts: time &log; - name: ID &log; - index: string &log &optional; - agg_subnet: string &log &optional; + metric_id: ID &log; + filter_name: string &log; + index: Index &log; value: count &log; }; - global add_filter: function(name: ID, filter: Filter); - global add_data: function(name: ID, index: Index, increment: count); + # TODO: configure a metrics filter logging stream to log the current + # metrics configuration in case someone is looking through + # old logs and the configuration has changed since then. + type Filter: record { + ## The :bro:type:`Metrics::ID` that this filter applies to. + id: ID &optional; + ## The name for this filter so that multiple filters can be + ## applied to a single metrics to get a different view of the same + ## metric data being collected (different aggregation, break, etc). + name: string &default="default"; + ## A predicate so that you can decide per index if you would like + ## to accept the data being inserted. + pred: function(index: Index): bool &optional; + ## Global mask by which you'd like to aggregate traffic. + aggregation_mask: count &optional; + ## This is essentially applying names to various subnets. + aggregation_table: table[subnet] of subnet &optional; + ## The interval at which the metric should be "broken" and written + ## to the logging stream. + break_interval: interval &default=default_break_interval; + ## This determines if the result of this filter is sent to the metrics + ## logging stream. One use for the logging framework is as an internal + ## thresholding and statistics gathering utility that is meant to + ## never log but rather to generate notices and derive data. + log: bool &default=T; + ## A straight threshold for generating a notice. + notice_threshold: count &optional; + ## A series of thresholds at which to generate notices. + ## TODO: This is not implemented yet! + notice_thresholds: vector of count &optional; + ## If this and a $notice_threshold value are set, this notice type + ## will be generated by the metrics framework. 
+ note: Notice::Type &optional; + }; + + global add_filter: function(id: ID, filter: Filter); + global add_data: function(id: ID, index: Index, increment: count); + + # This is the event that is used to "finish" metrics and adapt the metrics + # framework for clustered or non-clustered usage. + global log_it: event(filter: Filter); global log_metrics: event(rec: Info); } -global metric_filters: table[ID] of Filter = table(); +redef record Notice::Info += { + metric_index: Index &log &optional; +}; -type MetricIndex: table[string] of count &default=0; -type MetricTable: table[string] of MetricIndex; -global store: table[ID] of MetricTable = table(); +global metric_filters: table[ID] of vector of Filter = table(); +global filter_store: table[ID, string] of Filter = table(); -event bro_init() +type MetricTable: table[Index] of count &default=0; +# This is indexed by metric ID and stream filter name. +global store: table[ID, string] of MetricTable = table(); + +# This stores the current threshold index for filters using the +# $notice_thresholds element. +global thresholds: table[string] of count = {} &default=0; + +event bro_init() &priority=5 { Log::create_stream(METRICS, [$columns=Info, $ev=log_metrics]); } - -function reset(name: ID) - { - store[name] = table(); - } -event log_it(filter: Filter) +function write_log(ts: time, filter: Filter, data: MetricTable) { - # If this node is the manager in a cluster, this needs to request values - # for this metric from all of the workers. - - local name = filter$name; - for ( agg_subnet in store[name] ) + for ( index in data ) { - local metric_values = store[name][agg_subnet]; - for ( index in metric_values ) + local val = data[index]; + local m: Info = [$ts=ts, + $metric_id=filter$id, + $filter_name=filter$name, + $index=index, + $value=val]; + + if ( m$index?$host && + filter?$notice_threshold && + m$value >= filter$notice_threshold ) { - local val = metric_values[index]; - local m: Info = [$ts=network_time(), - $name=name, - $agg_subnet=fmt("%s", agg_subnet), - $index=index, - $value=val]; - if ( index == "" ) - delete m$index; - if ( agg_subnet == "" ) - delete m$agg_subnet; - Log::write(METRICS, m); + NOTICE([$note=filter$note, + $msg=fmt("Metrics threshold crossed by %s %d/%d", index$host, m$value, filter$notice_threshold), + $src=m$index$host, $n=m$value, + $metric_index=index]); } + + else if ( filter?$notice_thresholds && + m$value >= filter$notice_thresholds[thresholds[cat(filter$id,filter$name)]] ) + { + # TODO: implement this + } + + if ( filter$log ) + Log::write(METRICS, m); } - - - reset(name); - - schedule filter$break_interval { log_it(filter) }; } -function add_filter(name: ID, filter: Filter) + +function reset(filter: Filter) + { + store[filter$id, filter$name] = table(); + } + +function add_filter(id: ID, filter: Filter) { if ( filter?$aggregation_table && filter?$aggregation_mask ) { print "INVALID Metric filter: Defined $aggregation_table and $aggregation_mask."; return; } - - filter$name = name; - metric_filters[name] = filter; - store[name] = table(); - - # Only do this on the manager if in a cluster. 
- schedule filter$break_interval { log_it(filter) }; - } - -function add_data(name: ID, index: Index, increment: count) - { - local conf = metric_filters[name]; - - local agg_subnet = ""; - if ( index?$host ) + if ( [id, filter$name] in store ) { - if ( conf?$aggregation_mask ) - { - local agg_mask = conf$aggregation_mask; - agg_subnet = fmt("%s", mask_addr(index$host, agg_mask)); - } - else if ( conf?$aggregation_table ) - agg_subnet = fmt("%s", conf$aggregation_table[index$host]); - else - agg_subnet = fmt("%s", index$host); + print fmt("INVALID Metric filter: Filter with name \"%s\" already exists.", filter$name); + return; + } + if ( filter?$notice_threshold && filter?$notice_thresholds ) + { + print "INVALID Metric filter: Defined both $notice_threshold and $notice_thresholds"; + return; } - if ( agg_subnet !in store[name] ) - store[name][agg_subnet] = table(); + if ( ! filter?$id ) + filter$id = id; - if ( index$index !in store[name][agg_subnet] ) - store[name][agg_subnet][index$index] = 0; - store[name][agg_subnet][index$index] = store[name][agg_subnet][index$index] + increment; + if ( id !in metric_filters ) + metric_filters[id] = vector(); + metric_filters[id][|metric_filters[id]|] = filter; + + filter_store[id, filter$name] = filter; + store[id, filter$name] = table(); + + schedule filter$break_interval { Metrics::log_it(filter) }; + } + +function add_data(id: ID, index: Index, increment: count) + { + if ( id !in metric_filters ) + return; + + local filters = metric_filters[id]; + + # Add the data to any of the defined filters. + for ( filter_id in filters ) + { + local filter = filters[filter_id]; + + # If this filter has a predicate, run the predicate and skip this + # index if the predicate return false. + if ( filter?$pred && + ! filter$pred(index) ) + next; + + local filt_store = store[id, filter$name]; + if ( index?$host ) + { + if ( filter?$aggregation_mask ) + { + index$network = mask_addr(index$host, filter$aggregation_mask); + delete index$host; + } + else if ( filter?$aggregation_table ) + { + index$network = filter$aggregation_table[index$host]; + delete index$host; + } + } + + if ( index !in filt_store ) + filt_store[index] = 0; + filt_store[index] += increment; + } } diff --git a/scripts/base/frameworks/metrics/non-cluster.bro b/scripts/base/frameworks/metrics/non-cluster.bro new file mode 100644 index 0000000000..a96210649e --- /dev/null +++ b/scripts/base/frameworks/metrics/non-cluster.bro @@ -0,0 +1,17 @@ + +module Metrics; + +export { + +} + +event Metrics::log_it(filter: Filter) + { + local id = filter$id; + local name = filter$name; + + write_log(network_time(), filter, store[id, name]); + reset(filter); + + schedule filter$break_interval { Metrics::log_it(filter) }; + } diff --git a/scripts/base/frameworks/notice/__load__.bro b/scripts/base/frameworks/notice/__load__.bro index bbc1fcae0d..2cc93ee933 100644 --- a/scripts/base/frameworks/notice/__load__.bro +++ b/scripts/base/frameworks/notice/__load__.bro @@ -6,7 +6,8 @@ @load ./actions/drop @load ./actions/email_admin @load ./actions/page +@load ./actions/add-geodata -# Load the script to add hostnames to emails by default. -# NOTE: this exposes a memleak in async DNS lookups. -#@load ./extend-email/hostnames +# There shouldn't be any defaul toverhead from loading these since they +# *should* only do anything when notices have the ACTION_EMAIL action applied. 
+@load ./extend-email/hostnames diff --git a/scripts/base/frameworks/notice/actions/add-geodata.bro b/scripts/base/frameworks/notice/actions/add-geodata.bro new file mode 100644 index 0000000000..71e9c6b490 --- /dev/null +++ b/scripts/base/frameworks/notice/actions/add-geodata.bro @@ -0,0 +1,47 @@ +##! This script adds geographic location data to notices for the "remote" +##! host in a connection. It does make the assumption that one of the +##! addresses in a connection is "local" and one is "remote" which is +##! probably a safe assumption to make in most cases. If both addresses +##! are remote, it will use the $src address. + +module Notice; + +export { + redef enum Action += { + ## Indicates that the notice should have geodata added for the + ## "remote" host. :bro:id:`Site::local_nets` must be defined + ## in order for this to work. + ACTION_ADD_GEODATA + }; + + redef record Info += { + ## If libGeoIP support is built in, notices can have geographic + ## information attached to them. + remote_location: geo_location &log &optional; + }; + + ## Notice types which should have the "remote" location looked up. + ## If GeoIP support is not built in, this does nothing. + const lookup_location_types: set[Notice::Type] = {} &redef; + + ## Add a helper to the notice policy for looking up GeoIP data. + redef Notice::policy += { + [$pred(n: Notice::Info) = { return (n$note in Notice::lookup_location_types); }, + $priority = 10], + }; +} + +# This is handled at a high priority in case other notice handlers +# want to use the data. +event notice(n: Notice::Info) &priority=10 + { + if ( ACTION_ADD_GEODATA in n$actions && + |Site::local_nets| > 0 && + ! n?$remote_location ) + { + if ( n?$src && ! Site::is_local_addr(n$src) ) + n$remote_location = lookup_location(n$src); + else if ( n?$dst && ! Site::is_local_addr(n$dst) ) + n$remote_location = lookup_location(n$dst); + } + } \ No newline at end of file diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 45357fde77..7fe403fe08 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -519,7 +519,7 @@ const frag_timeout = 0.0 sec &redef; # packets and IP-level bytes transfered by each endpoint. If # true, these values are returned in the connection's endpoint # record val. 
-const use_conn_size_analyzer = F &redef; +const use_conn_size_analyzer = T &redef; const UDP_INACTIVE = 0; const UDP_ACTIVE = 1; # means we've seen something from this endpoint diff --git a/scripts/base/init-default.bro b/scripts/base/init-default.bro index 32201fddc0..1cf125c3ab 100644 --- a/scripts/base/init-default.bro +++ b/scripts/base/init-default.bro @@ -23,11 +23,11 @@ @load base/frameworks/signatures @load base/frameworks/packet-filter @load base/frameworks/software -@load base/frameworks/intel -@load base/frameworks/metrics @load base/frameworks/communication @load base/frameworks/control @load base/frameworks/cluster +@load base/frameworks/metrics +@load base/frameworks/intel @load base/frameworks/reporter @load base/protocols/conn diff --git a/scripts/base/protocols/http/main.bro b/scripts/base/protocols/http/main.bro index 172fb2bd1c..8cd80bde5f 100644 --- a/scripts/base/protocols/http/main.bro +++ b/scripts/base/protocols/http/main.bro @@ -214,7 +214,7 @@ event http_header(c: connection, is_orig: bool, name: string, value: string) &pr c$http$response_content_length = extract_count(value); else if ( name == "CONTENT-DISPOSITION" && /[fF][iI][lL][eE][nN][aA][mM][eE]/ in value ) - c$http$filename = sub(value, /^.*[fF][iI][lL][eE][nN][aA][mM][eE]=/, ""); + c$http$filename = extract_filename_from_content_disposition(value); } } diff --git a/scripts/base/protocols/mime/file-extract.bro b/scripts/base/protocols/mime/file-extract.bro index 33d2c70513..4e25f19d10 100644 --- a/scripts/base/protocols/mime/file-extract.bro +++ b/scripts/base/protocols/mime/file-extract.bro @@ -16,11 +16,13 @@ export { extract_file: bool &default=F; ## Store the file handle here for the file currently being extracted. - extraction_file: file &optional; - + extraction_file: file &log &optional; + }; + + redef record State += { ## Store a count of the number of files that have been transferred in ## this conversation to create unique file names on disk. - num_extracted_files: count &optional; + num_extracted_files: count &default=0; }; } @@ -34,7 +36,7 @@ event mime_segment_data(c: connection, length: count, data: string) &priority=3 { if ( c$mime$extract_file && c$mime$content_len == 0 ) { - local suffix = fmt("%d.dat", ++c$mime$num_extracted_files); + local suffix = fmt("%d.dat", ++c$mime_state$num_extracted_files); local fname = generate_extraction_filename(extraction_prefix, c, suffix); c$mime$extraction_file = open(fname); enable_raw_output(c$mime$extraction_file); diff --git a/scripts/base/protocols/smtp/__load__.bro b/scripts/base/protocols/smtp/__load__.bro index a10fe855df..b4f089eaf4 100644 --- a/scripts/base/protocols/smtp/__load__.bro +++ b/scripts/base/protocols/smtp/__load__.bro @@ -1 +1,3 @@ @load ./main +@load ./entities +@load ./entities-excerpt \ No newline at end of file diff --git a/scripts/base/protocols/smtp/entities-excerpt.bro b/scripts/base/protocols/smtp/entities-excerpt.bro new file mode 100644 index 0000000000..701ed76399 --- /dev/null +++ b/scripts/base/protocols/smtp/entities-excerpt.bro @@ -0,0 +1,52 @@ +##! This script is for optionally adding a body excerpt to the SMTP +##! entities log. + +@load ./entities + +module SMTP; + +export { + redef record SMTP::EntityInfo += { + ## The entity body excerpt. + excerpt: string &log &default=""; + + ## Internal tracking to know how much of the body should be included + ## in the excerpt. + excerpt_len: count &optional; + }; + + ## This is the default value for how much of the entity body should be + ## included for all MIME entities. 
+ const default_entity_excerpt_len = 0 &redef; + + ## This table defines how much of various entity bodies should be + ## included in excerpts. + const entity_excerpt_len: table[string] of count = {} + &redef + &default = default_entity_excerpt_len; +} + +event mime_segment_data(c: connection, length: count, data: string) &priority=-1 + { + if ( ! c?$smtp ) return; + + if ( c$smtp$current_entity$content_len == 0 ) + c$smtp$current_entity$excerpt_len = entity_excerpt_len[c$smtp$current_entity$mime_type]; + } + +event mime_segment_data(c: connection, length: count, data: string) &priority=-2 + { + if ( ! c?$smtp ) return; + + local ent = c$smtp$current_entity; + if ( ent$content_len < ent$excerpt_len ) + { + if ( ent$content_len + length < ent$excerpt_len ) + ent$excerpt = cat(ent$excerpt, data); + else + { + local x_bytes = ent$excerpt_len - ent$content_len; + ent$excerpt = cat(ent$excerpt, sub_bytes(data, 1, x_bytes)); + } + } + } diff --git a/scripts/base/protocols/smtp/entities.bro b/scripts/base/protocols/smtp/entities.bro new file mode 100644 index 0000000000..4296488ec7 --- /dev/null +++ b/scripts/base/protocols/smtp/entities.bro @@ -0,0 +1,234 @@ +##! Analysis and logging for MIME entities found in SMTP sessions. + +@load base/utils/strings +@load base/utils/files +@load ./main + +module SMTP; + +export { + redef enum Notice::Type += { + ## Indicates that an MD5 sum was calculated for a MIME message. + MD5, + }; + + redef enum Log::ID += { SMTP_ENTITIES }; + + type EntityInfo: record { + ## This is the timestamp of when the MIME content transfer began. + ts: time &log; + uid: string &log; + id: conn_id &log; + ## Internally generated "message id" that ties back to the particular + ## message in the SMTP log where this entity was seen. + mid: string &log; + ## The filename seen in the Content-Disposition header. + filename: string &log &optional; + ## Track how many bytes of the MIME encoded file have been seen. + content_len: count &log &default=0; + ## The mime type of the entity discovered through magic bytes identification. + mime_type: string &log &optional; + + ## The calculated MD5 sum for the MIME entity. + md5: string &log &optional; + ## Optionally calculate the file's MD5 sum. Must be set prior to the + ## first data chunk being see in an event. + calc_md5: bool &default=F; + ## This boolean value indicates if an MD5 sum is being calculated + ## for the current file transfer. + calculating_md5: bool &default=F; + + ## Optionally write the file to disk. Must be set prior to first + ## data chunk being seen in an event. + extract_file: bool &default=F; + ## Store the file handle here for the file currently being extracted. + extraction_file: file &log &optional; + }; + + redef record Info += { + ## The in-progress entity information. + current_entity: EntityInfo &optional; + }; + + redef record State += { + ## Store a count of the number of files that have been transferred in + ## a conversation to create unique file names on disk. + num_extracted_files: count &default=0; + ## Track the number of MIME encoded files transferred during a session. + mime_level: count &default=0; + }; + + ## Generate MD5 sums for these filetypes. + const generate_md5 = /application\/x-dosexec/ # Windows and DOS executables + | /application\/x-executable/ # *NIX executable binary + &redef; + + ## Pattern of file mime types to extract from MIME bodies. + const extract_file_types = /NO_DEFAULT/ &redef; + + ## The on-disk prefix for files to be extracted from MIME entity bodies. 
+ const extraction_prefix = "smtp-entity" &redef; + + global log_mime: event(rec: EntityInfo); +} + +event bro_init() &priority=5 + { + Log::create_stream(SMTP_ENTITIES, [$columns=EntityInfo, $ev=log_mime]); + } + +function set_session(c: connection, new_entity: bool) + { + if ( ! c$smtp?$current_entity || new_entity ) + { + local info: EntityInfo; + info$ts=network_time(); + info$uid=c$uid; + info$id=c$id; + info$mid=c$smtp$mid; + + c$smtp$current_entity = info; + ++c$smtp_state$mime_level; + } + } + +event mime_begin_entity(c: connection) &priority=10 + { + if ( ! c?$smtp ) return; + + set_session(c, T); + } + +# This has priority -10 because other handlers need to know the current +# content_len before it's updated by this handler. +event mime_segment_data(c: connection, length: count, data: string) &priority=-10 + { + if ( ! c?$smtp ) return; + + c$smtp$current_entity$content_len = c$smtp$current_entity$content_len + length; + } + +event mime_segment_data(c: connection, length: count, data: string) &priority=7 + { + if ( ! c?$smtp ) return; + if ( c$smtp$current_entity$content_len == 0 ) + c$smtp$current_entity$mime_type = split1(identify_data(data, T), /;/)[1]; + } + +event mime_segment_data(c: connection, length: count, data: string) &priority=-5 + { + if ( ! c?$smtp ) return; + + if ( c$smtp$current_entity$content_len == 0 ) + { + if ( generate_md5 in c$smtp$current_entity$mime_type ) + c$smtp$current_entity$calc_md5 = T; + + if ( c$smtp$current_entity$calc_md5 ) + { + c$smtp$current_entity$calculating_md5 = T; + md5_hash_init(c$id); + } + } + + if ( c$smtp$current_entity$calculating_md5 ) + md5_hash_update(c$id, data); +} + +## In the event of a content gap during the MIME transfer, detect the state for +## the MD5 sum calculation and stop calculating the MD5 since it would be +## incorrect anyway. +event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5 + { + if ( is_orig || ! c?$smtp || ! c$smtp?$current_entity ) return; + + if ( c$smtp$current_entity$calculating_md5 ) + { + c$smtp$current_entity$calculating_md5 = F; + md5_hash_finish(c$id); + } + } + +event mime_end_entity(c: connection) &priority=-3 + { + # TODO: this check is only due to a bug in mime_end_entity that + # causes the event to be generated twice for the same real event. + if ( ! c?$smtp || ! c$smtp?$current_entity ) + return; + + if ( c$smtp$current_entity$calculating_md5 ) + { + c$smtp$current_entity$md5 = md5_hash_finish(c$id); + + NOTICE([$note=MD5, $msg=fmt("Calculated a hash for a MIME entity from %s", c$id$orig_h), + $sub=c$smtp$current_entity$md5, $conn=c]); + } + } + +event mime_one_header(c: connection, h: mime_header_rec) + { + if ( ! c?$smtp ) return; + + if ( h$name == "CONTENT-DISPOSITION" && + /[fF][iI][lL][eE][nN][aA][mM][eE]/ in h$value ) + c$smtp$current_entity$filename = extract_filename_from_content_disposition(h$value); + } + +event mime_end_entity(c: connection) &priority=-5 + { + if ( ! c?$smtp ) return; + + # This check and the delete below are just to cope with a bug where + # mime_end_entity can be generated multiple times for the same event. + if ( ! c$smtp?$current_entity ) + return; + + # Only log is there was some content. + if ( c$smtp$current_entity$content_len > 0 ) + Log::write(SMTP_ENTITIES, c$smtp$current_entity); + + delete c$smtp$current_entity; + } + +event mime_segment_data(c: connection, length: count, data: string) &priority=5 + { + if ( ! 
c?$smtp ) return; + + if ( extract_file_types in c$smtp$current_entity$mime_type ) + c$smtp$current_entity$extract_file = T; + } + +event mime_segment_data(c: connection, length: count, data: string) &priority=3 + { + if ( ! c?$smtp ) return; + + if ( c$smtp$current_entity$extract_file && + c$smtp$current_entity$content_len == 0 ) + { + local suffix = fmt("%d.dat", ++c$smtp_state$num_extracted_files); + local fname = generate_extraction_filename(extraction_prefix, c, suffix); + c$smtp$current_entity$extraction_file = open(fname); + enable_raw_output(c$smtp$current_entity$extraction_file); + } + } + +event mime_segment_data(c: connection, length: count, data: string) &priority=-5 + { + if ( ! c?$smtp ) return; + + if ( c$smtp$current_entity$extract_file && c$smtp$current_entity?$extraction_file ) + print c$smtp$current_entity$extraction_file, data; + } + +event mime_end_entity(c: connection) &priority=-3 + { + if ( ! c?$smtp ) return; + + # TODO: this check is only due to a bug in mime_end_entity that + # causes the event to be generated twice for the same real event. + if ( ! c$smtp?$current_entity ) + return; + + if ( c$smtp$current_entity?$extraction_file ) + close(c$smtp$current_entity$extraction_file); + } diff --git a/scripts/base/protocols/smtp/main.bro b/scripts/base/protocols/smtp/main.bro index e034a459d4..02b282894c 100644 --- a/scripts/base/protocols/smtp/main.bro +++ b/scripts/base/protocols/smtp/main.bro @@ -4,17 +4,14 @@ module SMTP; export { redef enum Log::ID += { SMTP }; - redef enum Notice::Type += { - ## Indicates that the server sent a reply mentioning an SMTP block list. - BL_Error_Message, - ## Indicates the client's address is seen in the block list error message. - BL_Blocked_Host, - }; - type Info: record { ts: time &log; uid: string &log; id: conn_id &log; + ## This is an internally generated "message id" that can be used to + ## map between SMTP messages and MIME entities in the SMTP entities + ## log. + mid: string &log; helo: string &log &optional; mailfrom: string &log &optional; rcptto: set[string] &log &optional; @@ -30,19 +27,13 @@ export { second_received: string &log &optional; ## The last message the server sent to the client. last_reply: string &log &optional; - files: set[string] &log &optional; path: vector of addr &log &optional; user_agent: string &log &optional; - ## Indicate if this session is currently transmitting SMTP message - ## envelope headers. - in_headers: bool &default=F; ## Indicate if the "Received: from" headers should still be processed. process_received_from: bool &default=T; - ## Maintain the current header for cases where there is header wrapping. - current_header: string &default=""; - ## Indicate when the message is logged and no longer applicable. - done: bool &default=F; + ## Indicates if client activity has been seen, but not yet logged + has_client_activity: bool &default=F; }; type State: record { @@ -61,26 +52,7 @@ export { ## ALL_HOSTS - always capture the entire path. ## NO_HOSTS - never capture the path. const mail_path_capture = ALL_HOSTS &redef; - - # This matches content in SMTP error messages that indicate some - # block list doesn't like the connection/mail. 
- const bl_error_messages = - /spamhaus\.org\// - | /sophos\.com\/security\// - | /spamcop\.net\/bl/ - | /cbl\.abuseat\.org\// - | /sorbs\.net\// - | /bsn\.borderware\.com\// - | /mail-abuse\.com\// - | /b\.barracudacentral\.com\// - | /psbl\.surriel\.com\// - | /antispam\.imp\.ch\// - | /dyndns\.com\/.*spam/ - | /rbl\.knology\.net\// - | /intercept\.datapacket\.net\// - | /uceprotect\.net\// - | /hostkarma\.junkemailfilter\.com\// &redef; - + global log_smtp: event(rec: Info); ## Configure the default ports for SMTP analysis. @@ -121,6 +93,7 @@ function new_smtp_log(c: connection): Info l$ts=network_time(); l$uid=c$uid; l$id=c$id; + l$mid=unique_id("@"); if ( c?$smtp_state && c$smtp_state?$helo ) l$helo = c$smtp_state$helo; @@ -136,26 +109,23 @@ function set_smtp_session(c: connection) if ( ! c?$smtp_state ) c$smtp_state = []; - if ( ! c?$smtp || c$smtp$done ) - { + if ( ! c?$smtp ) c$smtp = new_smtp_log(c); - } } - function smtp_message(c: connection) { - Log::write(SMTP, c$smtp); - - c$smtp$done = T; - # Track the number of messages seen in this session. - ++c$smtp_state$messages_transferred; + if ( c$smtp$has_client_activity ) + Log::write(SMTP, c$smtp); } event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &priority=5 { set_smtp_session(c); local upper_command = to_upper(command); + + if ( upper_command != "QUIT" ) + c$smtp$has_client_activity = T; if ( upper_command == "HELO" || upper_command == "EHLO" ) { @@ -172,26 +142,11 @@ event smtp_request(c: connection, is_orig: bool, command: string, arg: string) & else if ( upper_command == "MAIL" && /^[fF][rR][oO][mM]:/ in arg ) { - # In case this is not the first message in a session we want to - # essentially write out a log, clear the session tracking, and begin - # new session tracking. - if ( c$smtp_state$messages_transferred > 0 ) - { - smtp_message(c); - set_smtp_session(c); - } - local partially_done = split1(arg, /:[[:blank:]]*/)[2]; c$smtp$mailfrom = split1(partially_done, /[[:blank:]]?/)[1]; } - - else if ( upper_command == "DATA" ) - { - c$smtp$in_headers = T; - } } - event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string, msg: string, cont_resp: bool) &priority=5 { @@ -199,169 +154,98 @@ event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string, # This continually overwrites, but we want the last reply, # so this actually works fine. - if ( code != 421 && code >= 400 ) + c$smtp$last_reply = fmt("%d %s", code, msg); + } + +event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string, + msg: string, cont_resp: bool) &priority=-5 + { + set_smtp_session(c); + if ( cmd == "." ) { - c$smtp$last_reply = fmt("%d %s", code, msg); - - # Raise a notice when an SMTP error about a block list is discovered. - if ( bl_error_messages in msg ) - { - local note = BL_Error_Message; - local message = fmt("%s received an error message mentioning an SMTP block list", c$id$orig_h); - - # Determine if the originator's IP address is in the message. - local ips = find_ip_addresses(msg); - local text_ip = ""; - if ( |ips| > 0 && to_addr(ips[0]) == c$id$orig_h ) - { - note = BL_Blocked_Host; - message = fmt("%s is on an SMTP block list", c$id$orig_h); - } - - NOTICE([$note=note, $conn=c, $msg=message, $sub=msg]); - } + # Track the number of messages seen in this session. 
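+ # (The reply to "." marks the end of a message's DATA, so the current
+ # record is written out and a fresh one is started in case the client
+ # sends another message over the same session.)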
+ ++c$smtp_state$messages_transferred; + smtp_message(c); + c$smtp = new_smtp_log(c); } } -event smtp_data(c: connection, is_orig: bool, data: string) &priority=5 +event mime_one_header(c: connection, h: mime_header_rec) &priority=5 { - # Is there something we should be handling from the server? - if ( ! is_orig ) return; - - set_smtp_session(c); - - if ( ! c$smtp$in_headers ) - { - if ( /^[cC][oO][nN][tT][eE][nN][tT]-[dD][iI][sS].*[fF][iI][lL][eE][nN][aA][mM][eE]/ in data ) - { - if ( ! c$smtp?$files ) - c$smtp$files = set(); - data = sub(data, /^.*[fF][iI][lL][eE][nN][aA][mM][eE]=/, ""); - add c$smtp$files[data]; - } - return; - } + if ( ! c?$smtp ) return; + c$smtp$has_client_activity = T; - if ( /^[[:blank:]]*$/ in data ) - c$smtp$in_headers = F; + if ( h$name == "MESSAGE-ID" ) + c$smtp$msg_id = h$value; - # This is to reconstruct headers that tend to wrap around. - if ( /^[[:blank:]]/ in data ) - { - # Remove all but a single space at the beginning (this seems to follow - # the most common behavior). - data = sub(data, /^[[:blank:]]*/, " "); - if ( c$smtp$current_header == "MESSAGE-ID" ) - c$smtp$msg_id += data; - else if ( c$smtp$current_header == "RECEIVED" ) - c$smtp$first_received += data; - else if ( c$smtp$current_header == "IN-REPLY-TO" ) - c$smtp$in_reply_to += data; - else if ( c$smtp$current_header == "SUBJECCT" ) - c$smtp$subject += data; - else if ( c$smtp$current_header == "FROM" ) - c$smtp$from += data; - else if ( c$smtp$current_header == "REPLY-TO" ) - c$smtp$reply_to += data; - else if ( c$smtp$current_header == "USER-AGENT" ) - c$smtp$user_agent += data; - return; - } - # Once there isn't a line starting with a blank, we're not continuing a - # header anymore. - c$smtp$current_header = ""; - - local header_parts = split1(data, /:[[:blank:]]*/); - # TODO: do something in this case? This would definitely be odd. - # Header wrapping needs to be handled more elegantly. This will happen - # if the header value is wrapped immediately after the header key. - if ( |header_parts| != 2 ) - return; - - local header_key = to_upper(header_parts[1]); - c$smtp$current_header = header_key; - - local header_val = header_parts[2]; - - if ( header_key == "MESSAGE-ID" ) - c$smtp$msg_id = header_val; - - else if ( header_key == "RECEIVED" ) + else if ( h$name == "RECEIVED" ) { if ( c$smtp?$first_received ) c$smtp$second_received = c$smtp$first_received; - c$smtp$first_received = header_val; + c$smtp$first_received = h$value; } - - else if ( header_key == "IN-REPLY-TO" ) - c$smtp$in_reply_to = header_val; - - else if ( header_key == "DATE" ) - c$smtp$date = header_val; - - else if ( header_key == "FROM" ) - c$smtp$from = header_val; - - else if ( header_key == "TO" ) + + else if ( h$name == "IN-REPLY-TO" ) + c$smtp$in_reply_to = h$value; + + else if ( h$name == "SUBJECT" ) + c$smtp$subject = h$value; + + else if ( h$name == "FROM" ) + c$smtp$from = h$value; + + else if ( h$name == "REPLY-TO" ) + c$smtp$reply_to = h$value; + + else if ( h$name == "DATE" ) + c$smtp$date = h$value; + + else if ( h$name == "TO" ) { if ( ! 
c$smtp?$to ) c$smtp$to = set(); - add c$smtp$to[header_val]; + add c$smtp$to[h$value]; } - - else if ( header_key == "REPLY-TO" ) - c$smtp$reply_to = header_val; - - else if ( header_key == "SUBJECT" ) - c$smtp$subject = header_val; - else if ( header_key == "X-ORIGINATING-IP" ) + else if ( h$name == "X-ORIGINATING-IP" ) { - local addresses = find_ip_addresses(header_val); + local addresses = find_ip_addresses(h$value); if ( 1 in addresses ) c$smtp$x_originating_ip = to_addr(addresses[1]); } - else if ( header_key == "X-MAILER" || - header_key == "USER-AGENT" || - header_key == "X-USER-AGENT" ) - { - c$smtp$user_agent = header_val; - # Explicitly set the current header here because there are several - # headers bulked under this same key. - c$smtp$current_header = "USER-AGENT"; - } + else if ( h$name == "X-MAILER" || + h$name == "USER-AGENT" || + h$name == "X-USER-AGENT" ) + c$smtp$user_agent = h$value; } # This event handler builds the "Received From" path by reading the # headers in the mail -event smtp_data(c: connection, is_orig: bool, data: string) &priority=3 +event mime_one_header(c: connection, h: mime_header_rec) &priority=3 { # If we've decided that we're done watching the received headers for # whatever reason, we're done. Could be due to only watching until # local addresses are seen in the received from headers. - if ( c$smtp$current_header != "RECEIVED" || - ! c$smtp$process_received_from ) + if ( ! c?$smtp || h$name != "RECEIVED" || ! c$smtp$process_received_from ) return; - - local text_ip = find_address_in_smtp_header(data); + + local text_ip = find_address_in_smtp_header(h$value); if ( text_ip == "" ) return; local ip = to_addr(text_ip); - + if ( ! addr_matches_host(ip, mail_path_capture) && ! Site::is_private_addr(ip) ) { c$smtp$process_received_from = F; } - if ( c$smtp$path[|c$smtp$path|-1] != ip ) c$smtp$path[|c$smtp$path|] = ip; } - event connection_state_remove(c: connection) &priority=-5 { - if ( c?$smtp && ! c$smtp$done ) + if ( c?$smtp ) smtp_message(c); } diff --git a/scripts/base/protocols/ssh/main.bro b/scripts/base/protocols/ssh/main.bro index 7cc87b6684..5233c6da97 100644 --- a/scripts/base/protocols/ssh/main.bro +++ b/scripts/base/protocols/ssh/main.bro @@ -1,74 +1,58 @@ +##! Base SSH analysis script. The heuristic to blindly determine success or +##! failure for SSH connections is implemented here. At this time, it only +##! uses the size of the data being returned from the server to make the +##! heuristic determination about success of the connection. +##! Requires that :bro:id:`use_conn_size_analyzer` is set to T! The heuristic +##! is not attempted if the connection size analyzer isn't enabled. module SSH; export { redef enum Log::ID += { SSH }; - redef enum Notice::Type += { - Login, - Password_Guessing, - Login_By_Password_Guesser, - Login_From_Interesting_Hostname, - Bytecount_Inconsistency, - }; - type Info: record { ts: time &log; uid: string &log; id: conn_id &log; + ## Indicates if the login was heuristically guessed to be "success" + ## or "failure". status: string &log &optional; - direction: string &log &optional; - remote_location: geo_location &log &optional; + ## Direction of the connection. If the client was a local host + ## logging into an external host, this would be OUTBOUD. INBOUND + ## would be set for the opposite situation. + # TODO: handle local-local and remote-remote better. + direction: Direction &log &optional; + ## The software string given by the client. 
client: string &log &optional; + ## The software string given by the server. server: string &log &optional; + ## The amount of data returned from the server. This is currently + ## the only measure of the success heuristic and it is logged to + ## assist analysts looking at the logs to make their own determination + ## about the success on a case-by-case basis. resp_size: count &log &default=0; ## Indicate if the SSH session is done being watched. done: bool &default=F; }; - - const password_guesses_limit = 30 &redef; - # The size in bytes at which the SSH connection is presumed to be - # successful. + ## The size in bytes at which the SSH connection is presumed to be + ## successful. const authentication_data_size = 5500 &redef; - # The amount of time to remember presumed non-successful logins to build - # model of a password guesser. - const guessing_timeout = 30 mins &redef; - - # The set of countries for which you'd like to throw notices upon successful login - # requires Bro compiled with libGeoIP support - const watched_countries: set[string] = {"RO"} &redef; - - # Strange/bad host names to originate successful SSH logins - const interesting_hostnames = - /^d?ns[0-9]*\./ | - /^smtp[0-9]*\./ | - /^mail[0-9]*\./ | - /^pop[0-9]*\./ | - /^imap[0-9]*\./ | - /^www[0-9]*\./ | - /^ftp[0-9]*\./ &redef; - - # This is a table with orig subnet as the key, and subnet as the value. - const ignore_guessers: table[subnet] of subnet &redef; - - # If true, we tell the event engine to not look at further data - # packets after the initial SSH handshake. Helps with performance - # (especially with large file transfers) but precludes some - # kinds of analyses (e.g., tracking connection size). + ## If true, we tell the event engine to not look at further data + ## packets after the initial SSH handshake. Helps with performance + ## (especially with large file transfers) but precludes some + ## kinds of analyses (e.g., tracking connection size). const skip_processing_after_detection = F &redef; - # Keeps count of how many rejections a host has had - global password_rejections: table[addr] of TrackCount - &write_expire=guessing_timeout - &synchronized; - - # Keeps track of hosts identified as guessing passwords - # TODO: guessing_timeout doesn't work correctly here. If a user redefs - # the variable, it won't take effect. - global password_guessers: set[addr] &read_expire=guessing_timeout+1hr &synchronized; + ## This event is generated when the heuristic thinks that a login + ## was successful. + global heuristic_successful_login: event(c: connection); + + ## This event is generated when the heuristic thinks that a login + ## failed. + global heuristic_failed_login: event(c: connection); global log_ssh: event(rec: Info); } @@ -106,116 +90,51 @@ function check_ssh_connection(c: connection, done: bool) # If this is still a live connection and the byte count has not # crossed the threshold, just return and let the resheduled check happen later. - if ( !done && c$resp$size < authentication_data_size ) + if ( !done && c$resp$num_bytes_ip < authentication_data_size ) return; # Make sure the server has sent back more than 50 bytes to filter out # hosts that are just port scanning. Nothing is ever logged if the server # doesn't send back at least 50 bytes. - if ( c$resp$size < 50 ) + if ( c$resp$num_bytes_ip < 50 ) return; - local status = "failure"; - local direction = Site::is_local_addr(c$id$orig_h) ? "to" : "from"; - local location: geo_location; - location = (direction == "to") ? 
lookup_location(c$id$resp_h) : lookup_location(c$id$orig_h); + c$ssh$direction = Site::is_local_addr(c$id$orig_h) ? OUTBOUND : INBOUND; + c$ssh$resp_size = c$resp$num_bytes_ip; - if ( done && c$resp$size < authentication_data_size ) + if ( c$resp$num_bytes_ip < authentication_data_size ) { - # presumed failure - if ( c$id$orig_h !in password_rejections ) - password_rejections[c$id$orig_h] = new_track_count(); - - # Track the number of rejections - if ( !(c$id$orig_h in ignore_guessers && - c$id$resp_h in ignore_guessers[c$id$orig_h]) ) - ++password_rejections[c$id$orig_h]$n; - - if ( default_check_threshold(password_rejections[c$id$orig_h]) ) - { - add password_guessers[c$id$orig_h]; - NOTICE([$note=Password_Guessing, - $conn=c, - $msg=fmt("SSH password guessing by %s", c$id$orig_h), - $sub=fmt("%d failed logins", password_rejections[c$id$orig_h]$n), - $n=password_rejections[c$id$orig_h]$n]); - } - } - # TODO: This is to work around a quasi-bug in Bro which occasionally - # causes the byte count to be oversized. - # Watch for Gregors work that adds an actual counter of bytes transferred. - else if ( c$resp$size < 20000000 ) + c$ssh$status = "failure"; + event SSH::heuristic_failed_login(c); + } + else { # presumed successful login - status = "success"; - c$ssh$done = T; - - if ( c$id$orig_h in password_rejections && - password_rejections[c$id$orig_h]$n > password_guesses_limit && - c$id$orig_h !in password_guessers ) - { - add password_guessers[c$id$orig_h]; - NOTICE([$note=Login_By_Password_Guesser, - $conn=c, - $n=password_rejections[c$id$orig_h]$n, - $msg=fmt("Successful SSH login by password guesser %s", c$id$orig_h), - $sub=fmt("%d failed logins", password_rejections[c$id$orig_h]$n)]); - } - - local message = fmt("SSH login %s %s \"%s\" \"%s\" %f %f %s (triggered with %d bytes)", - direction, location$country_code, location$region, location$city, - location$latitude, location$longitude, - id_string(c$id), c$resp$size); - NOTICE([$note=Login, - $conn=c, - $msg=message, - $sub=location$country_code]); - - # Check to see if this login came from an interesting hostname - when ( local hostname = lookup_addr(c$id$orig_h) ) - { - if ( interesting_hostnames in hostname ) - { - NOTICE([$note=Login_From_Interesting_Hostname, - $conn=c, - $msg=fmt("Strange login from %s", hostname), - $sub=hostname]); - } - } - - if ( location$country_code in watched_countries ) - { - - } - + c$ssh$status = "success"; + event SSH::heuristic_successful_login(c); } - else if ( c$resp$size >= 200000000 ) - { - NOTICE([$note=Bytecount_Inconsistency, - $conn=c, - $msg="During byte counting in SSH analysis, an overly large value was seen.", - $sub=fmt("%d",c$resp$size)]); - } - - c$ssh$remote_location = location; - c$ssh$status = status; - c$ssh$direction = direction; - c$ssh$resp_size = c$resp$size; - - Log::write(SSH, c$ssh); # Set the "done" flag to prevent the watching event from rescheduling # after detection is done. - c$ssh$done; + c$ssh$done=T; - # Stop watching this connection, we don't care about it anymore. if ( skip_processing_after_detection ) { + # Stop watching this connection, we don't care about it anymore. 
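+ # skip_further_processing() makes the event engine ignore the rest of
+ # this connection's payload and set_record_packets(F) stops recording
+ # its packets, trading any further analysis for performance.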
skip_further_processing(c$id); set_record_packets(c$id, F); } } +event SSH::heuristic_successful_login(c: connection) &priority=-5 + { + Log::write(SSH, c$ssh); + } +event SSH::heuristic_failed_login(c: connection) &priority=-5 + { + Log::write(SSH, c$ssh); + } + event connection_state_remove(c: connection) &priority=-5 { if ( c?$ssh ) @@ -226,7 +145,7 @@ event ssh_watcher(c: connection) { local id = c$id; # don't go any further if this connection is gone already! - if ( !connection_exists(id) ) + if ( ! connection_exists(id) ) return; check_ssh_connection(c, F); @@ -244,5 +163,9 @@ event ssh_client_version(c: connection, version: string) &priority=5 { set_session(c); c$ssh$client = version; - schedule +15secs { ssh_watcher(c) }; + + # The heuristic detection for SSH relies on the ConnSize analyzer. + # Don't do the heuristics if it's disabled. + if ( use_conn_size_analyzer ) + schedule +15secs { ssh_watcher(c) }; } diff --git a/scripts/base/utils/files.bro b/scripts/base/utils/files.bro index 429835c430..8111245c24 100644 --- a/scripts/base/utils/files.bro +++ b/scripts/base/utils/files.bro @@ -12,4 +12,15 @@ function generate_extraction_filename(prefix: string, c: connection, suffix: str conn_info = fmt("%s_%s", conn_info, suffix); return conn_info; - } \ No newline at end of file + } + +## For CONTENT-DISPOSITION headers, this function can be used to extract +## the filename. +function extract_filename_from_content_disposition(data: string): string + { + local filename = sub(data, /^.*[fF][iI][lL][eE][nN][aA][mM][eE]=/, ""); + # Remove quotes around the filename if they are there. + if ( /^\"/ in filename ) + filename = split_n(filename, /\"/, F, 2)[2]; + return filename; + } diff --git a/scripts/base/utils/site.bro b/scripts/base/utils/site.bro index b8414a7a84..536c891572 100644 --- a/scripts/base/utils/site.bro +++ b/scripts/base/utils/site.bro @@ -16,6 +16,12 @@ export { ## Networks that are considered "local". const local_nets: set[subnet] &redef; + + ## This is used for retrieving the subnet when you multiple + ## :bro:id:`local_nets`. A membership query can be done with an + ## :bro:type:`addr` and the table will yield the subnet it was found + ## within. + global local_nets_table: table[subnet] of subnet = {}; ## Networks that are considered "neighbors". const neighbor_nets: set[subnet] &redef; @@ -138,4 +144,9 @@ event bro_init() &priority=10 # Double backslashes are needed due to string parsing. local_dns_suffix_regex = set_to_regex(local_zones, "(^\\.?|\\.)(~~)$"); local_dns_neighbor_suffix_regex = set_to_regex(neighbor_zones, "(^\\.?|\\.)(~~)$"); + + # Create the local_nets mapping table. 
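+ # Each local subnet maps to itself, so looking the table up with an
+ # address (e.g. local_nets_table[10.1.2.3] when 10.0.0.0/8 is a local
+ # net) yields the local subnet that contains the address.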
+ for ( cidr in Site::local_nets ) + local_nets_table[cidr] = cidr; + } diff --git a/scripts/policy/frameworks/communication/listen-clear.bro b/scripts/policy/frameworks/communication/listen-clear.bro index c5dd6e1e70..ea94fe262a 100644 --- a/scripts/policy/frameworks/communication/listen-clear.bro +++ b/scripts/policy/frameworks/communication/listen-clear.bro @@ -15,5 +15,6 @@ export { event bro_init() &priority=-10 { + enable_communication(); listen(listen_if_clear, listen_port_clear, F); } diff --git a/scripts/policy/frameworks/communication/listen-ssl.bro b/scripts/policy/frameworks/communication/listen-ssl.bro index 188d55ea5f..b228289be2 100644 --- a/scripts/policy/frameworks/communication/listen-ssl.bro +++ b/scripts/policy/frameworks/communication/listen-ssl.bro @@ -16,5 +16,6 @@ export { event bro_init() &priority=-10 { + enable_communication(); listen(listen_if_ssl, listen_port_ssl, T); } diff --git a/scripts/policy/frameworks/metrics/conn-example.bro b/scripts/policy/frameworks/metrics/conn-example.bro new file mode 100644 index 0000000000..61360496cf --- /dev/null +++ b/scripts/policy/frameworks/metrics/conn-example.bro @@ -0,0 +1,20 @@ + +redef enum Metrics::ID += { + CONNS_ORIGINATED, + CONNS_RESPONDED +}; + +event bro_init() + { + Metrics::add_filter(CONNS_ORIGINATED, [$aggregation_mask=24, $break_interval=1mins]); + + # Site::local_nets must be defined in order for this to actually do anything. + Metrics::add_filter(CONNS_RESPONDED, [$aggregation_table=Site::local_nets_table, $break_interval=1mins]); + } + +event connection_established(c: connection) + { + Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h], 1); + Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h], 1); + } + \ No newline at end of file diff --git a/scripts/policy/frameworks/metrics/http-example.bro b/scripts/policy/frameworks/metrics/http-example.bro new file mode 100644 index 0000000000..94592a852f --- /dev/null +++ b/scripts/policy/frameworks/metrics/http-example.bro @@ -0,0 +1,26 @@ + +redef enum Metrics::ID += { + HTTP_REQUESTS_BY_STATUS_CODE, + HTTP_REQUESTS_BY_HOST_HEADER, +}; + +event bro_init() + { + # TODO: these are waiting on a fix with table vals + records before they will work. + #Metrics::add_filter(HTTP_REQUESTS_BY_HOST_HEADER, + # [$pred(index: Index) = { return Site:is_local_addr(index$host) }, + # $aggregation_mask=24, + # $break_interval=5mins]); + # + ## Site::local_nets must be defined in order for this to actually do anything. 
+ #Metrics::add_filter(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_table=Site::local_nets_table, + # $break_interval=5mins]); + } + +event HTTP::log_http(rec: HTTP::Info) + { + if ( rec?$host ) + Metrics::add_data(HTTP_REQUESTS_BY_HOST_HEADER, [$str=rec$host]); + if ( rec?$status_code ) + Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $str=fmt("%d", rec$status_code)]); + } \ No newline at end of file diff --git a/scripts/policy/frameworks/metrics/ssl-example.bro b/scripts/policy/frameworks/metrics/ssl-example.bro new file mode 100644 index 0000000000..f3c5b8b902 --- /dev/null +++ b/scripts/policy/frameworks/metrics/ssl-example.bro @@ -0,0 +1,22 @@ + + +redef enum Metrics::ID += { + SSL_SERVERNAME, +}; + +event bro_init() + { + Metrics::add_filter(SSL_SERVERNAME, + [$name="no-google-ssl-servers", + $pred(index: Metrics::Index) = { + return (/google\.com$/ !in index$str); + }, + $break_interval=10secs + ]); + } + +event SSL::log_ssl(rec: SSL::Info) + { + if ( rec?$server_name ) + Metrics::add_data(SSL_SERVERNAME, [$str=rec$server_name], 1); + } \ No newline at end of file diff --git a/scripts/policy/misc/profiling.bro b/scripts/policy/misc/profiling.bro new file mode 100644 index 0000000000..457675b1d6 --- /dev/null +++ b/scripts/policy/misc/profiling.bro @@ -0,0 +1,19 @@ +##! Turns on profiling of Bro resource consumption. + +module Profiling; + +redef profiling_file = open_log_file("prof"); + +export { + ## Cheap profiling every 15 seconds. + redef profiling_interval = 15 secs &redef; +} + +# Expensive profiling every 5 minutes. +redef expensive_profiling_multiple = 20; + +event bro_init() + { + set_buf(profiling_file, F); + } + diff --git a/scripts/policy/protocols/http/detect-MHR.bro b/scripts/policy/protocols/http/detect-MHR.bro index 11e1d9f87e..fd54a62aeb 100644 --- a/scripts/policy/protocols/http/detect-MHR.bro +++ b/scripts/policy/protocols/http/detect-MHR.bro @@ -1,7 +1,7 @@ ##! This script takes MD5 sums of files transferred over HTTP and checks them with ##! Team Cymru's Malware Hash Registry (http://www.team-cymru.org/Services/MHR/). ##! By default, not all file transfers will have MD5 sums calculated. Read the -##! documentation for the protocols/http/file-hash.bro script to see how to +##! documentation for the base/protocols/http/file-hash.bro script to see how to ##! configure which transfers will have hashes calculated. export { diff --git a/scripts/policy/protocols/http/detect-sqli.bro b/scripts/policy/protocols/http/detect-sqli.bro index 514c599e7e..45a2bdb205 100644 --- a/scripts/policy/protocols/http/detect-sqli.bro +++ b/scripts/policy/protocols/http/detect-sqli.bro @@ -9,17 +9,17 @@ export { }; redef enum Metrics::ID += { - SQL_ATTACKER, - SQL_ATTACK_AGAINST, + SQL_ATTACKS, + SQL_ATTACKS_AGAINST, }; redef enum Tags += { ## Indicator of a URI based SQL injection attack. URI_SQLI, ## Indicator of client body based SQL injection attack. This is - ## typically the body content of a POST request. Not implemented yet! + ## typically the body content of a POST request. Not implemented yet. POST_SQLI, - ## Indicator of a cookie based SQL injection attack. Not implemented yet! + ## Indicator of a cookie based SQL injection attack. Not implemented yet. 
COOKIE_SQLI, }; @@ -30,13 +30,18 @@ export { | /[\?&][^[:blank:]\x00-\x37]+?=[\-0-9%]*([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x37]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/ | /[\?&][^[:blank:]\x00-\x37\|]+?=([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/|;)*([xX]?[oO][rR]|[nN]?[aA][nN][dD]|[hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[rR][eE][gG][eE][xX][pP]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x37]|\/\*.*?\*\/|[\[(])+[a-zA-Z&]{2,}/ | /[\?&][^[:blank:]\x00-\x37]+?=[^\.]*?([cC][hH][aA][rR]|[aA][sS][cC][iI][iI]|[sS][uU][bB][sS][tT][rR][iI][nN][gG]|[tT][rR][uU][nN][cC][aA][tT][eE]|[vV][eE][rR][sS][iI][oO][nN]|[lL][eE][nN][gG][tT][hH])\(/ - | /\/\*![[:digit:]]{5}.*?\*\//; + | /\/\*![[:digit:]]{5}.*?\*\// &redef; } event bro_init() { - Metrics::add_filter(SQL_ATTACKER, [$break_interval=5mins, $note=SQL_Injection_Attack]); - Metrics::add_filter(SQL_ATTACK_AGAINST, [$break_interval=5mins, $note=SQL_Injection_Attack]); + Metrics::add_filter(SQL_ATTACKS, [$log=T, + $break_interval=1mins, + $note=SQL_Injection_Attacker]); + Metrics::add_filter(SQL_ATTACKS_AGAINST, [$log=T, + $break_interval=1mins, + $note=SQL_Injection_Attack, + $notice_thresholds=vector(10,100)]); } event http_request(c: connection, method: string, original_URI: string, @@ -46,7 +51,7 @@ event http_request(c: connection, method: string, original_URI: string, { add c$http$tags[URI_SQLI]; - Metrics::add_data(SQL_ATTACKER, [$host=c$id$orig_h], 1); - Metrics::add_data(SQL_ATTACK_AGAINST, [$host=c$id$resp_h], 1); + Metrics::add_data(SQL_ATTACKS, [$host=c$id$orig_h]); + Metrics::add_data(SQL_ATTACKS_AGAINST, [$host=c$id$resp_h]); } } \ No newline at end of file diff --git a/scripts/policy/protocols/smtp/blocklists.bro b/scripts/policy/protocols/smtp/blocklists.bro new file mode 100644 index 0000000000..a3e75318bb --- /dev/null +++ b/scripts/policy/protocols/smtp/blocklists.bro @@ -0,0 +1,58 @@ + +@load base/protocols/smtp + +module SMTP; + +export { + redef enum Notice::Type += { + ## Indicates that the server sent a reply mentioning an SMTP block list. + Blocklist_Error_Message, + ## Indicates the client's address is seen in the block list error message. + Blocklist_Blocked_Host, + }; + + # This matches content in SMTP error messages that indicate some + # block list doesn't like the connection/mail. + const blocklist_error_messages = + /spamhaus\.org\// + | /sophos\.com\/security\// + | /spamcop\.net\/bl/ + | /cbl\.abuseat\.org\// + | /sorbs\.net\// + | /bsn\.borderware\.com\// + | /mail-abuse\.com\// + | /b\.barracudacentral\.com\// + | /psbl\.surriel\.com\// + | /antispam\.imp\.ch\// + | /dyndns\.com\/.*spam/ + | /rbl\.knology\.net\// + | /intercept\.datapacket\.net\// + | /uceprotect\.net\// + | /hostkarma\.junkemailfilter\.com\// &redef; + +} + +event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string, + msg: string, cont_resp: bool) &priority=3 + { + if ( code >= 400 && code != 421 ) + { + # Raise a notice when an SMTP error about a block list is discovered. + if ( blocklist_error_messages in msg ) + { + local note = Blocklist_Error_Message; + local message = fmt("%s received an error message mentioning an SMTP block list", c$id$orig_h); + + # Determine if the originator's IP address is in the message. 
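+ # find_ip_addresses() extracts any IP address strings embedded in the
+ # reply text; if the first one matches the client, the client itself
+ # appears to be the blocked host.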
+ local ips = find_ip_addresses(msg); + local text_ip = ""; + if ( |ips| > 0 && to_addr(ips[0]) == c$id$orig_h ) + { + note = Blocklist_Blocked_Host; + message = fmt("%s is on an SMTP block list", c$id$orig_h); + } + + NOTICE([$note=note, $conn=c, $msg=message, $sub=msg]); + } + } + } diff --git a/scripts/policy/protocols/smtp/software.bro b/scripts/policy/protocols/smtp/software.bro index 09bc59c636..ccb7366a2a 100644 --- a/scripts/policy/protocols/smtp/software.bro +++ b/scripts/policy/protocols/smtp/software.bro @@ -43,10 +43,10 @@ export { | /ZimbraWebClient/ &redef; } -event smtp_data(c: connection, is_orig: bool, data: string) &priority=4 +event mime_one_header(c: connection, h: mime_header_rec) &priority=4 { - if ( c$smtp$current_header == "USER-AGENT" && - webmail_user_agents in c$smtp$user_agent ) + if ( ! c?$smtp ) return; + if ( h$name == "USER-AGENT" && webmail_user_agents in c$smtp$user_agent ) c$smtp$is_webmail = T; } diff --git a/scripts/policy/protocols/ssh/detect-bruteforcing.bro b/scripts/policy/protocols/ssh/detect-bruteforcing.bro new file mode 100644 index 0000000000..36e73bfa59 --- /dev/null +++ b/scripts/policy/protocols/ssh/detect-bruteforcing.bro @@ -0,0 +1,79 @@ + +module SSH; + +export { + redef enum Notice::Type += { + ## Indicates that a host has been identified as crossing the + ## :bro:id:`password_guesses_limit` threshold with heuristically + ## determined failed logins. + Password_Guessing, + ## Indicates that a host previously identified as a "password guesser" + ## has now had a heuristically successful login attempt. + Login_By_Password_Guesser, + }; + + ## The number of failed SSH connections before a host is designated as + ## guessing passwords. + const password_guesses_limit = 30 &redef; + + ## The amount of time to remember presumed non-successful logins to build + ## model of a password guesser. + const guessing_timeout = 30 mins &redef; + + ## This value can be used to exclude hosts or entire networks from being + ## tracked as potential "guessers". There are cases where the success + ## heuristic fails and this acts as the whitelist. The index represents + ## client subnets and the yield value represents server subnets. + const ignore_guessers: table[subnet] of subnet &redef; + + ## Keeps count of how many rejections a host has had. + global password_rejections: table[addr] of TrackCount + &write_expire=guessing_timeout + &synchronized; + + ## Keeps track of hosts identified as guessing passwords. + global password_guessers: set[addr] &read_expire=guessing_timeout+1hr &synchronized; +} + +event SSH::heuristic_successful_login(c: connection) + { + local id = c$id; + + # TODO: this should be migrated to the metrics framework. + if ( id$orig_h in password_rejections && + password_rejections[id$orig_h]$n > password_guesses_limit && + id$orig_h !in password_guessers ) + { + add password_guessers[id$orig_h]; + NOTICE([$note=Login_By_Password_Guesser, + $conn=c, + $n=password_rejections[id$orig_h]$n, + $msg=fmt("Successful SSH login by password guesser %s", id$orig_h), + $sub=fmt("%d failed logins", password_rejections[id$orig_h]$n)]); + } + } + +event SSH::heuristic_failed_login(c: connection) + { + local id = c$id; + + # presumed failure + if ( id$orig_h !in password_rejections ) + password_rejections[id$orig_h] = new_track_count(); + + # Track the number of rejections + # TODO: this should be migrated to the metrics framework. + if ( ! 
(id$orig_h in ignore_guessers && + id$resp_h in ignore_guessers[id$orig_h]) ) + ++password_rejections[id$orig_h]$n; + + if ( default_check_threshold(password_rejections[id$orig_h]) ) + { + add password_guessers[id$orig_h]; + NOTICE([$note=Password_Guessing, + $conn=c, + $msg=fmt("SSH password guessing by %s", id$orig_h), + $sub=fmt("%d apparently failed logins", password_rejections[id$orig_h]$n), + $n=password_rejections[id$orig_h]$n]); + } + } \ No newline at end of file diff --git a/scripts/policy/protocols/ssh/geo-data.bro b/scripts/policy/protocols/ssh/geo-data.bro new file mode 100644 index 0000000000..97bd0a5803 --- /dev/null +++ b/scripts/policy/protocols/ssh/geo-data.bro @@ -0,0 +1,39 @@ +##! This implements all of the additional information and geodata detections +##! for SSH analysis. + +module SSH; + +export { + redef enum Notice::Type += { + ## If an SSH login is seen to or from a "watched" country based on the + ## :bro:id:`SSH::watched_countries` variable then this notice will + ## be generated. + Login_From_Watched_Country, + }; + + ## The set of countries for which you'd like to throw notices upon + ## successful login + const watched_countries: set[string] = {"RO"} &redef; + + redef record Info += { + ## Add geographic data related to the "remote" host of the connection. + remote_location: geo_location &log &optional; + }; +} + +event SSH::heuristic_successful_login(c: connection) &priority=5 + { + local location: geo_location; + location = (c$ssh$direction == OUTBOUND) ? + lookup_location(c$id$resp_h) : lookup_location(c$id$orig_h); + + # Add the location data to the SSH record. + c$ssh$remote_location = location; + + if ( location$country_code in watched_countries ) + { + NOTICE([$note=Login_From_Watched_Country, + $conn=c, + $msg=fmt("SSH login from watched country: %s", location$country_code)]); + } + } diff --git a/scripts/policy/protocols/ssh/interesting-hostnames.bro b/scripts/policy/protocols/ssh/interesting-hostnames.bro new file mode 100644 index 0000000000..cf6ab7e40a --- /dev/null +++ b/scripts/policy/protocols/ssh/interesting-hostnames.bro @@ -0,0 +1,50 @@ + +module SSH; + +export { + redef enum Notice::Type += { + ## Generated if a login originates from a host matched by the + ## :bro:id:`interesting_hostnames` regular expression. + Login_From_Interesting_Hostname, + ## Generated if a login goes to a host matched by the + ## :bro:id:`interesting_hostnames` regular expression. + Login_To_Interesting_Hostname, + }; + + ## Strange/bad host names to see successful SSH logins from or to. + const interesting_hostnames = + /^d?ns[0-9]*\./ | + /^smtp[0-9]*\./ | + /^mail[0-9]*\./ | + /^pop[0-9]*\./ | + /^imap[0-9]*\./ | + /^www[0-9]*\./ | + /^ftp[0-9]*\./ &redef; +} + +event SSH::heuristic_successful_login(c: connection) + { + # Check to see if this login came from an interesting hostname. + when ( local orig_hostname = lookup_addr(c$id$orig_h) ) + { + if ( interesting_hostnames in orig_hostname ) + { + NOTICE([$note=Login_From_Interesting_Hostname, + $conn=c, + $msg=fmt("Interesting login from hostname: %s", orig_hostname), + $sub=orig_hostname]); + } + } + # Check to see if this login went to an interesting hostname. 
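+ # (Note: the lookup below resolves c$id$orig_h again; for the "login to"
+ # direction it presumably needs to resolve c$id$resp_h instead.)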
+ when ( local resp_hostname = lookup_addr(c$id$orig_h) ) + { + if ( interesting_hostnames in resp_hostname ) + { + NOTICE([$note=Login_To_Interesting_Hostname, + $conn=c, + $msg=fmt("Interesting login to hostname: %s", resp_hostname), + $sub=resp_hostname]); + } + } + } + diff --git a/scripts/policy/protocols/ssh/software.bro b/scripts/policy/protocols/ssh/software.bro index d40ad513c8..1aa3bce1a2 100644 --- a/scripts/policy/protocols/ssh/software.bro +++ b/scripts/policy/protocols/ssh/software.bro @@ -3,8 +3,8 @@ module SSH; export { redef enum Software::Type += { - SSH_SERVER, - SSH_CLIENT, + SERVER, + CLIENT, }; } @@ -12,7 +12,7 @@ event ssh_client_version(c: connection, version: string) &priority=4 { # Get rid of the protocol information when passing to the software framework. local cleaned_version = sub(version, /^SSH[0-9\.\-]+/, ""); - local si = Software::parse(cleaned_version, c$id$orig_h, SSH_CLIENT); + local si = Software::parse(cleaned_version, c$id$orig_h, CLIENT); Software::found(c$id, si); } @@ -20,6 +20,6 @@ event ssh_server_version(c: connection, version: string) &priority=4 { # Get rid of the protocol information when passing to the software framework. local cleaned_version = sub(version, /SSH[0-9\.\-]{2,}/, ""); - local si = Software::parse(cleaned_version, c$id$resp_h, SSH_SERVER); + local si = Software::parse(cleaned_version, c$id$resp_h, SERVER); Software::found(c$id, si); } diff --git a/scripts/site/local-manager.bro b/scripts/site/local-manager.bro new file mode 100644 index 0000000000..aa28bd79da --- /dev/null +++ b/scripts/site/local-manager.bro @@ -0,0 +1,7 @@ +##! Local site policy loaded only by the manager in a cluster. + +# If you are running a cluster you should define your Notice::policy here +# so that notice processing occurs on the manager. +redef Notice::policy += { + +}; diff --git a/scripts/site/local-proxy.bro b/scripts/site/local-proxy.bro new file mode 100644 index 0000000000..1b71cc1870 --- /dev/null +++ b/scripts/site/local-proxy.bro @@ -0,0 +1,2 @@ +##! Local site policy loaded only by the proxies if Bro is running as a cluster. + diff --git a/scripts/site/local-worker.bro b/scripts/site/local-worker.bro new file mode 100644 index 0000000000..b2a100e135 --- /dev/null +++ b/scripts/site/local-worker.bro @@ -0,0 +1 @@ +##! Local site policy loaded only by the workers if Bro is running as a cluster. \ No newline at end of file diff --git a/scripts/site/local.bro b/scripts/site/local.bro index 5199cab288..f894a30432 100644 --- a/scripts/site/local.bro +++ b/scripts/site/local.bro @@ -22,6 +22,7 @@ redef signature_files += "frameworks/signatures/detect-windows-shells.sig"; # Load all of the scripts that detect software in various protocols. @load protocols/http/software +#@load protocols/http/detect-webapps @load protocols/ftp/software @load protocols/smtp/software @load protocols/ssh/software @@ -44,3 +45,24 @@ redef signature_files += "frameworks/signatures/detect-windows-shells.sig"; # Load the script to enable SSL/TLS certificate validation. @load protocols/ssl/validate-certs + +# If you have libGeoIP support built in, do some geographic detections and +# logging for SSH traffic. +@load protocols/ssh/geo-data +# Detect hosts doing SSH bruteforce attacks. +@load protocols/ssh/detect-bruteforcing +# Detect logins using "interesting" hostnames. +@load protocols/ssh/interesting-hostnames + +# Detect MD5 sums in Team Cymru's Malware Hash Registry. 
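+# (Only transfers for which MD5 sums are calculated get checked; see
+# base/protocols/http/file-hash.bro for how to configure that.)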
+@load protocols/http/detect-MHR +# Detect SQL injection attacks +@load protocols/http/detect-sqli + +# Uncomment this redef if you want to extract SMTP MIME entities for +# some file types. The numbers given indicate how many bytes to extract for +# the various mime types. +redef SMTP::entity_excerpt_len += { +# ["text/plain"] = 1024, +# ["text/html"] = 1024, +}; diff --git a/scripts/test-all.bro b/scripts/test-all-policy.bro similarity index 89% rename from scripts/test-all.bro rename to scripts/test-all-policy.bro index 42e1e91cb5..a42ef893fc 100644 --- a/scripts/test-all.bro +++ b/scripts/test-all-policy.bro @@ -2,18 +2,17 @@ # # This is rarely makes sense, and is for testing only. # -# Note that we have unit test that makes sure that all policy files shipped are +# Note that we have a unit test that makes sure that all policy files shipped are # actually loaded here. If we have files that are part of the distribution yet # can't be loaded here, these must still be listed here with their load command # commented out. # The base/ scripts are all loaded by default and not included here. -# @load test-all.bro # @load frameworks/communication/listen-clear.bro # @load frameworks/communication/listen-ssl.bro -# @load frameworks/control/controllee -# @load frameworks/control/controller +# @load frameworks/control/controllee.bro +# @load frameworks/control/controller.bro @load frameworks/dpd/detect-protocols.bro @load frameworks/dpd/packet-segment-logging.bro @load frameworks/software/version-changes.bro @@ -27,6 +26,7 @@ @load misc/trim-trace-file.bro @load protocols/conn/known-hosts.bro @load protocols/conn/known-services.bro +# @load protocols/conn/scan.bro @load protocols/dns/auth-addl.bro @load protocols/dns/detect-external-names.bro @load protocols/ftp/detect.bro @@ -50,4 +50,4 @@ @load tuning/defaults/remove-high-volume-notices.bro @load tuning/defaults/warnings.bro @load tuning/track-all-assets.bro -@load site/local +# @load hot.conn.bro diff --git a/src/ConnCompressor.cc b/src/ConnCompressor.cc index 2d617b0fc4..112abe089e 100644 --- a/src/ConnCompressor.cc +++ b/src/ConnCompressor.cc @@ -866,15 +866,10 @@ void ConnCompressor::Event(const PendingConn* pending, double t, if ( ConnSize_Analyzer::Available() ) { + // Fill in optional fields if ConnSize_Analyzer is on. 
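+ // When the analyzer is off, the packet/byte fields are now simply
+ // left unset instead of being forced to 0.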
orig_endp->Assign(2, new Val(pending->num_pkts, TYPE_COUNT)); orig_endp->Assign(3, new Val(pending->num_bytes_ip, TYPE_COUNT)); } - else - { - orig_endp->Assign(2, new Val(0, TYPE_COUNT)); - orig_endp->Assign(3, new Val(0, TYPE_COUNT)); - } - resp_endp->Assign(0, new Val(0, TYPE_COUNT)); resp_endp->Assign(1, new Val(resp_state, TYPE_COUNT)); @@ -900,14 +895,10 @@ void ConnCompressor::Event(const PendingConn* pending, double t, if ( ConnSize_Analyzer::Available() ) { + // Fill in optional fields if ConnSize_Analyzer is on resp_endp->Assign(2, new Val(pending->num_pkts, TYPE_COUNT)); resp_endp->Assign(3, new Val(pending->num_bytes_ip, TYPE_COUNT)); } - else - { - resp_endp->Assign(2, new Val(0, TYPE_COUNT)); - resp_endp->Assign(3, new Val(0, TYPE_COUNT)); - } DBG_LOG(DBG_COMPRESSOR, "%s swapped direction", fmt_conn_id(pending)); } diff --git a/src/DNS_Mgr.cc b/src/DNS_Mgr.cc index 8776f69d55..e6bebda875 100644 --- a/src/DNS_Mgr.cc +++ b/src/DNS_Mgr.cc @@ -1071,7 +1071,7 @@ void DNS_Mgr::Process() int status = nb_dns_activity(nb_dns, &r, err); if ( status < 0 ) - reporter->InternalError("NB-DNS error in DNS_Mgr::Process (%s)", err); + reporter->Warning("NB-DNS error in DNS_Mgr::Process (%s)", err); else if ( status > 0 ) { diff --git a/src/Expr.cc b/src/Expr.cc index c142026123..c4fbe5930a 100644 --- a/src/Expr.cc +++ b/src/Expr.cc @@ -231,7 +231,6 @@ bool Expr::DoUnserialize(UnserialInfo* info) NameExpr::NameExpr(ID* arg_id) : Expr(EXPR_NAME) { id = arg_id; - ReferenceID(); SetType(id->Type()->Ref()); EventHandler* h = event_registry->Lookup(id->Name()); @@ -244,29 +243,6 @@ NameExpr::~NameExpr() Unref(id); } -void NameExpr::ReferenceID() - { - // ### This is a hack. We check whether one of the remote serializer's - // built-in functions is referenced. If so, we activate the serializer. - // A better solution would be to either (1) a generic mechanism in - // which have (internal) attributes associated with identifiers and - // as we see references to the identifiers, we do bookkeeping - // associated with their attribute (so in this case the attribute - // would be "flag that inter-Bro communication is being used"), - // or (2) after the parse is done, we'd query whether these - // particular identifiers were seen, rather than doing the test - // here for every NameExpr we create. - if ( id->Type()->Tag() == TYPE_FUNC ) - { - const char* const* builtins = remote_serializer->GetBuiltins(); - while( *builtins ) - { - if ( streq(id->Name(), *builtins++) ) - using_communication = true; - } - } - } - Expr* NameExpr::Simplify(SimplifyType simp_type) { if ( simp_type != SIMPLIFY_LHS && id->IsConst() ) @@ -393,8 +369,6 @@ bool NameExpr::DoUnserialize(UnserialInfo* info) if ( ! id ) return false; - ReferenceID(); - return true; } @@ -5046,8 +5020,9 @@ Val* ListExpr::InitVal(const BroType* t, Val* aggr) const loop_over_list(exprs, i) { Expr* e = exprs[i]; + check_and_promote_expr(e, vec->Type()->AsVectorType()->YieldType()); Val* v = e->Eval(0); - if ( ! vec->Assign(i, v, e) ) + if ( ! vec->Assign(i, v->RefCnt() == 1 ? 
v->Ref() : v, e) ) { e->Error(fmt("type mismatch at index %d", i)); return 0; diff --git a/src/Expr.h b/src/Expr.h index 0f6ee67106..2e5d5b637a 100644 --- a/src/Expr.h +++ b/src/Expr.h @@ -217,7 +217,6 @@ protected: friend class Expr; NameExpr() { id = 0; } - void ReferenceID(); void ExprDescribe(ODesc* d) const; DECLARE_SERIAL(NameExpr); diff --git a/src/LogMgr.cc b/src/LogMgr.cc index 461bf25e02..4719d04a22 100644 --- a/src/LogMgr.cc +++ b/src/LogMgr.cc @@ -89,7 +89,7 @@ bool LogField::Write(SerializationFormat* fmt) const LogVal::~LogVal() { - if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE) + if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE || type == TYPE_FUNC) && present ) delete val.string_val; @@ -130,6 +130,7 @@ bool LogVal::IsCompatibleType(BroType* t, bool atomic_only) case TYPE_ENUM: case TYPE_STRING: case TYPE_FILE: + case TYPE_FUNC: return true; case TYPE_RECORD: @@ -231,6 +232,7 @@ bool LogVal::Read(SerializationFormat* fmt) case TYPE_ENUM: case TYPE_STRING: case TYPE_FILE: + case TYPE_FUNC: { val.string_val = new string; return fmt->Read(val.string_val, "string"); @@ -343,6 +345,7 @@ bool LogVal::Write(SerializationFormat* fmt) const case TYPE_ENUM: case TYPE_STRING: case TYPE_FILE: + case TYPE_FUNC: return fmt->Write(*val.string_val, "string"); case TYPE_TABLE: @@ -433,6 +436,25 @@ LogMgr::Stream* LogMgr::FindStream(EnumVal* id) return streams[idx]; } +LogMgr::WriterInfo* LogMgr::FindWriter(LogWriter* writer) + { + for ( vector::iterator s = streams.begin(); s != streams.end(); ++s ) + { + if ( ! *s ) + continue; + + for ( Stream::WriterMap::iterator i = (*s)->writers.begin(); i != (*s)->writers.end(); i++ ) + { + WriterInfo* winfo = i->second; + + if ( winfo->writer == writer ) + return winfo; + } + } + + return 0; + } + void LogMgr::RemoveDisabledWriters(Stream* stream) { list disabled; @@ -629,6 +651,11 @@ bool LogMgr::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, // That's ok, we handle it below. } + else if ( t->Tag() == TYPE_FUNC ) + { + // That's ok, we handle it below. + } + else { reporter->Error("unsupported field type for log column"); @@ -875,9 +902,10 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns) if ( filter->path_func ) { - val_list vl(2); + val_list vl(3); vl.append(id->Ref()); vl.append(filter->path_val->Ref()); + vl.append(columns->Ref()); Val* v = filter->path_func->Call(&vl); if ( ! v->Type()->Tag() == TYPE_STRING ) @@ -888,6 +916,7 @@ bool LogMgr::Write(EnumVal* id, RecordVal* columns) } path = v->AsString()->CheckString(); + Unref(v); #ifdef DEBUG DBG_LOG(DBG_LOGGING, "Path function for filter '%s' on stream '%s' return '%s'", @@ -1055,6 +1084,15 @@ LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty) break; } + case TYPE_FUNC: + { + ODesc d; + const Func* f = val->AsFunc(); + f->Describe(&d); + lval->val.string_val = new string(d.Description()); + break; + } + case TYPE_TABLE: { ListVal* set = val->AsTableVal()->ConvertToPureList(); @@ -1411,6 +1449,8 @@ void LogMgr::InstallRotationTimer(WriterInfo* winfo) RecordVal* rc = LookupRotationControl(winfo->type, winfo->writer->Path()); + assert(rc); + int idx = rc->Type()->AsRecordType()->FieldOffset("interv"); double rotation_interval = rc->LookupWithDefault(idx)->AsInterval(); @@ -1448,34 +1488,63 @@ void LogMgr::Rotate(WriterInfo* winfo) DBG_LOG(DBG_LOGGING, "Rotating %s at %.6f", winfo->writer->Path().c_str(), network_time); - // Create the RotationInfo record. 
- RecordVal* info = new RecordVal(BifType::Record::Log::RotationInfo); - info->Assign(0, winfo->type->Ref()); - info->Assign(1, new StringVal(winfo->writer->Path().c_str())); - info->Assign(2, new Val(winfo->open_time, TYPE_TIME)); - info->Assign(3, new Val(network_time, TYPE_TIME)); + // Build a temporary path for the writer to move the file to. + struct tm tm; + char buf[128]; + const char* const date_fmt = "%y-%m-%d_%H.%M.%S"; + time_t teatime = (time_t)winfo->open_time; - // Call the function building us the new path. + localtime_r(&teatime, &tm); + strftime(buf, sizeof(buf), date_fmt, &tm); - Func* rotation_path_func = - internal_func("Log::default_rotation_path_func"); + string tmp = string(fmt("%s-%s", winfo->writer->Path().c_str(), buf)); + + // Trigger the rotation. + winfo->writer->Rotate(tmp, winfo->open_time, network_time, terminating); + } + +bool LogMgr::FinishedRotation(LogWriter* writer, string new_name, string old_name, + double open, double close, bool terminating) + { + DBG_LOG(DBG_LOGGING, "Finished rotating %s at %.6f, new name %s", + writer->Path().c_str(), network_time, new_name.c_str()); + + WriterInfo* winfo = FindWriter(writer); + assert(winfo); RecordVal* rc = LookupRotationControl(winfo->type, winfo->writer->Path()); + assert(rc); + + // Create the RotationInfo record. + RecordVal* info = new RecordVal(BifType::Record::Log::RotationInfo); + info->Assign(0, winfo->type->Ref()); + info->Assign(1, new StringVal(new_name.c_str())); + info->Assign(2, new StringVal(winfo->writer->Path().c_str())); + info->Assign(3, new Val(open, TYPE_TIME)); + info->Assign(4, new Val(close, TYPE_TIME)); + info->Assign(5, new Val(terminating, TYPE_BOOL)); + int idx = rc->Type()->AsRecordType()->FieldOffset("postprocessor"); + assert(idx >= 0); - string rotation_postprocessor = - rc->LookupWithDefault(idx)->AsString()->CheckString(); + Val* func = rc->Lookup(idx); + if ( ! func ) + { + ID* id = global_scope()->Lookup("Log::__default_rotation_postprocessor"); + assert(id); + func = id->ID_Val(); + } + assert(func); + + // Call the postprocessor function. val_list vl(1); vl.append(info); - Val* result = rotation_path_func->Call(&vl); - string new_path = result->AsString()->CheckString(); - Unref(result); - - winfo->writer->Rotate(new_path, rotation_postprocessor, - winfo->open_time, network_time, terminating); + Val* v = func->AsFunc()->Call(&vl); + int result = v->AsBool(); + Unref(v); + return result; } - diff --git a/src/LogMgr.h b/src/LogMgr.h index cc593374c5..033a6ba3fd 100644 --- a/src/LogMgr.h +++ b/src/LogMgr.h @@ -103,6 +103,10 @@ protected: //// Functions safe to use by writers. + // Signals that a file has been rotated. + bool FinishedRotation(LogWriter* writer, string new_name, string old_name, + double open, double close, bool terminating); + // Reports an error for the given writer. void Error(LogWriter* writer, const char* msg); @@ -127,6 +131,7 @@ private: void Rotate(WriterInfo* info); RecordVal* LookupRotationControl(EnumVal* writer, string path); Filter* FindFilter(EnumVal* id, StringVal* filter); + WriterInfo* FindWriter(LogWriter* writer); vector streams; // Indexed by stream enum. }; diff --git a/src/LogWriter.cc b/src/LogWriter.cc index 0017f8f246..8584a0b0b5 100644 --- a/src/LogWriter.cc +++ b/src/LogWriter.cc @@ -89,10 +89,10 @@ bool LogWriter::SetBuf(bool enabled) return true; } -bool LogWriter::Rotate(string rotated_path, string postprocessor, double open, +bool LogWriter::Rotate(string rotated_path, double open, double close, bool terminating) { - if ( ! 
DoRotate(rotated_path, postprocessor, open, close, terminating) ) + if ( ! DoRotate(rotated_path, open, close, terminating) ) { disabled = true; return false; @@ -151,42 +151,8 @@ void LogWriter::DeleteVals(LogVal** vals) log_mgr->DeleteVals(num_fields, vals); } -bool LogWriter::RunPostProcessor(string fname, string postprocessor, - string old_name, double open, double close, - bool terminating) +bool LogWriter::FinishedRotation(string new_name, string old_name, double open, + double close, bool terminating) { - // This function operates in a way that is backwards-compatible with - // the old Bro log rotation scheme. - - if ( ! postprocessor.size() ) - return true; - - const char* const fmt = "%y-%m-%d_%H.%M.%S"; - - struct tm tm1; - struct tm tm2; - - time_t tt1 = (time_t)open; - time_t tt2 = (time_t)close; - - localtime_r(&tt1, &tm1); - localtime_r(&tt2, &tm2); - - char buf1[128]; - char buf2[128]; - - strftime(buf1, sizeof(buf1), fmt, &tm1); - strftime(buf2, sizeof(buf2), fmt, &tm2); - - string cmd = postprocessor; - cmd += " " + fname; - cmd += " " + old_name; - cmd += " " + string(buf1); - cmd += " " + string(buf2); - cmd += " " + string(terminating ? "1" : "0"); - cmd += " &"; - - system(cmd.c_str()); - - return true; + return log_mgr->FinishedRotation(this, new_name, old_name, open, close, terminating); } diff --git a/src/LogWriter.h b/src/LogWriter.h index 8dcd05a67f..1d2f9fa4b2 100644 --- a/src/LogWriter.h +++ b/src/LogWriter.h @@ -60,8 +60,7 @@ public: // Triggers rotation, if the writer supports that. (If not, it will // be ignored). - bool Rotate(string rotated_path, string postprocessor, double open, - double close, bool terminating); + bool Rotate(string rotated_path, double open, double close, bool terminating); // Finishes writing to this logger regularly. Must not be called if // an error has been indicated earlier. After calling this, no @@ -77,7 +76,6 @@ public: const LogField* const * Fields() const { return fields; } protected: - // Methods for writers to override. If any of these returs false, it // will be assumed that a fatal error has occured that prevents the // writer from further operation. It will then be disabled and @@ -116,6 +114,10 @@ protected: // applies to writers writing into files, which should then close the // current file and open a new one. However, a writer may also // trigger other apppropiate actions if semantics are similar. + // + // Once rotation has finished, the implementation should call + // RotationDone() to signal the log manager that potential + // postprocessors can now run. // // "rotate_path" reflects the path to where the rotated output is to // be moved, with specifics depending on the writer. It should @@ -123,12 +125,7 @@ protected: // as passed into DoInit(). As an example, for file-based output, // "rotate_path" could be the original filename extended with a // timestamp indicating the time of the rotation. - - // "postprocessor" is the name of a command to execute on the rotated - // file. If empty, no postprocessing should take place; if given but - // the writer doesn't support postprocessing, it can be ignored (but - // the method must still return true in that case). - + // // "open" and "close" are the network time's when the *current* file // was opened and closed, respectively. // @@ -138,8 +135,8 @@ protected: // // A writer may ignore rotation requests if it doesn't fit with its // semantics (but must still return true in that case). 
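+ // (The completion hook referred to above as RotationDone() is the
+ // FinishedRotation() method declared further below.)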
- virtual bool DoRotate(string rotated_path, string postprocessor, - double open, double close, bool terminating) = 0; + virtual bool DoRotate(string rotated_path, double open, double close, + bool terminating) = 0; // Called once on termination. Not called when any of the other // methods has previously signaled an error, i.e., executing this @@ -157,11 +154,18 @@ protected: // Reports an error to the user. void Error(const char *msg); - // Runs a post-processor on the given file. Parameters correspond to - // those of DoRotate(). - bool RunPostProcessor(string fname, string postprocessor, - string old_name, double open, double close, - bool terminating); + // Signals to the log manager that a file has been rotated. + // + // new_name: The filename of the rotated file. old_name: The filename + // of the origina file. + // + // open/close: The timestamps when the original file was opened and + // closed, respectively. + // + // terminating: True if rotation request occured due to the main Bro + // process shutting down. + bool FinishedRotation(string new_name, string old_name, double open, + double close, bool terminating); private: friend class LogMgr; diff --git a/src/LogWriterAscii.cc b/src/LogWriterAscii.cc index 02a18bb672..446d6c8d65 100644 --- a/src/LogWriterAscii.cc +++ b/src/LogWriterAscii.cc @@ -155,6 +155,7 @@ bool LogWriterAscii::DoWriteOne(ODesc* desc, LogVal* val, const LogField* field) case TYPE_ENUM: case TYPE_STRING: case TYPE_FILE: + case TYPE_FUNC: { int size = val->val.string_val->size(); if ( size ) @@ -242,7 +243,7 @@ bool LogWriterAscii::DoWrite(int num_fields, const LogField* const * fields, return true; } -bool LogWriterAscii::DoRotate(string rotated_path, string postprocessor, double open, +bool LogWriterAscii::DoRotate(string rotated_path, double open, double close, bool terminating) { if ( IsSpecial(Path()) ) @@ -254,10 +255,8 @@ bool LogWriterAscii::DoRotate(string rotated_path, string postprocessor, double string nname = rotated_path + ".log"; rename(fname.c_str(), nname.c_str()); - if ( postprocessor.size() && - ! RunPostProcessor(nname, postprocessor, fname.c_str(), - open, close, terminating) ) - return false; + if ( ! 
FinishedRotation(nname, fname, open, close, terminating) ) + Error(Fmt("error rotating %s to %s", fname.c_str(), nname.c_str())); return DoInit(Path(), NumFields(), Fields()); } diff --git a/src/LogWriterAscii.h b/src/LogWriterAscii.h index fecbd9e94c..cceb685ff9 100644 --- a/src/LogWriterAscii.h +++ b/src/LogWriterAscii.h @@ -20,8 +20,8 @@ protected: virtual bool DoWrite(int num_fields, const LogField* const * fields, LogVal** vals); virtual bool DoSetBuf(bool enabled); - virtual bool DoRotate(string rotated_path, string postprocessr, - double open, double close, bool terminating); + virtual bool DoRotate(string rotated_path, double open, double close, + bool terminating); virtual bool DoFlush(); virtual void DoFinish(); diff --git a/src/Obj.cc b/src/Obj.cc index 666a57ac93..dfa8ed0148 100644 --- a/src/Obj.cc +++ b/src/Obj.cc @@ -127,6 +127,7 @@ void BroObj::BadTag(const char* msg, const char* t1, const char* t2) const ODesc d; DoMsg(&d, out); reporter->FatalError("%s", d.Description()); + reporter->PopLocation(); } void BroObj::Internal(const char* msg) const @@ -134,6 +135,7 @@ void BroObj::Internal(const char* msg) const ODesc d; DoMsg(&d, msg); reporter->InternalError("%s", d.Description()); + reporter->PopLocation(); } void BroObj::InternalWarning(const char* msg) const @@ -141,6 +143,7 @@ void BroObj::InternalWarning(const char* msg) const ODesc d; DoMsg(&d, msg); reporter->InternalWarning("%s", d.Description()); + reporter->PopLocation(); } void BroObj::AddLocation(ODesc* d) const diff --git a/src/RSH.cc b/src/RSH.cc index 0b833697fd..cbbce944f5 100644 --- a/src/RSH.cc +++ b/src/RSH.cc @@ -15,7 +15,7 @@ Contents_Rsh_Analyzer::Contents_Rsh_Analyzer(Connection* conn, bool orig, Rsh_Analyzer* arg_analyzer) : ContentLine_Analyzer(AnalyzerTag::Contents_Rsh, conn, orig) { - num_bytes_to_scan = num_bytes_to_scan = 0; + num_bytes_to_scan = 0; analyzer = arg_analyzer; if ( orig ) diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index a80157767f..814f387718 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -3060,13 +3060,6 @@ bool RemoteSerializer::IsActive() return false; } - -const char* const* RemoteSerializer::GetBuiltins() const - { - static const char* builtins[] = { "connect", "listen", 0 }; - return builtins; - } - void RemoteSerializer::ReportError(const char* msg) { if ( current_peer && current_peer->phase != Peer::SETUP ) diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index 18284463a1..5374e6f931 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -128,10 +128,6 @@ public: // Log some statistics. void LogStats(); - // Return a 0-terminated array of built-in functions which, - // when referenced, trigger the remote serializer's initialization. - const char* const* GetBuiltins() const; - // Tries to sent out all remaining data. // FIXME: Do we still need this? 
void Finish(); diff --git a/src/Reporter.cc b/src/Reporter.cc index 4a8e35e650..053d6370d7 100644 --- a/src/Reporter.cc +++ b/src/Reporter.cc @@ -302,7 +302,7 @@ void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out, Conne s += buffer; s += "\n"; - fprintf(out, s.c_str()); + fprintf(out, "%s", s.c_str()); } if ( alloced ) diff --git a/src/bro.bif b/src/bro.bif index 240eeed9dd..76317f5dd4 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -3624,15 +3624,46 @@ function NFS3::mode2string%(mode: count%): string function piped_exec%(program: string, to_write: string%): bool %{ const char* prog = program->CheckString(); + FILE* f = popen(prog, "w"); if ( ! f ) { reporter->Error("Failed to popen %s", prog); - return new Val(false, TYPE_BOOL); + return new Val(0, TYPE_BOOL); } - fprintf(f, "%s", to_write->CheckString()); - pclose(f); + const u_char* input_data = to_write->Bytes(); + int input_data_len = to_write->Len(); - return new Val(true, TYPE_BOOL); + int bytes_written = fwrite(input_data, 1, input_data_len, f); + + pclose(f); + + if ( bytes_written != input_data_len ) + { + reporter->Error("Failed to write all given data to %s", prog); + return new Val(0, TYPE_BOOL); + } + + return new Val(1, TYPE_BOOL); + %} + +## Enables the communication system. Note that by default, +## communication is off until explicitly enabled, and all other calls +## to communication-related BiFs will be ignored until then. +function enable_communication%(%): any + %{ + if ( bro_start_network_time != 0.0 ) + { + builtin_error("communication must be enabled in bro_init"); + return 0; + } + + if ( using_communication ) + // Ignore duplicate calls. + return 0; + + using_communication = 1; + remote_serializer->Init(); + return 0; %} diff --git a/src/main.cc b/src/main.cc index 2a36b4019a..f1b393310b 100644 --- a/src/main.cc +++ b/src/main.cc @@ -880,9 +880,6 @@ int main(int argc, char** argv) exit(0); } - if ( using_communication ) - remote_serializer->Init(); - persistence_serializer->SetDir((const char *)state_dir->AsString()->CheckString()); // Print the ID. diff --git a/src/parse.y b/src/parse.y index c3624bfd2d..7964fa1bcc 100644 --- a/src/parse.y +++ b/src/parse.y @@ -1070,10 +1070,10 @@ decl: } | TOK_REDEF TOK_RECORD global_id TOK_ADD_TO - '{' { do_doc_token_start(); } type_decl_list '}' opt_attr ';' + '{' { ++in_record; do_doc_token_start(); } + type_decl_list + { --in_record; do_doc_token_stop(); } '}' opt_attr ';' { - do_doc_token_stop(); - if ( ! 
$3->Type() ) $3->Error("unknown identifier"); else @@ -1083,7 +1083,7 @@ decl: $3->Error("not a record type"); else { - const char* error = add_to->AddFields($7, $9); + const char* error = add_to->AddFields($7, $10); if ( error ) $3->Error(error); else if ( generate_documentation ) diff --git a/testing/btest/Baseline/bifs.piped_exec/test.txt b/testing/btest/Baseline/bifs.piped_exec/test.txt new file mode 100644 index 0000000000..a23f66ba7e Binary files /dev/null and b/testing/btest/Baseline/bifs.piped_exec/test.txt differ diff --git a/testing/btest/Baseline/core.reporter-fmt-strings/output b/testing/btest/Baseline/core.reporter-fmt-strings/output new file mode 100644 index 0000000000..10a883cb5d --- /dev/null +++ b/testing/btest/Baseline/core.reporter-fmt-strings/output @@ -0,0 +1 @@ +error in /Users/jsiwek/tmp/bro/testing/btest/.tmp/core.reporter-fmt-strings/reporter-fmt-strings.bro, line 9: not an event (dont_interpret_this(%s)) diff --git a/testing/btest/Baseline/language.record-extension/output b/testing/btest/Baseline/language.record-extension/output index 1e38084e0c..b60b1c93b6 100644 --- a/testing/btest/Baseline/language.record-extension/output +++ b/testing/btest/Baseline/language.record-extension/output @@ -1,2 +1,10 @@ -[a=21, b=, c=42, d=] -[a=21, b=, c=42, d=XXX] +[a=21, b=, myset={ + +}, c=42, d=, anotherset={ + +}] +[a=21, b=, myset={ + +}, c=42, d=XXX, anotherset={ + +}] diff --git a/testing/btest/Baseline/language.vector-list-init-records/output b/testing/btest/Baseline/language.vector-list-init-records/output new file mode 100644 index 0000000000..eccb029b4a --- /dev/null +++ b/testing/btest/Baseline/language.vector-list-init-records/output @@ -0,0 +1,3 @@ +element 0 = [s=bar, o=check] +element 1 = [s=baz, o=] +[[s=bar, o=check], [s=baz, o=]] diff --git a/testing/btest/Baseline/policy.frameworks.logging.adapt-filter/ssh-new-default.log b/testing/btest/Baseline/policy.frameworks.logging.adapt-filter/ssh-new-default.log index 469f2d1991..ee274bb0fa 100644 --- a/testing/btest/Baseline/policy.frameworks.logging.adapt-filter/ssh-new-default.log +++ b/testing/btest/Baseline/policy.frameworks.logging.adapt-filter/ssh-new-default.log @@ -1,3 +1,3 @@ # t id.orig_h id.orig_p id.resp_h id.resp_p status country -1299718503.40319 1.2.3.4 1234 2.3.4.5 80 success unknown -1299718503.40319 1.2.3.4 1234 2.3.4.5 80 failure US +1313212563.234939 1.2.3.4 1234 2.3.4.5 80 success unknown +1313212563.234939 1.2.3.4 1234 2.3.4.5 80 failure US diff --git a/testing/btest/Baseline/policy.frameworks.logging.path-func/output b/testing/btest/Baseline/policy.frameworks.logging.path-func/output index 25e4ca6696..7e8acf5106 100644 --- a/testing/btest/Baseline/policy.frameworks.logging.path-func/output +++ b/testing/btest/Baseline/policy.frameworks.logging.path-func/output @@ -1,13 +1,21 @@ -static-prefix-0.log -static-prefix-1.log -static-prefix-2.log +static-prefix-0-BR.log +static-prefix-0-MX3.log +static-prefix-0-unknown.log +static-prefix-1-MX.log +static-prefix-1-US.log +static-prefix-2-MX2.log +static-prefix-2-UK.log # t id.orig_h id.orig_p id.resp_h id.resp_p status country -1299718503.05867 1.2.3.4 1234 2.3.4.5 80 success unknown -1299718503.05867 1.2.3.4 1234 2.3.4.5 80 success BR -1299718503.05867 1.2.3.4 1234 2.3.4.5 80 failure MX3 +1313212701.542245 1.2.3.4 1234 2.3.4.5 80 success BR # t id.orig_h id.orig_p id.resp_h id.resp_p status country -1299718503.05867 1.2.3.4 1234 2.3.4.5 80 failure US -1299718503.05867 1.2.3.4 1234 2.3.4.5 80 failure MX +1313212701.542245 1.2.3.4 1234 2.3.4.5 80 
failure MX3 # t id.orig_h id.orig_p id.resp_h id.resp_p status country -1299718503.05867 1.2.3.4 1234 2.3.4.5 80 failure UK -1299718503.05867 1.2.3.4 1234 2.3.4.5 80 failure MX2 +1313212701.542245 1.2.3.4 1234 2.3.4.5 80 success unknown +# t id.orig_h id.orig_p id.resp_h id.resp_p status country +1313212701.542245 1.2.3.4 1234 2.3.4.5 80 failure MX +# t id.orig_h id.orig_p id.resp_h id.resp_p status country +1313212701.542245 1.2.3.4 1234 2.3.4.5 80 failure US +# t id.orig_h id.orig_p id.resp_h id.resp_p status country +1313212701.542245 1.2.3.4 1234 2.3.4.5 80 failure MX2 +# t id.orig_h id.orig_p id.resp_h id.resp_p status country +1313212701.542245 1.2.3.4 1234 2.3.4.5 80 failure UK diff --git a/testing/btest/Baseline/policy.frameworks.logging.rotate-custom/out b/testing/btest/Baseline/policy.frameworks.logging.rotate-custom/out index af2b17dc75..18bd12d88f 100644 --- a/testing/btest/Baseline/policy.frameworks.logging.rotate-custom/out +++ b/testing/btest/Baseline/policy.frameworks.logging.rotate-custom/out @@ -1,33 +1,33 @@ -1st test-11-03-07_03.00.05.log test.log 11-03-07_03.00.05 11-03-07_04.00.05 0 -1st test-11-03-07_04.00.05.log test.log 11-03-07_04.00.05 11-03-07_05.00.05 0 -1st test-11-03-07_05.00.05.log test.log 11-03-07_05.00.05 11-03-07_06.00.05 0 -1st test-11-03-07_06.00.05.log test.log 11-03-07_06.00.05 11-03-07_07.00.05 0 -1st test-11-03-07_07.00.05.log test.log 11-03-07_07.00.05 11-03-07_08.00.05 0 -1st test-11-03-07_08.00.05.log test.log 11-03-07_08.00.05 11-03-07_09.00.05 0 -1st test-11-03-07_09.00.05.log test.log 11-03-07_09.00.05 11-03-07_10.00.05 0 -1st test-11-03-07_10.00.05.log test.log 11-03-07_10.00.05 11-03-07_11.00.05 0 -1st test-11-03-07_11.00.05.log test.log 11-03-07_11.00.05 11-03-07_12.00.05 0 -1st test-11-03-07_12.00.05.log test.log 11-03-07_12.00.05 11-03-07_12.59.55 1 -2nd test2-11-03-07_03.00.05.log test2.log 11-03-07_03.00.05 11-03-07_03.59.55 0 -2nd test2-11-03-07_03.59.55.log test2.log 11-03-07_03.59.55 11-03-07_04.00.05 0 -2nd test2-11-03-07_04.00.05.log test2.log 11-03-07_04.00.05 11-03-07_04.59.55 0 -2nd test2-11-03-07_04.59.55.log test2.log 11-03-07_04.59.55 11-03-07_05.00.05 0 -2nd test2-11-03-07_05.00.05.log test2.log 11-03-07_05.00.05 11-03-07_05.59.55 0 -2nd test2-11-03-07_05.59.55.log test2.log 11-03-07_05.59.55 11-03-07_06.00.05 0 -2nd test2-11-03-07_06.00.05.log test2.log 11-03-07_06.00.05 11-03-07_06.59.55 0 -2nd test2-11-03-07_06.59.55.log test2.log 11-03-07_06.59.55 11-03-07_07.00.05 0 -2nd test2-11-03-07_07.00.05.log test2.log 11-03-07_07.00.05 11-03-07_07.59.55 0 -2nd test2-11-03-07_07.59.55.log test2.log 11-03-07_07.59.55 11-03-07_08.00.05 0 -2nd test2-11-03-07_08.00.05.log test2.log 11-03-07_08.00.05 11-03-07_08.59.55 0 -2nd test2-11-03-07_08.59.55.log test2.log 11-03-07_08.59.55 11-03-07_09.00.05 0 -2nd test2-11-03-07_09.00.05.log test2.log 11-03-07_09.00.05 11-03-07_09.59.55 0 -2nd test2-11-03-07_09.59.55.log test2.log 11-03-07_09.59.55 11-03-07_10.00.05 0 -2nd test2-11-03-07_10.00.05.log test2.log 11-03-07_10.00.05 11-03-07_10.59.55 0 -2nd test2-11-03-07_10.59.55.log test2.log 11-03-07_10.59.55 11-03-07_11.00.05 0 -2nd test2-11-03-07_11.00.05.log test2.log 11-03-07_11.00.05 11-03-07_11.59.55 0 -2nd test2-11-03-07_11.59.55.log test2.log 11-03-07_11.59.55 11-03-07_12.00.05 0 -2nd test2-11-03-07_12.00.05.log test2.log 11-03-07_12.00.05 11-03-07_12.59.55 0 -2nd test2-11-03-07_12.59.55.log test2.log 11-03-07_12.59.55 11-03-07_12.59.55 1 +1st test.2011-03-07-03-00-05.log test 11-03-07_03.00.05 11-03-07_04.00.05 0 +1st 
test.2011-03-07-04-00-05.log test 11-03-07_04.00.05 11-03-07_05.00.05 0 +1st test.2011-03-07-05-00-05.log test 11-03-07_05.00.05 11-03-07_06.00.05 0 +1st test.2011-03-07-06-00-05.log test 11-03-07_06.00.05 11-03-07_07.00.05 0 +1st test.2011-03-07-07-00-05.log test 11-03-07_07.00.05 11-03-07_08.00.05 0 +1st test.2011-03-07-08-00-05.log test 11-03-07_08.00.05 11-03-07_09.00.05 0 +1st test.2011-03-07-09-00-05.log test 11-03-07_09.00.05 11-03-07_10.00.05 0 +1st test.2011-03-07-10-00-05.log test 11-03-07_10.00.05 11-03-07_11.00.05 0 +1st test.2011-03-07-11-00-05.log test 11-03-07_11.00.05 11-03-07_12.00.05 0 +1st test.2011-03-07-12-00-05.log test 11-03-07_12.00.05 11-03-07_12.59.55 1 +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_03.00.05.log, path=test2, open=1299466805.0, close=1299470395.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_03.59.55.log, path=test2, open=1299470395.0, close=1299470405.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_04.00.05.log, path=test2, open=1299470405.0, close=1299473995.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_04.59.55.log, path=test2, open=1299473995.0, close=1299474005.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_05.00.05.log, path=test2, open=1299474005.0, close=1299477595.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_05.59.55.log, path=test2, open=1299477595.0, close=1299477605.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_06.00.05.log, path=test2, open=1299477605.0, close=1299481195.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_06.59.55.log, path=test2, open=1299481195.0, close=1299481205.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_07.00.05.log, path=test2, open=1299481205.0, close=1299484795.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_07.59.55.log, path=test2, open=1299484795.0, close=1299484805.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_08.00.05.log, path=test2, open=1299484805.0, close=1299488395.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_08.59.55.log, path=test2, open=1299488395.0, close=1299488405.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_09.00.05.log, path=test2, open=1299488405.0, close=1299491995.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_09.59.55.log, path=test2, open=1299491995.0, close=1299492005.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_10.00.05.log, path=test2, open=1299492005.0, close=1299495595.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_10.59.55.log, path=test2, open=1299495595.0, close=1299495605.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_11.00.05.log, path=test2, open=1299495605.0, close=1299499195.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_11.59.55.log, path=test2, open=1299499195.0, close=1299499205.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_12.00.05.log, path=test2, open=1299499205.0, close=1299502795.0, terminating=F] +custom rotate, [writer=WRITER_ASCII, fname=test2-11-03-07_12.59.55.log, path=test2, open=1299502795.0, close=1299502795.0, terminating=T] # t id.orig_h id.orig_p id.resp_h id.resp_p 1299466805.000000 10.0.0.1 20 10.0.0.2 
1024 1299470395.000000 10.0.0.2 20 10.0.0.3 0 @@ -49,16 +49,16 @@ 1299499195.000000 10.0.0.2 20 10.0.0.3 8 1299499205.000000 10.0.0.1 20 10.0.0.2 1033 1299502795.000000 10.0.0.2 20 10.0.0.3 9 -> test-11-03-07_03.00.05.log -> test-11-03-07_04.00.05.log -> test-11-03-07_05.00.05.log -> test-11-03-07_06.00.05.log -> test-11-03-07_07.00.05.log -> test-11-03-07_08.00.05.log -> test-11-03-07_09.00.05.log -> test-11-03-07_10.00.05.log -> test-11-03-07_11.00.05.log -> test-11-03-07_12.00.05.log +> test.2011-03-07-03-00-05.log +> test.2011-03-07-04-00-05.log +> test.2011-03-07-05-00-05.log +> test.2011-03-07-06-00-05.log +> test.2011-03-07-07-00-05.log +> test.2011-03-07-08-00-05.log +> test.2011-03-07-09-00-05.log +> test.2011-03-07-10-00-05.log +> test.2011-03-07-11-00-05.log +> test.2011-03-07-12-00-05.log > test.log > test2-11-03-07_03.00.05.log > test2-11-03-07_03.59.55.log diff --git a/testing/btest/Baseline/policy.frameworks.logging.rotate/out b/testing/btest/Baseline/policy.frameworks.logging.rotate/out index cfadfad390..b153c5b7fa 100644 --- a/testing/btest/Baseline/policy.frameworks.logging.rotate/out +++ b/testing/btest/Baseline/policy.frameworks.logging.rotate/out @@ -1,50 +1,50 @@ -test-11-03-07_03.00.05.log test.log 11-03-07_03.00.05 11-03-07_04.00.05 0 -test-11-03-07_04.00.05.log test.log 11-03-07_04.00.05 11-03-07_05.00.05 0 -test-11-03-07_05.00.05.log test.log 11-03-07_05.00.05 11-03-07_06.00.05 0 -test-11-03-07_06.00.05.log test.log 11-03-07_06.00.05 11-03-07_07.00.05 0 -test-11-03-07_07.00.05.log test.log 11-03-07_07.00.05 11-03-07_08.00.05 0 -test-11-03-07_08.00.05.log test.log 11-03-07_08.00.05 11-03-07_09.00.05 0 -test-11-03-07_09.00.05.log test.log 11-03-07_09.00.05 11-03-07_10.00.05 0 -test-11-03-07_10.00.05.log test.log 11-03-07_10.00.05 11-03-07_11.00.05 0 -test-11-03-07_11.00.05.log test.log 11-03-07_11.00.05 11-03-07_12.00.05 0 -test-11-03-07_12.00.05.log test.log 11-03-07_12.00.05 11-03-07_12.59.55 1 -> test-11-03-07_03.00.05.log +test.2011-03-07-03-00-05.log test 11-03-07_03.00.05 11-03-07_04.00.05 0 +test.2011-03-07-04-00-05.log test 11-03-07_04.00.05 11-03-07_05.00.05 0 +test.2011-03-07-05-00-05.log test 11-03-07_05.00.05 11-03-07_06.00.05 0 +test.2011-03-07-06-00-05.log test 11-03-07_06.00.05 11-03-07_07.00.05 0 +test.2011-03-07-07-00-05.log test 11-03-07_07.00.05 11-03-07_08.00.05 0 +test.2011-03-07-08-00-05.log test 11-03-07_08.00.05 11-03-07_09.00.05 0 +test.2011-03-07-09-00-05.log test 11-03-07_09.00.05 11-03-07_10.00.05 0 +test.2011-03-07-10-00-05.log test 11-03-07_10.00.05 11-03-07_11.00.05 0 +test.2011-03-07-11-00-05.log test 11-03-07_11.00.05 11-03-07_12.00.05 0 +test.2011-03-07-12-00-05.log test 11-03-07_12.00.05 11-03-07_12.59.55 1 +> test.2011-03-07-03-00-05.log # t id.orig_h id.orig_p id.resp_h id.resp_p 1299466805.000000 10.0.0.1 20 10.0.0.2 1024 1299470395.000000 10.0.0.2 20 10.0.0.3 0 -> test-11-03-07_04.00.05.log +> test.2011-03-07-04-00-05.log # t id.orig_h id.orig_p id.resp_h id.resp_p 1299470405.000000 10.0.0.1 20 10.0.0.2 1025 1299473995.000000 10.0.0.2 20 10.0.0.3 1 -> test-11-03-07_05.00.05.log +> test.2011-03-07-05-00-05.log # t id.orig_h id.orig_p id.resp_h id.resp_p 1299474005.000000 10.0.0.1 20 10.0.0.2 1026 1299477595.000000 10.0.0.2 20 10.0.0.3 2 -> test-11-03-07_06.00.05.log +> test.2011-03-07-06-00-05.log # t id.orig_h id.orig_p id.resp_h id.resp_p 1299477605.000000 10.0.0.1 20 10.0.0.2 1027 1299481195.000000 10.0.0.2 20 10.0.0.3 3 -> test-11-03-07_07.00.05.log +> test.2011-03-07-07-00-05.log # t id.orig_h id.orig_p id.resp_h id.resp_p 
1299481205.000000 10.0.0.1 20 10.0.0.2 1028 1299484795.000000 10.0.0.2 20 10.0.0.3 4 -> test-11-03-07_08.00.05.log +> test.2011-03-07-08-00-05.log # t id.orig_h id.orig_p id.resp_h id.resp_p 1299484805.000000 10.0.0.1 20 10.0.0.2 1029 1299488395.000000 10.0.0.2 20 10.0.0.3 5 -> test-11-03-07_09.00.05.log +> test.2011-03-07-09-00-05.log # t id.orig_h id.orig_p id.resp_h id.resp_p 1299488405.000000 10.0.0.1 20 10.0.0.2 1030 1299491995.000000 10.0.0.2 20 10.0.0.3 6 -> test-11-03-07_10.00.05.log +> test.2011-03-07-10-00-05.log # t id.orig_h id.orig_p id.resp_h id.resp_p 1299492005.000000 10.0.0.1 20 10.0.0.2 1031 1299495595.000000 10.0.0.2 20 10.0.0.3 7 -> test-11-03-07_11.00.05.log +> test.2011-03-07-11-00-05.log # t id.orig_h id.orig_p id.resp_h id.resp_p 1299495605.000000 10.0.0.1 20 10.0.0.2 1032 1299499195.000000 10.0.0.2 20 10.0.0.3 8 -> test-11-03-07_12.00.05.log +> test.2011-03-07-12-00-05.log # t id.orig_h id.orig_p id.resp_h id.resp_p 1299499205.000000 10.0.0.1 20 10.0.0.2 1033 1299502795.000000 10.0.0.2 20 10.0.0.3 9 diff --git a/testing/btest/Baseline/policy.frameworks.logging.types/ssh.log b/testing/btest/Baseline/policy.frameworks.logging.types/ssh.log index 02e5b69579..5666db73c6 100644 Binary files a/testing/btest/Baseline/policy.frameworks.logging.types/ssh.log and b/testing/btest/Baseline/policy.frameworks.logging.types/ssh.log differ diff --git a/testing/btest/Baseline/policy.frameworks.metrics.basic-cluster/manager-1.metrics.log b/testing/btest/Baseline/policy.frameworks.metrics.basic-cluster/manager-1.metrics.log new file mode 100644 index 0000000000..ff692027b2 --- /dev/null +++ b/testing/btest/Baseline/policy.frameworks.metrics.basic-cluster/manager-1.metrics.log @@ -0,0 +1,4 @@ +# ts metric_id filter_name index.host index.str index.network value +1313429477.091485 TEST_METRIC foo-bar 6.5.4.3 - - 4 +1313429477.091485 TEST_METRIC foo-bar 1.2.3.4 - - 6 +1313429477.091485 TEST_METRIC foo-bar 7.2.1.5 - - 2 diff --git a/testing/btest/Baseline/policy.frameworks.metrics.basic/metrics.log b/testing/btest/Baseline/policy.frameworks.metrics.basic/metrics.log new file mode 100644 index 0000000000..fb4a2c4528 --- /dev/null +++ b/testing/btest/Baseline/policy.frameworks.metrics.basic/metrics.log @@ -0,0 +1,4 @@ +# ts metric_id filter_name index.host index.str index.network value +1313430544.678529 TEST_METRIC foo-bar 6.5.4.3 - - 2 +1313430544.678529 TEST_METRIC foo-bar 1.2.3.4 - - 3 +1313430544.678529 TEST_METRIC foo-bar 7.2.1.5 - - 1 diff --git a/testing/btest/Baseline/policy.frameworks.metrics.notice/notice.log b/testing/btest/Baseline/policy.frameworks.metrics.notice/notice.log new file mode 100644 index 0000000000..112fe69e4b --- /dev/null +++ b/testing/btest/Baseline/policy.frameworks.metrics.notice/notice.log @@ -0,0 +1,4 @@ +# ts uid id.orig_h id.orig_p id.resp_h id.resp_p note msg sub src dst p n peer_descr actions policy_items dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network +1313432466.662314 - - - - - Test_Notice Metrics threshold crossed by 6.5.4.3 2/1 - 6.5.4.3 - - 2 bro Notice::ACTION_LOG 4 - - - - - - 6.5.4.3 - - +1313432466.662314 - - - - - Test_Notice Metrics threshold crossed by 1.2.3.4 3/1 - 1.2.3.4 - - 3 bro Notice::ACTION_LOG 4 - - - - - - 1.2.3.4 - - +1313432466.662314 - - - - - Test_Notice Metrics threshold crossed by 7.2.1.5 1/1 - 7.2.1.5 - - 1 bro Notice::ACTION_LOG 4 - - - - - - 7.2.1.5 - - diff --git 
a/testing/btest/Baseline/policy.misc.bare-loaded-scripts/canonified_loaded_scripts.log b/testing/btest/Baseline/policy.misc.bare-loaded-scripts/canonified_loaded_scripts.log new file mode 100644 index 0000000000..7d40d728da --- /dev/null +++ b/testing/btest/Baseline/policy.misc.bare-loaded-scripts/canonified_loaded_scripts.log @@ -0,0 +1,13 @@ +# depth name +0 scripts/base/init-bare.bro +1 build/src/const.bif.bro +1 build/src/types.bif.bro +1 build/src/strings.bif.bro +1 build/src/bro.bif.bro +1 build/src/reporter.bif.bro +1 build/src/event.bif.bro +1 scripts/base/frameworks/logging/__load__.bro +2 scripts/base/frameworks/logging/./main.bro +3 build/src/logging.bif.bro +2 scripts/base/frameworks/logging/./writers/ascii.bro +0 scripts/policy/misc/loaded-scripts.bro diff --git a/testing/btest/Baseline/policy.misc.check-test-all/output b/testing/btest/Baseline/policy.misc.check-test-all-policy/output similarity index 100% rename from testing/btest/Baseline/policy.misc.check-test-all/output rename to testing/btest/Baseline/policy.misc.check-test-all-policy/output diff --git a/testing/btest/Baseline/policy.misc.default-loaded-scripts/canonified_loaded_scripts.log b/testing/btest/Baseline/policy.misc.default-loaded-scripts/canonified_loaded_scripts.log index 73ef868313..825be4fbb5 100644 --- a/testing/btest/Baseline/policy.misc.default-loaded-scripts/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/policy.misc.default-loaded-scripts/canonified_loaded_scripts.log @@ -27,6 +27,8 @@ 2 scripts/base/frameworks/notice/./actions/drop.bro 2 scripts/base/frameworks/notice/./actions/email_admin.bro 2 scripts/base/frameworks/notice/./actions/page.bro +2 scripts/base/frameworks/notice/./actions/add-geodata.bro +2 scripts/base/frameworks/notice/./extend-email/hostnames.bro 1 scripts/base/frameworks/dpd/__load__.bro 2 scripts/base/frameworks/dpd/./main.bro 1 scripts/base/frameworks/signatures/__load__.bro @@ -41,6 +43,7 @@ 1 scripts/base/frameworks/metrics/__load__.bro 2 scripts/base/frameworks/metrics/./main.bro 1 scripts/base/frameworks/communication/__load__.bro +2 scripts/base/frameworks/communication/./main.bro 1 scripts/base/frameworks/control/__load__.bro 2 scripts/base/frameworks/control/./main.bro 1 scripts/base/frameworks/cluster/__load__.bro @@ -69,6 +72,8 @@ 2 scripts/base/protocols/irc/./dcc-send.bro 1 scripts/base/protocols/smtp/__load__.bro 2 scripts/base/protocols/smtp/./main.bro +2 scripts/base/protocols/smtp/./entities.bro +2 scripts/base/protocols/smtp/./entities-excerpt.bro 1 scripts/base/protocols/ssh/__load__.bro 2 scripts/base/protocols/ssh/./main.bro 1 scripts/base/protocols/ssl/__load__.bro diff --git a/testing/btest/Baseline/policy.protocols.smtp.basic/smtp.log b/testing/btest/Baseline/policy.protocols.smtp.basic/smtp.log new file mode 100644 index 0000000000..ea638d1892 --- /dev/null +++ b/testing/btest/Baseline/policy.protocols.smtp.basic/smtp.log @@ -0,0 +1,2 @@ +# ts uid id.orig_h id.orig_p id.resp_h id.resp_p mid helo mailfrom rcptto date from to reply_to msg_id in_reply_to subject x_originating_ip first_received second_received last_reply path user_agent +1254722768.219663 56gKBmhBBB6 10.10.1.4 1470 74.53.140.153 25 @50da4BEzauh GP Mon, 5 Oct 2009 11:36:07 +0530 "Gurpartap Singh" - <000301ca4581$ef9e57f0$cedb07d0$@in> - SMTP - - - 250 OK id=1Mugho-0003Dg-Un 74.53.140.153,10.10.1.4 Microsoft Office Outlook 12.0 diff --git a/testing/btest/Baseline/policy.protocols.smtp.mime-extract/smtp-entity_10.10.1.4:1470-74.53.140.153:25_1.dat 
b/testing/btest/Baseline/policy.protocols.smtp.mime-extract/smtp-entity_10.10.1.4:1470-74.53.140.153:25_1.dat new file mode 100644 index 0000000000..f4dd7d22f4 --- /dev/null +++ b/testing/btest/Baseline/policy.protocols.smtp.mime-extract/smtp-entity_10.10.1.4:1470-74.53.140.153:25_1.dat @@ -0,0 +1,13 @@ +Hello + + + +I send u smtp pcap file + +Find the attachment + + + +GPS + + diff --git a/testing/btest/Baseline/policy.protocols.smtp.mime-extract/smtp-entity_10.10.1.4:1470-74.53.140.153:25_2.dat b/testing/btest/Baseline/policy.protocols.smtp.mime-extract/smtp-entity_10.10.1.4:1470-74.53.140.153:25_2.dat new file mode 100644 index 0000000000..9eb3055735 --- /dev/null +++ b/testing/btest/Baseline/policy.protocols.smtp.mime-extract/smtp-entity_10.10.1.4:1470-74.53.140.153:25_2.dat @@ -0,0 +1,264 @@ +Version 4.9.9.1 +* Many bug fixes +* Improved editor + +Version 4.9.9.0 +* Support for latest Mingw compiler system builds +* Bug fixes + +Version 4.9.8.9 +* New code tooltip display +* Improved Indent/Unindent and Remove Comment +* Improved automatic indent +* Added support for the "interface" keyword +* WebUpdate should now report installation problems from PackMan +* New splash screen and association icons +* Improved installer +* Many bug fixes + +Version 4.9.8.7 +* Added support for GCC > 3.2 +* Debug variables are now resent during next debug session +* Watched Variables not in correct context are now kept and updated when it is needed +* Added new compiler/linker options: 20 + - Strip executable + - Generate instructions for a specific machine (i386, i486, i586, i686, pentium, pentium-mmx, pentiumpro, pentium2, pentium3, pentium4, 20 + k6, k6-2, k6-3, athlon, athlon-tbird, athlon-4, athlon-xp, athlon-mp, winchip-c6, winchip2, k8, c3 and c3-2) + - Enable use of processor specific built-in functions (mmmx, sse, sse2, pni, 3dnow) +* "Default" button in Compiler Options is back +* Error messages parsing improved +* Bug fixes + +Version 4.9.8.5 +* Added the possibility to modify the value of a variable during debugging (right click on a watch variable and select "Modify value") +* During Dev-C++ First Time COnfiguration window, users can now choose between using or not class browser and code completion features. +* Many bug fixes + +Version 4.9.8.4 +* Added the possibility to specify an include directory for the code completion cache to be created at Dev-C++ first startup +* Improved code completion cache +* WebUpdate will now backup downloaded DevPaks in Dev-C++\Packages directory, and Dev-C++ executable in devcpp.exe.BACKUP +* Big speed up in function parameters listing while editing +* Bug fixes + +Version 4.9.8.3 +* On Dev-C++ first time configuration dialog, a code completion cache of all the standard 20 + include files can now be generated. +* Improved WebUpdate module +* Many bug fixes + +Version 4.9.8.2 +* New debug feature for DLLs: attach to a running process +* New project option: Use custom Makefile. 20 +* New WebUpdater module. +* Allow user to specify an alternate configuration file in Environment Options 20 + (still can be overriden by using "-c" command line parameter). +* Lots of bug fixes. + +Version 4.9.8.1 +* When creating a DLL, the created static lib respects now the project-defined output directory + +Version 4.9.8.0 +* Changed position of compiler/linker parameters in Project Options. 
+* Improved help file +* Bug fixes + +Version 4.9.7.9 +* Resource errors are now reported in the Resource sheet +* Many bug fixes + +Version 4.9.7.8 +* Made whole bottom report control floating instead of only debug output. +* Many bug fixes + +Version 4.9.7.7 +* Printing settings are now saved +* New environment options : "watch variable under mouse" and "Report watch errors" +* Bug fixes + +Version 4.9.7.6 +* Debug variable browser +* Added possibility to include in a Template the Project's directories (include, libs and ressources) +* Changed tint of Class browser pictures colors to match the New Look style +* Bug fixes + +Version 4.9.7.5 +* Bug fixes + +Version 4.9.7.4 +* When compiling with debugging symbols, an extra definition is passed to the + compiler: -D__DEBUG__ +* Each project creates a _private.h file containing version + information definitions +* When compiling the current file only, no dependency checks are performed +* ~300% Speed-up in class parser +* Added "External programs" in Tools/Environment Options (for units "Open with") +* Added "Open with" in project units context menu +* Added "Classes" toolbar +* Fixed pre-compilation dependency checks to work correctly +* Added new file menu entry: Save Project As +* Bug-fix for double quotes in devcpp.cfg file read by vUpdate +* Other bug fixes + +Version 4.9.7.3 +* When adding debugging symbols on request, remove "-s" option from linker +* Compiling progress window +* Environment options : "Show progress window" and "Auto-close progress window" +* Bug fixes + +Version 4.9.7.2 +* Bug fixes + +Version 4.9.7.1 +* "Build priority" per-unit +* "Include file in linking process" per-unit +* New feature: compile current file only +* Separated C++ compiler options from C compiler options in Makefile (see bug report #654744) +* Separated C++ include dirs from C include dirs in Makefile (see bug report #654744) +* Necessary UI changes in Project Options +* Added display of project filename, project output and a summary of the project files in Project Options General tab. +* Fixed the "compiler-dirs-with-spaces" bug that crept-in in 4.9.7.0 +* Multi-select files in project-view (when "double-click to open" is configured in Environment Settings) +* Resource files are treated as ordinary files now +* Updates in "Project Options/Files" code +* MSVC import now creates the folders structure of the original VC project +* Bug fixes + +Version 4.9.7.0 +* Allow customizing of per-unit compile command in projects +* Added two new macros: and +* Added support for macros in the "default source code" (Tools/Editor Options/Code) +* Separated layout info from project file. It is now kept in a different file + (the same filename as the project's but with extension ".layout"). If you + have your project under CVS control, you ''ll know why this had to happen... +* Compiler settings per-project +* Compiler set per-project +* Implemented new compiler settings framework +* "Compile as C++" per-unit +* "Include file in compilation process" per-unit +* Project version info (creates the relevant VERSIONINFO struct in the private + resource) +* Support XP Themes (creates the CommonControls 6.0 manifest file and includes + it in the private resource) +* Added CVS "login" and "logout" commands +* Project manager and debugging window (in Debug tab) can now be trasnformed into floating windows. 
+* Added "Add Library" button in Project Options +* Bug fixes + +Version 4.9.6.9 +* Implemented search in help files for the word at cursor (context sensitive help) +* Implemented "compiler sets" infrastructure to switch between different compilers easily (e.g. gcc-2.95 and gcc-3.2) +* Added "Files" tab in CVS form to allow selection of more than one file for + the requested CVS action + 20 +Version 4.9.6.8 +* support for DLL application hosting, for debugging and executing DLLs under Dev-C++. +* New class browser option: "Show inherited members" +* Added support for the '::' member access operator in code-completion +* Added *working* function arguments hint +* Added bracket highlighting. When the caret is on a bracket, that bracket and + its counterpart are highlighted +* Nested folders in project view + +Version 4.9.6.7 +* XP Theme support +* Added CVS commands "Add" and "Remove" +* Added configuration option for "Templates Directory" in "Environment Options" +* Code-completion updates +* Bug fixes + +Version 4.9.6.6 +* Editor colors are initialized properly on Dev-C++ first-run +* Added doxygen-style comments in NewClass, NewMemberFunction and NewMemberVariable wizards +* Added file's date/time stamp in File/Properties window +* Current windows listing in Window menu +* Bug fixes + +Version 4.9.6.5 +* CVS support +* Window list (in Window menu) +* bug fixes + +version 4.9.6.4 +* added ENTER key for opening file in project browser, DEL to delete from the project. +* bug fixes + +version 4.9.6.3 +* Bug fixes + +version 4.9.6.2 +* Bug fixes + +version 4.9.6.1 +* New "Abort compilation" button +* Bug fixes +* Now checks for vRoach existance when sending a crash report + +Version 4.9.5.5 +* New option in Editor Options: Show editor hints. User can disable the hints + displayed in the editor when the mouse moves over a word. Since this was the + cause of many errors (although it should be fixed by now), we are giving the + user the option to disable this feature. +* New option in Editor Options (code-completion): Use code-completion cache. + Well, it adds caching to code-completion. Depending on the cache size, + the program may take a bit longer to start-up, but provides very fast + code-completion and the user has all the commands (belonging to the files + he added in the cache) at his fingertips. If, for example, the user adds + "windows.h", he gets all the WinAPI! If he adds "wx/wx.h", he gets all of + wxWindows! You get the picture... +* Removed "Only show classes from current file" option in class browser settings. + It used to be a checkbox, allowing only two states (on or off), but there is + a third relevant option now: "Project classes" so it didn't fit the purpose... + The user can define this in the class browser's context menu under "View mode". +* Fixed the dreaded "Clock skew detected" compiler warning! +* Fixed many class browser bugs, including some that had to do with class folders. + +Version 4.9.5.4 +* Under NT, 2000 and XP, user application data directory will be used to store config files (i.e : C:\Documents and Settings\Username\Local Settings\Application Data) + +Version 4.9.5.3 +* Added ExceptionsAnalyzer. If the devcpp.map file is in the devcpp.exe directory + then we even get a stack trace in the bug report! +* Added new WebUpdate module (inactive temporarily). +* Added new code for code-completion caching of files (disabled - work in progress). 
+ +Version 4.9.5.2 +* Added new option in class-browser: Use colors + (available when right-clicking the class-browser + and selecting "View mode"). +* Dev-C++ now traps access violation of your programs (and of itself too ;) + +Version 4.9.5.1 +* Implemented the "File/Export/Project to HTML" function. +* Added "Tip of the day" system. +* When running a source file in explorer, don't spawn new instance. + Instead open the file in an already launched Dev-C++. +* Class-parser speed-up (50% to 85% improvement timed!!!) +* Many code-completion updates. Now takes into account context, + class inheritance and visibility (shows items only from files + #included directly or indirectly)! +* Caching of result set of code-completion for speed-up. +* New option "Execution/Parameters" (and "Debug/Parameters"). + +Version 4.9.5.0 (5.0 beta 5): +* CPU Window (still in development) +* ToDo list +* Backtrace in debugging +* Run to cursor +* Folders in Project and Class Browser +* Send custom commands to GDB +* Makefile can now be customized. +* Modified the behaviour of the -c param : 20 + -c +* Saving of custom syntax parameter group +* Possibility of changing compilers and tools filename. +* Many bug fixes + + +Version 4.9.4.1 (5.0 beta 4.1): + +* back to gcc 2.95.3 +* Profiling support +* new update/packages checker (vUpdate) +* Lots of bugfixes + diff --git a/testing/btest/Baseline/policy.protocols.smtp.mime-extract/smtp_entities.log b/testing/btest/Baseline/policy.protocols.smtp.mime-extract/smtp_entities.log new file mode 100644 index 0000000000..9496887d65 --- /dev/null +++ b/testing/btest/Baseline/policy.protocols.smtp.mime-extract/smtp_entities.log @@ -0,0 +1,4 @@ +# ts uid id.orig_h id.orig_p id.resp_h id.resp_p mid filename content_len mime_type md5 extraction_file excerpt +1254722770.692743 56gKBmhBBB6 10.10.1.4 1470 74.53.140.153 25 @50da4BEzauh - 79 FAKE_MIME - smtp-entity_10.10.1.4:1470-74.53.140.153:25_1.dat - +1254722770.692743 56gKBmhBBB6 10.10.1.4 1470 74.53.140.153 25 @50da4BEzauh - 1918 FAKE_MIME - - - +1254722770.692804 56gKBmhBBB6 10.10.1.4 1470 74.53.140.153 25 @50da4BEzauh NEWS.txt 10823 FAKE_MIME - smtp-entity_10.10.1.4:1470-74.53.140.153:25_2.dat - diff --git a/testing/btest/Baseline/policy.protocols.smtp.mime/smtp_entities.log b/testing/btest/Baseline/policy.protocols.smtp.mime/smtp_entities.log new file mode 100644 index 0000000000..2b143eacda --- /dev/null +++ b/testing/btest/Baseline/policy.protocols.smtp.mime/smtp_entities.log @@ -0,0 +1,4 @@ +# ts uid id.orig_h id.orig_p id.resp_h id.resp_p mid filename content_len mime_type md5 extraction_file excerpt +1254722770.692743 56gKBmhBBB6 10.10.1.4 1470 74.53.140.153 25 @50da4BEzauh - 79 FAKE_MIME 92bca2e6cdcde73647125da7dccbdd07 - - +1254722770.692743 56gKBmhBBB6 10.10.1.4 1470 74.53.140.153 25 @50da4BEzauh - 1918 FAKE_MIME - - - +1254722770.692804 56gKBmhBBB6 10.10.1.4 1470 74.53.140.153 25 @50da4BEzauh NEWS.txt 10823 FAKE_MIME a968bb0f9f9d95835b2e74c845877e87 - - diff --git a/testing/btest/Traces/smtp.trace b/testing/btest/Traces/smtp.trace new file mode 100644 index 0000000000..931b43b3b8 Binary files /dev/null and b/testing/btest/Traces/smtp.trace differ diff --git a/testing/btest/bifs/piped_exec.bro b/testing/btest/bifs/piped_exec.bro index 4405f0b500..32fd5c5f80 100644 --- a/testing/btest/bifs/piped_exec.bro +++ b/testing/btest/bifs/piped_exec.bro @@ -1,6 +1,12 @@ # @TEST-EXEC: bro %INPUT >output # @TEST-EXEC: btest-diff output +# @TEST-EXEC: btest-diff test.txt + global cmds = "print \"hello world\";"; cmds = 
string_cat(cmds, "\nprint \"foobar\";"); piped_exec("bro", cmds); + +# Test null output. +piped_exec("cat > test.txt", "\x00\x00hello\x00\x00"); + diff --git a/testing/btest/core/leaks.bro b/testing/btest/core/leaks.bro index 04ef8d8ef2..6e605372c9 100644 --- a/testing/btest/core/leaks.bro +++ b/testing/btest/core/leaks.bro @@ -2,4 +2,4 @@ # # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks # -# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -r $TRACES/wikipedia.trace test-all +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -r $TRACES/wikipedia.trace test-all-policy diff --git a/testing/btest/core/reporter-fmt-strings.bro b/testing/btest/core/reporter-fmt-strings.bro new file mode 100644 index 0000000000..0e0be77844 --- /dev/null +++ b/testing/btest/core/reporter-fmt-strings.bro @@ -0,0 +1,10 @@ +# The format string below should end up as a literal part of the reporter's +# error message to stderr and shouldn't be replaced internally. +# +# @TEST-EXEC-FAIL: bro %INPUT >output 2>&1 +# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-remove-abspath btest-diff output + +event bro_init() +{ + event dont_interpret_this("%s"); +} diff --git a/testing/btest/istate/broccoli.bro b/testing/btest/istate/broccoli.bro index 19b2bdf23f..7f97f40585 100644 --- a/testing/btest/istate/broccoli.bro +++ b/testing/btest/istate/broccoli.bro @@ -1,7 +1,7 @@ # @TEST-REQUIRES: grep -vq '#define BROv6' $BUILD/config.h # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run bro bro %INPUT $DIST/aux/broccoli/test/broping-record.bro +# @TEST-EXEC: btest-bg-run bro bro %INPUT $DIST/aux/broccoli/test/broping-record.bro # @TEST-EXEC: btest-bg-run broccoli $BUILD/aux/broccoli/test/broping -r -c 3 127.0.0.1 # @TEST-EXEC: btest-bg-wait -k 20 # @TEST-EXEC: cat bro/ping.log | sed 's/one-way.*//g' >bro.log diff --git a/testing/btest/istate/events-ssl.bro b/testing/btest/istate/events-ssl.bro index b72fc477cb..cfacae9da8 100644 --- a/testing/btest/istate/events-ssl.bro +++ b/testing/btest/istate/events-ssl.bro @@ -1,14 +1,14 @@ # -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run sender bro -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run receiver bro ../receiver.bro +# @TEST-EXEC: btest-bg-run sender bro -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro +# @TEST-EXEC: btest-bg-run receiver bro ../receiver.bro # @TEST-EXEC: btest-bg-wait -k 20 # # @TEST-EXEC: btest-diff sender/http.log # @TEST-EXEC: btest-diff receiver/http.log # @TEST-EXEC: cmp sender/http.log receiver/http.log # -# @TEST-EXEC: ENABLE_COMMUNICATION=1 bro -x sender/events.bst http/base | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.snd.log -# @TEST-EXEC: ENABLE_COMMUNICATION=1 bro -x receiver/events.bst http/base | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.rec.log +# @TEST-EXEC: bro -x sender/events.bst http/base | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.snd.log +# @TEST-EXEC: bro -x receiver/events.bst http/base | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.rec.log # @TEST-EXEC: cmp events.rec.log events.snd.log # # We don't compare the transmitted event paramerters anymore. 
With the dynamic diff --git a/testing/btest/istate/events.bro b/testing/btest/istate/events.bro index 36b245db58..ecf2f2e2ad 100644 --- a/testing/btest/istate/events.bro +++ b/testing/btest/istate/events.bro @@ -1,14 +1,14 @@ # -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run sender bro -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run receiver bro ../receiver.bro +# @TEST-EXEC: btest-bg-run sender bro -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro +# @TEST-EXEC: btest-bg-run receiver bro ../receiver.bro # @TEST-EXEC: btest-bg-wait -k 20 # # @TEST-EXEC: btest-diff sender/http.log # @TEST-EXEC: btest-diff receiver/http.log # @TEST-EXEC: cmp sender/http.log receiver/http.log # -# @TEST-EXEC: ENABLE_COMMUNICATION=1 bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.snd.log -# @TEST-EXEC: ENABLE_COMMUNICATION=1 bro -x receiver/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.rec.log +# @TEST-EXEC: bro -x sender/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.snd.log +# @TEST-EXEC: bro -x receiver/events.bst | sed 's/^Event \[[-0-9.]*\] //g' | grep '^http_' | grep -v http_stats | sed 's/(.*$//g' >events.rec.log # @TEST-EXEC: cmp events.rec.log events.snd.log # # We don't compare the transmitted event paramerters anymore. With the dynamic diff --git a/testing/btest/istate/pybroccoli.py b/testing/btest/istate/pybroccoli.py index 829797080e..b7fb53a955 100644 --- a/testing/btest/istate/pybroccoli.py +++ b/testing/btest/istate/pybroccoli.py @@ -2,7 +2,7 @@ # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/bindings/broccoli-python/_broccoli_intern.so # -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run bro bro %INPUT $DIST/aux/broccoli/bindings/broccoli-python/tests/test.bro +# @TEST-EXEC: btest-bg-run bro bro %INPUT $DIST/aux/broccoli/bindings/broccoli-python/tests/test.bro # @TEST-EXEC: btest-bg-run python PYTHONPATH=$DIST/aux/broccoli/bindings/broccoli-python/:$BUILD/aux/broccoli/bindings/broccoli-python python $DIST/aux/broccoli/bindings/broccoli-python/tests/test.py # @TEST-EXEC: btest-bg-wait -k 20 # @TEST-EXEC: btest-diff bro/.stdout diff --git a/testing/btest/istate/sync.bro b/testing/btest/istate/sync.bro index ca3f936db7..fcd4c0cfb3 100644 --- a/testing/btest/istate/sync.bro +++ b/testing/btest/istate/sync.bro @@ -1,6 +1,6 @@ # -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run sender bro %INPUT ../sender.bro -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run receiver bro %INPUT ../receiver.bro +# @TEST-EXEC: btest-bg-run sender bro %INPUT ../sender.bro +# @TEST-EXEC: btest-bg-run receiver bro %INPUT ../receiver.bro # @TEST-EXEC: btest-bg-wait 20 # # @TEST-EXEC: btest-diff sender/vars.log diff --git a/testing/btest/language/record-extension.bro b/testing/btest/language/record-extension.bro index c05b9da5e8..21b704ca7a 100644 --- a/testing/btest/language/record-extension.bro +++ b/testing/btest/language/record-extension.bro @@ -4,11 +4,13 @@ type Foo: record { a: count; b: count &optional; + myset: set[count] &default=set(); }; redef record Foo += { c: count &default=42; d: count &optional; + anotherset: set[count] &default=set(); }; global f1: Foo = [$a=21]; diff --git a/testing/btest/language/vector-list-init-records.bro 
b/testing/btest/language/vector-list-init-records.bro new file mode 100644 index 0000000000..ee2b78c4a5 --- /dev/null +++ b/testing/btest/language/vector-list-init-records.bro @@ -0,0 +1,20 @@ +# Initializing a vector with a list of records should promote elements as +# necessary to match the vector's yield type. + +# @TEST-EXEC: bro %INPUT >output +# @TEST-EXEC: btest-diff output + +type Foo: record { + s: string; + o: string &optional; +}; + +const v: vector of Foo = { + [$s="bar", $o="check"], + [$s="baz"] +}; + +for ( i in v ) + print fmt("element %d = %s", i, v[i]); + +print v; diff --git a/testing/btest/policy/frameworks/cluster/start-it-up.bro b/testing/btest/policy/frameworks/cluster/start-it-up.bro index 0e58b662c3..d1eb94d5e1 100644 --- a/testing/btest/policy/frameworks/cluster/start-it-up.bro +++ b/testing/btest/policy/frameworks/cluster/start-it-up.bro @@ -1,8 +1,8 @@ -# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 CLUSTER_NODE=manager-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 CLUSTER_NODE=proxy-1 bro %INPUT -# @TEST-EXEC: btest-bg-run proxy-2 BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 CLUSTER_NODE=proxy-2 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 CLUSTER_NODE=worker-1 bro %INPUT -# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 CLUSTER_NODE=worker-2 bro %INPUT +# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT +# @TEST-EXEC: btest-bg-run proxy-2 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-2 bro %INPUT +# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT +# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT # @TEST-EXEC: btest-bg-wait -k 2 # @TEST-EXEC: btest-diff manager-1/.stdout # @TEST-EXEC: btest-diff proxy-1/.stdout diff --git a/testing/btest/policy/frameworks/control/configuration_update.bro b/testing/btest/policy/frameworks/control/configuration_update.bro index 919ba72ee3..23b4998a1b 100644 --- a/testing/btest/policy/frameworks/control/configuration_update.bro +++ b/testing/btest/policy/frameworks/control/configuration_update.bro @@ -1,6 +1,6 @@ -# @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 bro %INPUT frameworks/control/controllee Communication::listen_port_clear=65531/tcp -# @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 bro %INPUT test-redef frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65531/tcp Control::cmd=configuration_update -# @TEST-EXEC: btest-bg-run controller2 BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65531/tcp Control::cmd=shutdown +# @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controllee Communication::listen_port_clear=65531/tcp +# @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. bro %INPUT test-redef frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65531/tcp Control::cmd=configuration_update +# @TEST-EXEC: btest-bg-run controller2 BROPATH=$BROPATH:.. 
bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65531/tcp Control::cmd=shutdown # @TEST-EXEC: btest-bg-wait 1 # @TEST-EXEC: btest-diff controllee/.stdout diff --git a/testing/btest/policy/frameworks/control/id_value.bro b/testing/btest/policy/frameworks/control/id_value.bro index 850cb1dd73..9f0cb76861 100644 --- a/testing/btest/policy/frameworks/control/id_value.bro +++ b/testing/btest/policy/frameworks/control/id_value.bro @@ -1,5 +1,5 @@ -# @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 bro %INPUT only-for-controllee frameworks/control/controllee Communication::listen_port_clear=65532/tcp -# @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65532/tcp Control::cmd=id_value Control::arg=test_var +# @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT only-for-controllee frameworks/control/controllee Communication::listen_port_clear=65532/tcp +# @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65532/tcp Control::cmd=id_value Control::arg=test_var # @TEST-EXEC: btest-bg-wait -k 1 # @TEST-EXEC: btest-diff controller/.stdout diff --git a/testing/btest/policy/frameworks/control/shutdown.bro b/testing/btest/policy/frameworks/control/shutdown.bro index f527af6ecb..55af973faa 100644 --- a/testing/btest/policy/frameworks/control/shutdown.bro +++ b/testing/btest/policy/frameworks/control/shutdown.bro @@ -1,5 +1,5 @@ -# @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 bro %INPUT frameworks/control/controllee Communication::listen_port_clear=65530/tcp -# @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. ENABLE_COMMUNICATION=1 bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65530/tcp Control::cmd=shutdown +# @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controllee Communication::listen_port_clear=65530/tcp +# @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. 
bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65530/tcp Control::cmd=shutdown # @TEST-EXEC: btest-bg-wait 1 redef Communication::nodes = { diff --git a/testing/btest/policy/frameworks/logging/path-func.bro b/testing/btest/policy/frameworks/logging/path-func.bro index 79d96e1431..ade6aedbc9 100644 --- a/testing/btest/policy/frameworks/logging/path-func.bro +++ b/testing/btest/policy/frameworks/logging/path-func.bro @@ -21,11 +21,11 @@ export { global c = -1; -function path_func(id: Log::ID, path: string) : string +function path_func(id: Log::ID, path: string, rec: Log) : string { c = (c + 1) % 3; - return fmt("%s-%d", path, c); + return fmt("%s-%d-%s", path, c, rec$country); } event bro_init() diff --git a/testing/btest/policy/frameworks/logging/remote-types.bro b/testing/btest/policy/frameworks/logging/remote-types.bro index e36754bd7d..1e60ce70af 100644 --- a/testing/btest/policy/frameworks/logging/remote-types.bro +++ b/testing/btest/policy/frameworks/logging/remote-types.bro @@ -1,6 +1,6 @@ # -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run sender bro --pseudo-realtime %INPUT ../sender.bro -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run receiver bro --pseudo-realtime %INPUT ../receiver.bro +# @TEST-EXEC: btest-bg-run sender bro --pseudo-realtime %INPUT ../sender.bro +# @TEST-EXEC: btest-bg-run receiver bro --pseudo-realtime %INPUT ../receiver.bro # @TEST-EXEC: btest-bg-wait -k 1 # @TEST-EXEC: btest-diff receiver/test.log # @TEST-EXEC: cmp receiver/test.log sender/test.log diff --git a/testing/btest/policy/frameworks/logging/remote.bro b/testing/btest/policy/frameworks/logging/remote.bro index 1fc459e833..00d7d69463 100644 --- a/testing/btest/policy/frameworks/logging/remote.bro +++ b/testing/btest/policy/frameworks/logging/remote.bro @@ -1,7 +1,7 @@ # -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run sender bro --pseudo-realtime %INPUT ../sender.bro +# @TEST-EXEC: btest-bg-run sender bro --pseudo-realtime %INPUT ../sender.bro # @TEST-EXEC: sleep 1 -# @TEST-EXEC: ENABLE_COMMUNICATION=1 btest-bg-run receiver bro --pseudo-realtime %INPUT ../receiver.bro +# @TEST-EXEC: btest-bg-run receiver bro --pseudo-realtime %INPUT ../receiver.bro # @TEST-EXEC: sleep 1 # @TEST-EXEC: btest-bg-wait -k 1 # @TEST-EXEC: btest-diff sender/test.log diff --git a/testing/btest/policy/frameworks/logging/rotate-custom.bro b/testing/btest/policy/frameworks/logging/rotate-custom.bro index 9f5960a09a..788fa090e9 100644 --- a/testing/btest/policy/frameworks/logging/rotate-custom.bro +++ b/testing/btest/policy/frameworks/logging/rotate-custom.bro @@ -1,5 +1,5 @@ # -# @TEST-EXEC: bro -b -r %DIR/rotation.trace %INPUT | egrep "test|test2" | sort >out +# @TEST-EXEC: bro -b -r %DIR/rotation.trace %INPUT 2>&1 | egrep "test|test2" | sort >out # @TEST-EXEC: for i in `ls test*.log | sort`; do printf '> %s\n' $i; cat $i; done | sort | uniq >>out # @TEST-EXEC: btest-diff out @@ -18,10 +18,16 @@ export { } redef Log::default_rotation_interval = 1hr; -redef Log::default_rotation_postprocessor = "echo 1st"; +redef Log::default_rotation_postprocessor_cmd = "echo 1st"; + +function custom_rotate(info: Log::RotationInfo) : bool +{ + print "custom rotate", info; + return T; +} redef Log::rotation_control += { - [Log::WRITER_ASCII, "test2"] = [$interv=30mins, $postprocessor="echo 2nd"] + [Log::WRITER_ASCII, "test2"] = [$interv=30mins, $postprocessor=custom_rotate] }; event bro_init() diff --git a/testing/btest/policy/frameworks/logging/rotate.bro 
b/testing/btest/policy/frameworks/logging/rotate.bro
index 0179a0bbe2..d53b92f169 100644
--- a/testing/btest/policy/frameworks/logging/rotate.bro
+++ b/testing/btest/policy/frameworks/logging/rotate.bro
@@ -1,6 +1,6 @@
 #
-# @TEST-EXEC: bro -r %DIR/rotation.trace %INPUT | grep "test" >out
-# @TEST-EXEC: for i in test-*.log; do printf '> %s\n' $i; cat $i; done >>out
+# @TEST-EXEC: bro -r %DIR/rotation.trace %INPUT 2>&1 | grep "test" >out
+# @TEST-EXEC: for i in test.*.log; do printf '> %s\n' $i; cat $i; done >>out
 # @TEST-EXEC: btest-diff out
 
 module Test;
@@ -18,7 +18,7 @@ export {
 }
 
 redef Log::default_rotation_interval = 1hr;
-redef Log::default_rotation_postprocessor = "echo";
+redef Log::default_rotation_postprocessor_cmd = "echo";
 
 event bro_init()
 	{
diff --git a/testing/btest/policy/frameworks/logging/types.bro b/testing/btest/policy/frameworks/logging/types.bro
index 21cfd1fa70..2c4d52f34e 100644
--- a/testing/btest/policy/frameworks/logging/types.bro
+++ b/testing/btest/policy/frameworks/logging/types.bro
@@ -29,9 +29,18 @@ export {
 		se: set[string];
 		vc: vector of count;
 		ve: vector of string;
+		f: function(i: count) : string;
 	} &log;
 }
 
+function foo(i : count) : string
+	{
+	if ( i > 0 )
+		return "Foo";
+	else
+		return "Bar";
+	}
+
 event bro_init()
 	{
 	Log::create_stream(SSH, [$columns=Log]);
@@ -56,7 +65,8 @@ event bro_init()
 		$ss=set("AA", "BB", "CC"),
 		$se=empty_set,
 		$vc=vector(10, 20, 30),
-		$ve=empty_vector
+		$ve=empty_vector,
+		$f=foo
 		]);
 	}
 
diff --git a/testing/btest/policy/frameworks/metrics/basic-cluster.bro b/testing/btest/policy/frameworks/metrics/basic-cluster.bro
new file mode 100644
index 0000000000..eda41c3759
--- /dev/null
+++ b/testing/btest/policy/frameworks/metrics/basic-cluster.bro
@@ -0,0 +1,38 @@
+# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
+# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT
+# @TEST-EXEC: sleep 1
+# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
+# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
+# @TEST-EXEC: btest-bg-wait -k 6
+# @TEST-EXEC: btest-diff manager-1/metrics.log
+
+@TEST-START-FILE cluster-layout.bro
+redef Cluster::nodes = {
+	["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1")],
+	["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1")],
+	["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"],
+	["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"],
+};
+@TEST-END-FILE
+
+redef enum Metrics::ID += {
+	TEST_METRIC,
+};
+
+event bro_init() &priority=5
+	{
+	Metrics::add_filter(TEST_METRIC,
+		[$name="foo-bar",
+		 $break_interval=3secs]);
+	}
+
+@if ( Cluster::local_node_type() == Cluster::WORKER )
+
+event bro_init()
+	{
+	Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
+	Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
+	Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
+	}
+
+@endif
\ No newline at end of file
diff --git a/testing/btest/policy/frameworks/metrics/basic.bro b/testing/btest/policy/frameworks/metrics/basic.bro
new file mode 100644
index 0000000000..43e7ac28ef
--- /dev/null
+++ b/testing/btest/policy/frameworks/metrics/basic.bro
@@ -0,0 +1,16 @@
+# @TEST-EXEC: bro %INPUT
+# @TEST-EXEC: btest-diff metrics.log
+
+redef enum Metrics::ID += {
+	TEST_METRIC,
+};
+
+event bro_init() &priority=5
+	{
+	Metrics::add_filter(TEST_METRIC,
+		[$name="foo-bar",
+		 $break_interval=3secs]);
+	Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
+	Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
+	Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
+	}
diff --git a/testing/btest/policy/frameworks/metrics/notice.bro b/testing/btest/policy/frameworks/metrics/notice.bro
new file mode 100644
index 0000000000..3451af18f4
--- /dev/null
+++ b/testing/btest/policy/frameworks/metrics/notice.bro
@@ -0,0 +1,23 @@
+# @TEST-EXEC: bro %INPUT
+# @TEST-EXEC: btest-diff notice.log
+
+redef enum Notice::Type += {
+	Test_Notice,
+};
+
+redef enum Metrics::ID += {
+	TEST_METRIC,
+};
+
+event bro_init() &priority=5
+	{
+	Metrics::add_filter(TEST_METRIC,
+		[$name="foo-bar",
+		 $break_interval=3secs,
+		 $note=Test_Notice,
+		 $notice_threshold=1,
+		 $log=F]);
+	Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
+	Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
+	Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
+	}
diff --git a/testing/btest/policy/misc/bare-loaded-scripts.test b/testing/btest/policy/misc/bare-loaded-scripts.test
new file mode 100644
index 0000000000..fb7074f2c2
--- /dev/null
+++ b/testing/btest/policy/misc/bare-loaded-scripts.test
@@ -0,0 +1,12 @@
+# This test is meant to cover whether the set of scripts that get loaded by
+# default in bare mode matches a baseline of known defaults.
+#
+# As the output has absolute paths in it, we need to remove the common
+# prefix to make the test work everywhere. That's what the sed magic
+# below does. Don't ask. :-)
+
+# @TEST-EXEC: bro -b misc/loaded-scripts
+# @TEST-EXEC: test -e loaded_scripts.log
+# @TEST-EXEC: cat loaded_scripts.log | awk 'NR>1{print $2}' | sed -e ':a' -e '$!N' -e 's/^\(.*\).*\n\1.*/\1/' -e 'ta' >prefix
+# @TEST-EXEC: cat loaded_scripts.log | sed "s#`cat prefix`##g" >canonified_loaded_scripts.log
+# @TEST-EXEC: btest-diff canonified_loaded_scripts.log
diff --git a/testing/btest/policy/misc/check-bare-test-all-policy.bro b/testing/btest/policy/misc/check-bare-test-all-policy.bro
new file mode 100644
index 0000000000..a3474942e3
--- /dev/null
+++ b/testing/btest/policy/misc/check-bare-test-all-policy.bro
@@ -0,0 +1,7 @@
+# Makes sures test-all-policy.bro (which loads *all* other policy scripts)
+# compiles correctly even in bare mode.
+#
+# @TEST-EXEC: bro -b %INPUT >output
+# @TEST-EXEC: btest-diff output
+
+@load test-all-policy
diff --git a/testing/btest/policy/misc/check-test-all-policy.bro b/testing/btest/policy/misc/check-test-all-policy.bro
new file mode 100644
index 0000000000..9a9d120e6d
--- /dev/null
+++ b/testing/btest/policy/misc/check-test-all-policy.bro
@@ -0,0 +1,6 @@
+# Makes sures test-all-policy.bro (which loads *all* other policy scripts) compiles correctly.
+#
+# @TEST-EXEC: bro %INPUT >output
+# @TEST-EXEC: btest-diff output
+
+@load test-all-policy
diff --git a/testing/btest/policy/misc/check-test-all.bro b/testing/btest/policy/misc/check-test-all.bro
deleted file mode 100644
index d9f1f42b4d..0000000000
--- a/testing/btest/policy/misc/check-test-all.bro
+++ /dev/null
@@ -1,6 +0,0 @@
-# Makes sures test-all.bro (which loads *all* other scripts) compiles correctly.
-#
-# @TEST-EXEC: bro %INPUT >output
-# @TEST-EXEC: btest-diff output
-
-@load test-all
diff --git a/testing/btest/policy/misc/init-default-coverage.bro b/testing/btest/policy/misc/init-default-coverage.bro
new file mode 100644
index 0000000000..c2b2bb737d
--- /dev/null
+++ b/testing/btest/policy/misc/init-default-coverage.bro
@@ -0,0 +1,11 @@
+# Makes sure that all base/* scripts are loaded by default via init-default.bro;
+# and that all scripts loaded there in there actually exist.
+
+@TEST-EXEC: test -d $DIST/scripts/base
+@TEST-EXEC: test -e $DIST/scripts/base/init-default.bro
+@TEST-EXEC: ( cd $DIST/scripts/base && find . -name '*.bro' ) | sort >"all scripts found"
+@TEST-EXEC: bro misc/loaded-scripts
+@TEST-EXEC: cat loaded_scripts.log | egrep -v '/build/|/loaded-scripts.bro' | awk 'NR>1{print $2}' | sed 's#/./#/#g' >loaded_scripts.log.tmp
+@TEST-EXEC: cat loaded_scripts.log.tmp | sed -e ':a' -e '$!N' -e 's/^\(.*\).*\n\1.*/\1/' -e 'ta' >prefix
+@TEST-EXEC: cat loaded_scripts.log.tmp | sed "s#`cat prefix`#./#g" | sort >init-default.bro
+@TEST-EXEC: diff -u "all scripts found" init-default.bro 1>&2
diff --git a/testing/btest/policy/misc/test-all-default-coverage.bro b/testing/btest/policy/misc/test-all-default-coverage.bro
new file mode 100644
index 0000000000..89cbcb55fe
--- /dev/null
+++ b/testing/btest/policy/misc/test-all-default-coverage.bro
@@ -0,0 +1,8 @@
+# Makes sure that all policy/* scripts are loaded in test-all-policy.bro; and that
+# all scripts loaded there actually exist.
+
+@TEST-EXEC: test -e $DIST/scripts/test-all-policy.bro
+@TEST-EXEC: test -d $DIST/scripts
+@TEST-EXEC: ( cd $DIST/scripts/policy && find . -name '*.bro' ) | sort >"all scripts found"
+@TEST-EXEC: cat $DIST/scripts/test-all-policy.bro | grep '@load' | sed 'sm^\( *# *\)\{0,\}@load *m./mg' | sort >test-all-policy.bro
+@TEST-EXEC: diff -u "all scripts found" test-all-policy.bro 1>&2
diff --git a/testing/btest/policy/misc/testing-coverage.bro b/testing/btest/policy/misc/testing-coverage.bro
deleted file mode 100644
index c5898f9f65..0000000000
--- a/testing/btest/policy/misc/testing-coverage.bro
+++ /dev/null
@@ -1,6 +0,0 @@
-# Makes sure that all policy scripts are loading in testing.bro; and that all
-# scripts loaded there actually exist.
-
-@TEST-EXEC: ( cd $DIST/policy && find . -name '*.bro' ) | sort >"all scripts found"
-@TEST-EXEC: cat $DIST/policy/test-all.bro | grep '@load' | sed 'sm^\( *# *\)\{0,\}@load *m./mg' | sort >test-all.bro
-@TEST-EXEC: diff -u "all scripts found" test-all.bro 1>&2
diff --git a/testing/btest/policy/protocols/smtp/basic.test b/testing/btest/policy/protocols/smtp/basic.test
new file mode 100644
index 0000000000..6be512a255
--- /dev/null
+++ b/testing/btest/policy/protocols/smtp/basic.test
@@ -0,0 +1,4 @@
+# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT
+# @TEST-EXEC: btest-diff smtp.log
+
+@load base/protocols/smtp
diff --git a/testing/btest/policy/protocols/smtp/mime-extract.test b/testing/btest/policy/protocols/smtp/mime-extract.test
new file mode 100644
index 0000000000..09e6d3b242
--- /dev/null
+++ b/testing/btest/policy/protocols/smtp/mime-extract.test
@@ -0,0 +1,25 @@
+# @TEST-REQUIRES: grep -q '#define HAVE_LIBMAGIC' $BUILD/config.h
+# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT
+# @TEST-EXEC: btest-diff smtp_entities.log
+# @TEST-EXEC: btest-diff smtp-entity_10.10.1.4:1470-74.53.140.153:25_1.dat
+# @TEST-EXEC: btest-diff smtp-entity_10.10.1.4:1470-74.53.140.153:25_2.dat
+# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT SMTP::extraction_prefix="test"
+# @TEST-EXEC: test -e test_10.10.1.4:1470-74.53.140.153:25_1.dat
+# @TEST-EXEC: test -e test_10.10.1.4:1470-74.53.140.153:25_2.dat
+
+@load base/protocols/smtp
+
+redef SMTP::extract_file_types=/text\/plain/;
+
+event bro_init()
+	{
+	Log::remove_default_filter(SMTP::SMTP_ENTITIES);
+	Log::add_filter(SMTP::SMTP_ENTITIES, [$name="normalized-mime-types",
+		$pred=function(rec: SMTP::EntityInfo): bool
+			{
+			if ( rec?$mime_type )
+				rec$mime_type = "FAKE_MIME";
+			return T;
+			}
+		]);
+	}
diff --git a/testing/btest/policy/protocols/smtp/mime.test b/testing/btest/policy/protocols/smtp/mime.test
new file mode 100644
index 0000000000..37bbc6b14d
--- /dev/null
+++ b/testing/btest/policy/protocols/smtp/mime.test
@@ -0,0 +1,23 @@
+# Checks logging of mime types and md5 calculation. Mime type in the log
+# is normalized to prevent sensitivity to libmagic version.
+
+# @TEST-REQUIRES: grep -q '#define HAVE_LIBMAGIC' $BUILD/config.h
+# @TEST-EXEC: bro -r $TRACES/smtp.trace %INPUT
+# @TEST-EXEC: btest-diff smtp_entities.log
+
+@load base/protocols/smtp
+
+redef SMTP::generate_md5=/text\/plain/;
+
+event bro_init()
+	{
+	Log::remove_default_filter(SMTP::SMTP_ENTITIES);
+	Log::add_filter(SMTP::SMTP_ENTITIES, [$name="normalized-mime-types",
+		$pred=function(rec: SMTP::EntityInfo): bool
+			{
+			if ( rec?$mime_type )
+				rec$mime_type = "FAKE_MIME";
+			return T;
+			}
+		]);
+	}