Merge remote-tracking branch 'origin/master' into topic/seth/notice-suppression

commit d18fdef2d4
Author: Seth Hall
Date: 2011-09-21 02:26:19 -04:00

548 changed files with 523 additions and 29934 deletions

.gitmodules (3 changes)

@@ -13,3 +13,6 @@
 [submodule "aux/btest"]
 	path = aux/btest
 	url = git://git.bro-ids.org/btest
+[submodule "cmake"]
+	path = cmake
+	url = git://git.bro-ids.org/cmake
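
With the cmake scripts now coming from this new submodule, an existing checkout needs to fetch it before configuring; a sketch, assuming the standard git submodule workflow (a fresh ``git clone --recursive`` picks it up automatically):

> git submodule update --init cmake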

CHANGES (108 changes)

@@ -1,4 +1,74 @@
+1.6-dev-1261 | 2011-09-15 17:13:55 -0700
+
+  * Memory leak fixes. Addresses #574. (Jon Siwek)
+
+  * Add configure options for ruby/bindings integration. (Jon Siwek)
+
+  * Fix filter path_func to allow a record argument that is a subset of
+    the stream's columns. Addresses #600. (Jon Siwek)
+
+  * Log rotation is now controlled directly through Filter records.
+    (Jon Siwek)
+
+  * Fix indexing for record types with optional fields. Addresses #378.
+    (Jon Siwek)
+
+1.6-dev-1248 | 2011-09-15 16:01:32 -0700
+
+  * Removed custom malloc() implementation for FreeBSD. Closes #557.
+    (Jon Siwek)
+
+  * Testing/external scripts no longer compute MD5 checksums for SMTP
+    entities. (Robin Sommer)
+
+  * External tests no longer include the full content of mismatching
+    files in the diagnostics output. (Robin Sommer)
+
+1.6-dev-1241 | 2011-09-14 22:51:52 -0400
+
+  * Fixing a major memory utilization issue with SSL analysis. (Seth
+    Hall)
+
+  * Enhancements to HTTP analysis: (Seth Hall)
+
+    - More options for the header-names.bro script.
+
+    - New script for logging header names and values. Closes #519.
+      (Seth Hall)
+
+    - HTTP body size measurement added to http.log.
+
+    - The value of the content-length header has been removed from the
+      default output, but it can be added back locally by a user.
+
+    - Added fields to indicate if some parsing interruption happened
+      during the body transfer. Closes #581. (Seth Hall)
+
+  * Misc smaller usability and correctness updates: (Seth Hall)
+
+    - Removed a notice definition from the base SSL scripts.
+
+    - Moved a logging stream ID into the export section for
+      known-services and bumped priority for creating the stream.
+
+    - Adding configuration knobs for the SQL injection attack detection
+      script and renaming the HTTP::SQL_Injection_Attack notice to
+      HTTP::SQL_Injection_Attack_Against.
+
+    - Bumped priority when creating Known::CERTS_LOG.
+
+    - Fixing a warning from the cluster framework. (Seth Hall)
+
+  * Bugfix for log writer, which didn't escape binary data in some
+    situations. Closes #585. (Robin Sommer)
+
+  * A large set of changes to the testing/external infrastructure.
+    The traces for external test suites are no longer kept inside the
+    repositories themselves but downloaded separately via curl. This
+    is because git is pretty bad at dealing with large files. See the
+    README for more information. (Robin Sommer)
+
1.6-dev-1221 | 2011-09-08 08:41:17 -0700

  * Updates for documentation framework and script docs. (Jon Siwek)

@@ -26,7 +96,7 @@
    parse Bro log files. (Gilbert Clark)

  * Potential fix for rotation crashes. Addresses #588. (Robin Sommer)

  * Added PF_RING load balancing support to the scripting layer,
    enabled by loading the misc/pf-ring-load-balancing script. (Seth
    Hall)

@@ -51,7 +121,7 @@
    line option to set it explicitly. Addresses #447. (Jon Siwek)

  * Various updates to logging framework. (Seth Hall)

  * Changed presentation of enum labels to include namespace. (Jon
    Siwek)

@@ -65,7 +135,7 @@
    (Gregor Maier)

  * Added a Reporter::fatal BIF. (Jon Siwek)

  * Fixes for GeoIP support. Addresses #538. (Jon Siwek)

  * Fixed excessive memory usage of SSL analyzer on connections with

@@ -82,7 +152,7 @@
  * Fixed bare-mode @load dependency problems. (Jon Siwek)

  * Fixed check_for_unused_event_handlers option. (Jon Siwek)

  * Fixing some more bare-mode @load dependency issues. (Jon Siwek)

  * Reorganizing btest/policy directory to match new scripts/

@@ -92,18 +162,18 @@
    $prefix/share/bro/base. Addresses #545. (Jon Siwek)

  * Changed/fixed some cluster script error reporting. (Jon Siwek)

  * Various script normalization. (Jon Siwek)

  * Add a test that checks each individual script can be loaded in
    bare-mode. Addresses #545. (Jon Siwek)

  * Tune when c$conn is set. Addresses #554. (Gregor Maier)

  * Add ConnSize_Analyzer's fields to conn.log. (Gregor Maier)

  * Fixing bug in "interesting hostnames" detection. (Seth Hall)

  * Adding metrics framework intermediate updates. (Seth Hall)

1.6-dev-1120 | 2011-08-19 19:00:15 -0700

@@ -131,7 +201,7 @@
    framework updates. (Seth Hall)

  * Metrics framework now works on cluster setups. (Seth Hall)

  * Reclassifying more DNS manager errors as non-fatal errors. (Robin
    Sommer)

@@ -147,9 +217,9 @@
    Siwek)

  * Workaround for FreeBSD CMake port missing debug flags. (Jon Siwek)

  * piped_exec() can now deal with null bytes. (Seth Hall)

  * Fix vector initialization for lists of records with optional
    types. Closes #485. (Jon Siwek)

@@ -163,19 +233,19 @@
    (Jon Siwek)

  * Fixing reporter's location tracking. Closes #492. (Robin Sommer)

  * Turning DNS errors into warnings. Closes #255. (Robin Sommer)

  * Logging's path_func now receives the log record as argument.
    Closes #555. (Robin Sommer)

  * Functions can now be logged; their full body gets recorded.
    Closes #506. (Robin Sommer)

  * Bugfix for hostname notice email extension. (Seth Hall)

  * Updates for notice framework. (Seth Hall)

    - New ACTION_ADD_GEODATA to add geodata to notices in an extension
      field named remote_location.

@@ -185,7 +255,7 @@
  * Updates to local.bro (Seth Hall)

  * Added the profiling script. (Seth Hall)

  * Updates for SSH scripts. (Seth Hall)

  * ConnSize analyzer is turned on by default now. (Seth Hall)

@@ -196,12 +266,12 @@
  * HTTP now uses the extract_filename_from_content_disposition
    function. (Seth Hall)

  * Major SMTP script refactor. Closes #509. (Jon Siwek and Seth Hall)

  * New variable Site::local_nets_table in utils/site for mapping
    address to defined local subnet.

  * Metrics framework updates, more to come. (Seth Hall)

1.6-dev-1061 | 2011-08-08 18:25:27 -0700

CMakeLists.txt

@@ -1,43 +1,6 @@
 project(Bro C CXX)
-
-if (NOT CMAKE_C_COMPILER)
-    message(FATAL_ERROR "Could not find prerequisite C compiler")
-endif ()
-if (NOT CMAKE_CXX_COMPILER)
-    message(FATAL_ERROR "Could not find prerequisite C++ compiler")
-endif ()
-
-########################################################################
-## CMake Configuration
-
 cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
-
-# Prohibit in-source builds.
-if ("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
-    message(FATAL_ERROR "In-source builds are not allowed. Please use "
-                        "./configure to choose a build directory and "
-                        "initialize the build configuration.")
-endif ()
-
-set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
-
-if ("${PROJECT_SOURCE_DIR}" STREQUAL "${CMAKE_SOURCE_DIR}")
-    # uninstall target
-    configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
-                   "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
-                   @ONLY)
-    add_custom_target(uninstall COMMAND
-        ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)
-endif ()
-
-# Keep RPATH upon installing so that user doesn't have to ensure the linker
-# can find internal/private libraries or libraries external to the build
-# directory that were explicitly linked against
-if (NOT BINARY_PACKAGING_MODE)
-    SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
-    SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")
-endif ()
+include(cmake/CommonCMakeConfig.cmake)
 
 ########################################################################
 ## Project/Build Configuration
@@ -69,25 +32,9 @@ list(GET version_numbers 0 VERSION_MAJOR)
 list(GET version_numbers 1 VERSION_MINOR)
 set(VERSION_MAJ_MIN "${VERSION_MAJOR}.${VERSION_MINOR}")
-
-set(EXTRA_COMPILE_FLAGS "-Wall -Wno-unused")
-
-if (ENABLE_DEBUG)
-    set(CMAKE_BUILD_TYPE Debug)
-    # manual add of -g works around its omission in FreeBSD's CMake port
-    set(EXTRA_COMPILE_FLAGS "${EXTRA_COMPILE_FLAGS} -g -DDEBUG")
-else ()
-    set(CMAKE_BUILD_TYPE RelWithDebInfo)
-endif ()
-
-# Compiler flags may already exist in CMake cache (e.g. when specifying
-# CFLAGS environment variable before running cmake for the first time)
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${EXTRA_COMPILE_FLAGS}")
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_COMPILE_FLAGS}")
 
 ########################################################################
 ## Dependency Configuration
 
-include(MacDependencyPaths)
 include(FindRequiredPackage)
 
 # Check cache value first to avoid displaying "Found sed" messages every time
@@ -244,16 +191,4 @@ message(
 "\n================================================================\n"
 )
 
-########################################################################
-## Show warning when installing user is different from the one that configured
-
-install(CODE "
-    if (NOT $ENV{USER} STREQUAL \$ENV{USER})
-        message(STATUS \"ATTENTION: Install is being performed by user \"
-                \"'\$ENV{USER}', but the build directory was configured by \"
-                \"user '$ENV{USER}'. This may result in a permissions error \"
-                \"when writing the install manifest, but you can ignore it \"
-                \"and consider the installation as successful if you don't \"
-                \"care about the install manifest.\")
-    endif ()
-")
+include(UserChangedWarning)

INSTALL (45 changes)

@@ -59,47 +59,50 @@ installation time:
 Installation
 ============
 
-To build and install into /usr/local/bro:
+To build and install into ``/usr/local/bro``::
 
 > ./configure
-> cd build
 > make
 > make install
 
 This will perform an out-of-source build into a directory called
-build/, using default build options. It then installs the Bro binary
-into /usr/local/bro/bin. Depending on the Bro package you
+``build/``, using default build options. It then installs the Bro binary
+into ``/usr/local/bro/bin``. Depending on the Bro package you
 downloaded, there may be auxiliary tools and libraries available in
-the aux/ directory. If so, they will be installed by default as well
-if not explicitly disabled via configure options and may also have
-additional installation/configuration instructions that you can
-find in their source directories.
+the ``aux/`` directory. All of them except for ``aux/bro-aux`` will also
+be built and installed by doing ``make install``. To install the programs
+that come in the ``aux/bro-aux`` directory, additionally use ``make
+install-aux``. There are ``--disable`` options that can be given to the
+configure script to turn off unwanted auxiliary projects.
 
-You can specify a different installation directory with
+You can specify a different installation directory with::
 
 > ./configure --prefix=<dir>
 
-Note that "/usr" and "/opt/bro" are standard prefixes for binary
+Note that ``/usr`` and ``/opt/bro`` are standard prefixes for binary
 packages to be installed, so those are typically not good choices
 unless you are creating such a package.
 
-Run "./configure --help" for more options.
+Run ``./configure --help`` for more options.
 
 Running Bro
 ===========
 
 Bro is a complex program and it takes a bit of time to get familiar
-with it. In the following we give a few simple examples. See the
-quickstart guide at http://www.bro-ids.org for more information; you
-can find the source for that in doc/quick-start.
+with it. A good place for newcomers to start is the quick start guide
+available here:
+
+    http://www.bro-ids.org/documentation/quickstart.html
 
-For developers that wish to run Bro from the build/ directory
-after performing "make", but without performing "make install", they
-will have to first set BROPATH to look for scripts inside the build
-directory. Sourcing either build/bro-path-dev.sh or build/bro-path-dev.csh
-as appropriate for the current shell accomplishes this. e.g.:
+For developers that wish to run Bro from the ``build/`` directory
+after performing ``make``, but without performing ``make install``, they
+will have to first set ``BROPATH`` to look for scripts inside the build
+directory. Sourcing either ``build/bro-path-dev.sh`` or
+``build/bro-path-dev.csh`` as appropriate for the current shell
+accomplishes this and also augments your ``PATH`` so you can use Bro
+without qualifying the path to it. e.g.::
 
 > ./configure
 > make
 > source build/bro-path-dev.sh
-> ./build/src/bro
+> bro <options>

Makefile

@@ -13,6 +13,9 @@ all: configured
 install: configured
 	( cd $(BUILD) && make install )
 
+install-aux: configured
+	( cd $(BUILD) && make install-aux )
+
 clean: configured docclean
 	( cd $(BUILD) && make clean )

README (38 changes)

@@ -1,31 +1,23 @@
-This is release 1.6 of Bro, a system for detecting network intruders in
-real-time using passive network monitoring.
+============================
+Bro Network Security Monitor
+============================
 
-Please see the file INSTALL for installation instructions and
-pointers for getting started. For more documentation, see the
-documentation on Bro's home page:
+Bro is a powerful framework for network analysis and security
+monitoring.
 
-	http://www.bro-ids.org/docs
+Please see the INSTALL file for installation instructions and pointers
+for getting started. For more documentation, research publications, or
+community contact information see Bro's home page:
 
-The main parts of Bro's documentation are also available in the doc/
-directory of the distribution. (Please note that the documentation
-is still a work in progress; there will be more in future releases.)
+	http://www.bro-ids.org
 
-Numerous other Bro-related publications, including a paper describing the
-system, can be found at
+Please see COPYING for licensing information.
 
-	http://www.bro-ids.org/publications.html
+On behalf of the Bro Development Team,
 
-Send comments, etc., to the Bro mailing list, bro@bro-ids.org.
-However, please note that you must first subscribe to the list in
-order to be able to post to it.
-
-- Vern Paxson & Robin Sommer, on behalf of the Bro development team
+ Vern Paxson & Robin Sommer,
+ International Computer Science Institute &
 Lawrence Berkeley National Laboratory
-University of California, Berkeley USA
-ICSI Center for Internet Research (ICIR)
-International Computer Science Institute
-Berkeley, CA USA
 vern@icir.org / robin@icir.org

VERSION

@@ -1 +1 @@
-1.6-dev-1221
+1.6-dev-1261

@@ -1 +1 @@
-Subproject commit 4fc13f7c6987b4163609e3df7a31f38501411cb7
+Subproject commit 796b6e7e1492021984bbc297b3fcc2952f20c778

@@ -1 +1 @@
-Subproject commit 86990f1640d986e39d5bb1287dbeb03b59a464f0
+Subproject commit 2a5a709b5c85e74a88d88ef385ee0fb44ca02cd6

@@ -1 +1 @@
-Subproject commit 6df97331bb74d02ef2252138b301e4ca14523962
+Subproject commit adae807493dbb24ec8baf759e4589258ae57cc3e

@@ -1 +1 @@
-Subproject commit c7499ee54f50bca65606dc3edc1aff132d93af80
+Subproject commit 0731a7b88145c73efc6221fa4f7ce30c4036a35b

@@ -1 +1 @@
-Subproject commit ab78a66dd782f165ddf921faaf1f065b2f987481
+Subproject commit 3c0b0e9a91060a7a453a5d6fb72ed1fd9071fda9

cmake (new submodule)

@@ -0,0 +1 @@
+Subproject commit 47fa45b726793aa15491d240cd5a84f4a4f7bd0a

cmake/ChangeMacInstallNames.cmake (deleted)

@@ -1,87 +0,0 @@
# Calling this macro with the name of a list variable will modify that
# list such that any third party libraries that do not come with a
# vanilla Mac OS X system will be replaced by an adjusted library that
# has an install_name relative to the location of any executable that
# links to it.
#
# Also, it will schedule the modified libraries for installation in a
# 'support_libs' subdirectory of the CMAKE_INSTALL_PREFIX.
#
# The case of third party libraries depending on other third party
# libraries is currently not handled by this macro.
#
# Ex.
#
# set(libs /usr/lib/libz.dylib
# /usr/lib/libssl.dylib
# /usr/local/lib/libmagic.dylib
# /usr/local/lib/libGeoIP.dylib
# /usr/local/lib/somestaticlib.a)
#
# include(ChangeMacInstallNames)
# ChangeMacInstallNames(libs)
#
# Should result in ${libs} containing:
# /usr/lib/libz.dylib
# /usr/lib/libssl.dylib
# ${CMAKE_BINARY_DIR}/darwin_support_libs/libmagic.dylib
# ${CMAKE_BINARY_DIR}/darwin_support_libs/libGeoIP.dylib
# /usr/local/lib/somestaticlib.a
#
# such that we can now do:
#
# add_executable(some_exe ${srcs})
# target_link_libraries(some_exe ${libs})
#
# Any binary packages created from such a build should be self-contained
# and provide working installs on vanilla OS X systems.
macro(ChangeMacInstallNames libListVar)
if (APPLE)
find_program(INSTALL_NAME_TOOL install_name_tool)
set(MAC_INSTALL_NAME_DEPS)
set(SUPPORT_BIN_DIR ${CMAKE_BINARY_DIR}/darwin_support_libs)
set(SUPPORT_INSTALL_DIR support_libs)
file(MAKE_DIRECTORY ${SUPPORT_BIN_DIR})
foreach (_lib ${${libListVar}})
# only care about install_name for shared libraries that are
# not shipped in Apple's vanilla OS X installs
string(REGEX MATCH ^/usr/lib/* apple_provided_lib ${_lib})
string(REGEX MATCH dylib$ is_shared_lib ${_lib})
if (NOT apple_provided_lib AND is_shared_lib)
get_filename_component(_libname ${_lib} NAME)
set(_adjustedLib ${SUPPORT_BIN_DIR}/${_libname})
set(_tmpLib
${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${_libname})
# make a temporary copy so we can adjust permissions
configure_file(${_lib} ${_tmpLib} COPYONLY)
# copy to build directory with correct write permissions
file(COPY ${_tmpLib}
DESTINATION ${SUPPORT_BIN_DIR}
FILE_PERMISSIONS OWNER_READ OWNER_WRITE
GROUP_READ WORLD_READ)
# remove the old library from the list provided as macro
# argument and add the new library with modified install_name
list(REMOVE_ITEM ${libListVar} ${_lib})
list(APPEND ${libListVar} ${_adjustedLib})
# update the install target to install the third party libs
# with modified install_name
install(FILES ${_adjustedLib}
DESTINATION ${SUPPORT_INSTALL_DIR})
# perform the install_name change
execute_process(COMMAND install_name_tool -id
@executable_path/../${SUPPORT_INSTALL_DIR}/${_libname}
${_adjustedLib})
endif ()
endforeach ()
endif ()
endmacro()

cmake/CheckFunctions.cmake (deleted)

@@ -1,12 +0,0 @@
include(CheckFunctionExists)
check_function_exists(getopt_long HAVE_GETOPT_LONG)
check_function_exists(mallinfo HAVE_MALLINFO)
check_function_exists(strcasestr HAVE_STRCASESTR)
check_function_exists(strerror HAVE_STRERROR)
check_function_exists(strsep HAVE_STRSEP)
check_function_exists(sigset HAVE_SIGSET)
if (NOT HAVE_SIGSET)
check_function_exists(sigaction HAVE_SIGACTION)
endif ()

cmake/CheckHeaders.cmake (deleted)

@@ -1,28 +0,0 @@
include(CheckIncludeFiles)
include(CheckStructHasMember)
check_include_files(getopt.h HAVE_GETOPT_H)
check_include_files(magic.h HAVE_MAGIC_H)
check_include_files(memory.h HAVE_MEMORY_H)
check_include_files("sys/socket.h;netinet/in.h;net/if.h;netinet/if_ether.h"
HAVE_NETINET_IF_ETHER_H)
check_include_files("sys/socket.h;netinet/in.h;net/if.h;netinet/ip6.h"
HAVE_NETINET_IP6_H)
check_include_files("sys/socket.h;net/if.h;net/ethernet.h" HAVE_NET_ETHERNET_H)
check_include_files(sys/ethernet.h HAVE_SYS_ETHERNET_H)
check_include_files(sys/time.h HAVE_SYS_TIME_H)
check_include_files("time.h;sys/time.h" TIME_WITH_SYS_TIME)
check_include_files(os-proto.h HAVE_OS_PROTO_H)
check_struct_has_member(HISTORY_STATE entries "stdio.h;readline/readline.h"
HAVE_READLINE_HISTORY_ENTRIES)
check_include_files("stdio.h;readline/readline.h" HAVE_READLINE_READLINE_H)
check_include_files("stdio.h;readline/history.h" HAVE_READLINE_HISTORY_H)
if (HAVE_READLINE_READLINE_H AND
HAVE_READLINE_HISTORY_H AND
HAVE_READLINE_HISTORY_ENTRIES)
set(HAVE_READLINE true)
endif ()
check_struct_has_member("struct sockaddr_in" sin_len "netinet/in.h" SIN_LEN)
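
The HAVE_* results produced by checks like these are typically written into a generated config header so that C/C++ sources can test them; a minimal sketch of that pattern (the ``config.h.in`` name is illustrative, not taken from this commit):

    # After the checks above have populated the cache:
    configure_file(${CMAKE_SOURCE_DIR}/config.h.in ${CMAKE_BINARY_DIR}/config.h)
    include_directories(${CMAKE_BINARY_DIR})

where ``config.h.in`` would hold lines such as ``#cmakedefine HAVE_GETOPT_H``.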

cmake/CheckNameserCompat.cmake (deleted)

@@ -1,21 +0,0 @@
include(CheckCSourceCompiles)
# Check whether the nameser compatibility header is required
# This can be the case on the Darwin platform
check_c_source_compiles("
#include <arpa/nameser.h>
int main() { HEADER *hdr; int d = NS_IN6ADDRSZ; return 0; }"
have_nameser_header)
if (NOT have_nameser_header)
check_c_source_compiles("
#include <arpa/nameser.h>
#include <arpa/nameser_compat.h>
int main() { HEADER *hdr; int d = NS_IN6ADDRSZ; return 0; }"
NEED_NAMESER_COMPAT_H)
if (NOT NEED_NAMESER_COMPAT_H)
message(FATAL_ERROR
"Asynchronous DNS support compatibility check failed.")
endif ()
endif ()

cmake/CheckOptionalBuildSources.cmake (deleted)

@@ -1,21 +0,0 @@
# A macro that checks whether optional sources exist and if they do, they
# are added to the build/install process, else a warning is issued
#
# _dir: the subdir of the current source dir in which the optional
# sources are located
# _packageName: a string that identifies the package
# _varName: name of the variable indicating whether package is scheduled
# to be installed
macro(CheckOptionalBuildSources _dir _packageName _varName)
if (${_varName})
if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${_dir}/CMakeLists.txt)
add_subdirectory(${_dir})
else ()
message(WARNING "${_packageName} source code does not exist in "
"${CMAKE_CURRENT_SOURCE_DIR}/${_dir} "
"so it will not be built or installed")
set(${_varName} false)
endif ()
endif ()
endmacro(CheckOptionalBuildSources)
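
A hypothetical invocation, to show the argument order (directory, package name, install flag):

    CheckOptionalBuildSources(aux/broctl Broctl INSTALL_BROCTL)

If ``aux/broctl/CMakeLists.txt`` were missing, the warning would fire and ``INSTALL_BROCTL`` would be forced to false.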

cmake/CheckTypes.cmake (deleted)

@@ -1,31 +0,0 @@
include(CheckTypeSize)
check_type_size("long int" SIZEOF_LONG_INT)
check_type_size("long long" SIZEOF_LONG_LONG)
check_type_size("void *" SIZEOF_VOID_P)
# checks existence of ${_type}, and if it does not, sets CMake variable ${_var}
# to alternative type, ${_alt_type}
macro(CheckType _type _alt_type _var)
# don't perform check if we have a result from a previous CMake run
if (NOT HAVE_${_var})
check_type_size(${_type} ${_var})
if (NOT ${_var})
set(${_var} ${_alt_type})
else ()
unset(${_var})
unset(${_var} CACHE)
endif ()
endif ()
endmacro(CheckType _type _alt_type _var)
set(CMAKE_EXTRA_INCLUDE_FILES sys/types.h)
CheckType(int32_t int int32_t)
CheckType(u_int32_t u_int u_int32_t)
CheckType(u_int16_t u_short u_int16_t)
CheckType(u_int8_t u_char u_int8_t)
set(CMAKE_EXTRA_INCLUDE_FILES)
set(CMAKE_EXTRA_INCLUDE_FILES sys/socket.h)
CheckType(socklen_t int socklen_t)
set(CMAKE_EXTRA_INCLUDE_FILES)

cmake/ConfigurePackaging.cmake (deleted)

@@ -1,238 +0,0 @@
# A collection of macros to assist in configuring CMake/Cpack
# source and binary packaging
# Sets CPack version variables by splitting the first macro argument
# using "." as a delimiter. If the length of the split list is
# greater than 2, all remaining elements are tacked on to the patch
# level version. Note that the version set by the macro is internal
# to binary packaging, the file name of our package will reflect the
# exact version number.
macro(SetPackageVersion _version)
string(REPLACE "." " " version_numbers ${_version})
separate_arguments(version_numbers)
list(GET version_numbers 0 CPACK_PACKAGE_VERSION_MAJOR)
list(REMOVE_AT version_numbers 0)
list(GET version_numbers 0 CPACK_PACKAGE_VERSION_MINOR)
list(REMOVE_AT version_numbers 0)
list(LENGTH version_numbers version_length)
while (version_length GREATER 0)
list(GET version_numbers 0 patch_level)
if (CPACK_PACKAGE_VERSION_PATCH)
set(CPACK_PACKAGE_VERSION_PATCH
"${CPACK_PACKAGE_VERSION_PATCH}.${patch_level}")
else ()
set(CPACK_PACKAGE_VERSION_PATCH ${patch_level})
endif ()
list(REMOVE_AT version_numbers 0)
list(LENGTH version_numbers version_length)
endwhile ()
if (APPLE)
# Mac PackageMaker package requires only numbers in the versioning
string(REGEX REPLACE "[_a-zA-Z-]" "" CPACK_PACKAGE_VERSION_MAJOR
${CPACK_PACKAGE_VERSION_MAJOR})
string(REGEX REPLACE "[_a-zA-Z-]" "" CPACK_PACKAGE_VERSION_MINOR
${CPACK_PACKAGE_VERSION_MINOR})
if (CPACK_PACKAGE_VERSION_PATCH)
string(REGEX REPLACE "[_a-zA-Z-]" "" CPACK_PACKAGE_VERSION_PATCH
${CPACK_PACKAGE_VERSION_PATCH})
endif ()
endif ()
if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
# RPM version accepts letters, but not dashes.
string(REGEX REPLACE "[-]" "" CPACK_PACKAGE_VERSION_MAJOR
${CPACK_PACKAGE_VERSION_MAJOR})
string(REGEX REPLACE "[-]" "" CPACK_PACKAGE_VERSION_MINOR
${CPACK_PACKAGE_VERSION_MINOR})
if (CPACK_PACKAGE_VERSION_PATCH)
string(REGEX REPLACE "[-]" "" CPACK_PACKAGE_VERSION_PATCH
${CPACK_PACKAGE_VERSION_PATCH})
endif ()
endif ()
# Minimum supported OS X version
set(CPACK_OSX_PACKAGE_VERSION 10.5)
endmacro(SetPackageVersion)
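
A worked example of the splitting logic, with a hypothetical version string:

    SetPackageVersion(1.5.3.2)
    # -> CPACK_PACKAGE_VERSION_MAJOR=1, ..._MINOR=5, ..._PATCH=3.2
    #    (every element past the second is folded into the patch level)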
# Sets the list of desired package types to be created by the make
# package target. A .tar.gz is only made for source packages, and
# binary package format depends on the operating system:
#
# Darwin - PackageMaker
# Linux - RPM if the platform has rpmbuild installed
# DEB if the platform has dpkg-shlibdeps installed
#
# CPACK_GENERATOR is set by this macro
# CPACK_SOURCE_GENERATOR is set by this macro
macro(SetPackageGenerators)
set(CPACK_SOURCE_GENERATOR TGZ)
#set(CPACK_GENERATOR TGZ)
if (APPLE)
list(APPEND CPACK_GENERATOR PackageMaker)
elseif (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
find_program(RPMBUILD_EXE rpmbuild)
find_program(DPKGSHLIB_EXE dpkg-shlibdeps)
if (RPMBUILD_EXE)
set(CPACK_GENERATOR ${CPACK_GENERATOR} RPM)
endif ()
if (DPKGSHLIB_EXE)
set(CPACK_GENERATOR ${CPACK_GENERATOR} DEB)
set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS true)
endif ()
endif ()
endmacro(SetPackageGenerators)
# Sets CPACK_PACKAGE_FILE_NAME in the following format:
#
# <project_name>-<version>-<OS/platform>-<arch>
#
# and CPACK_SOURCE_PACKAGE_FILE_NAME as:
#
# <project_name>-<version>
macro(SetPackageFileName _version)
if (PACKAGE_NAME_PREFIX)
set(CPACK_PACKAGE_FILE_NAME "${PACKAGE_NAME_PREFIX}-${_version}")
set(CPACK_SOURCE_PACKAGE_FILE_NAME "${PACKAGE_NAME_PREFIX}-${_version}")
else ()
set(CPACK_PACKAGE_FILE_NAME "${CMAKE_PROJECT_NAME}-${_version}")
set(CPACK_SOURCE_PACKAGE_FILE_NAME "${CMAKE_PROJECT_NAME}-${_version}")
endif ()
set(CPACK_PACKAGE_FILE_NAME
"${CPACK_PACKAGE_FILE_NAME}-${CMAKE_SYSTEM_NAME}")
if (APPLE)
# Only Intel-based Macs are supported. CMAKE_SYSTEM_PROCESSOR may
# return the confusing 'i386' if running a 32-bit kernel, but chances
# are the binary is x86_64 (or more generally 'Intel') compatible.
set(arch "Intel")
else ()
set (arch ${CMAKE_SYSTEM_PROCESSOR})
endif ()
set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_FILE_NAME}-${arch}")
endmacro(SetPackageFileName)
# Sets up binary package metadata
macro(SetPackageMetadata)
set(CPACK_PACKAGE_VENDOR "Lawrence Berkeley National Laboratory")
set(CPACK_PACKAGE_CONTACT "info@bro-ids.org")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY
"The Bro Network Intrusion Detection System")
# CPack may enforce file name extensions for certain package generators
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/README
${CMAKE_CURRENT_BINARY_DIR}/README.txt
COPYONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/COPYING
${CMAKE_CURRENT_BINARY_DIR}/COPYING.txt
COPYONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/MAC_PACKAGE_INTRO
${CMAKE_CURRENT_BINARY_DIR}/MAC_PACKAGE_INTRO.txt)
set(CPACK_PACKAGE_DESCRIPTION_FILE ${CMAKE_CURRENT_BINARY_DIR}/README.txt)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_CURRENT_BINARY_DIR}/COPYING.txt)
set(CPACK_RESOURCE_FILE_README ${CMAKE_CURRENT_BINARY_DIR}/README.txt)
set(CPACK_RESOURCE_FILE_WELCOME
${CMAKE_CURRENT_BINARY_DIR}/MAC_PACKAGE_INTRO.txt)
set(CPACK_RPM_PACKAGE_LICENSE "BSD")
endmacro(SetPackageMetadata)
# Sets pre and post install scripts for PackageMaker packages.
# The main functionality that such scripts offer is a way to make backups
# of "configuration" files that a user may have modified.
# Note that RPMs already have a robust mechanism for dealing with
# user-modified files, so we do not need this additional functionality
macro(SetPackageInstallScripts VERSION)
if (INSTALLED_CONFIG_FILES)
# Remove duplicates from the list of installed config files
separate_arguments(INSTALLED_CONFIG_FILES)
list(REMOVE_DUPLICATES INSTALLED_CONFIG_FILES)
# Space delimit the list again
foreach (_file ${INSTALLED_CONFIG_FILES})
set(_tmp "${_tmp} ${_file}")
endforeach ()
set(INSTALLED_CONFIG_FILES "${_tmp}" CACHE STRING "" FORCE)
endif ()
if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
# DEB packages can automatically handle configuration files
# if provided in a "conffiles" file in the packaging
set(conffiles_file ${CMAKE_CURRENT_BINARY_DIR}/conffiles)
if (INSTALLED_CONFIG_FILES)
string(REPLACE " " ";" conffiles ${INSTALLED_CONFIG_FILES})
endif ()
file(WRITE ${conffiles_file} "")
foreach (_file ${conffiles})
file(APPEND ${conffiles_file} "${_file}\n")
endforeach ()
list(APPEND CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA
${CMAKE_CURRENT_BINARY_DIR}/conffiles)
# RPMs don't need any explicit direction regarding config files.
# Leaving the set of installed config files empty will just
# bypass the logic in the default pre/post install scripts and let
# the RPMs/DEBs do their own thing (regarding backups, etc.)
# when upgrading packages.
set(INSTALLED_CONFIG_FILES "")
endif ()
if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_preinstall.sh.in)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_preinstall.sh.in
${CMAKE_CURRENT_BINARY_DIR}/package_preinstall.sh
@ONLY)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_preinstall.sh.in
${CMAKE_CURRENT_BINARY_DIR}/preinst
@ONLY)
set(CPACK_PREFLIGHT_SCRIPT
${CMAKE_CURRENT_BINARY_DIR}/package_preinstall.sh)
set(CPACK_RPM_PRE_INSTALL_SCRIPT_FILE
${CMAKE_CURRENT_BINARY_DIR}/package_preinstall.sh)
list(APPEND CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA
${CMAKE_CURRENT_BINARY_DIR}/preinst)
endif ()
if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_postupgrade.sh.in)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_postupgrade.sh.in
${CMAKE_CURRENT_BINARY_DIR}/package_postupgrade.sh
@ONLY)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_postupgrade.sh.in
${CMAKE_CURRENT_BINARY_DIR}/postinst
@ONLY)
set(CPACK_POSTUPGRADE_SCRIPT
${CMAKE_CURRENT_BINARY_DIR}/package_postupgrade.sh)
set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE
${CMAKE_CURRENT_BINARY_DIR}/package_postupgrade.sh)
list(APPEND CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA
${CMAKE_CURRENT_BINARY_DIR}/postinst)
endif ()
endmacro(SetPackageInstallScripts)
# Main macro to configure all the packaging options
macro(ConfigurePackaging _version)
SetPackageVersion(${_version})
SetPackageGenerators()
SetPackageFileName(${_version})
SetPackageMetadata()
SetPackageInstallScripts(${_version})
set(CPACK_SET_DESTDIR true)
set(CPACK_PACKAGING_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX})
# add default files/directories to ignore for source package
# user may specify others via configure script
list(APPEND CPACK_SOURCE_IGNORE_FILES ${CMAKE_BINARY_DIR} ".git")
include(CPack)
endmacro(ConfigurePackaging)
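
A top-level CMakeLists then needs only the single entry point; a sketch:

    ConfigurePackaging(${VERSION})
    # afterwards, 'make package' drives the generators selected above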

cmake/FindBIND.cmake (deleted)

@@ -1,101 +0,0 @@
# - Try to find BIND include dirs and libraries
#
# Usage of this module as follows:
#
# find_package(BIND)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# BIND_ROOT_DIR Set this variable to the root installation of BIND
# if the module has problems finding the proper
# installation path.
#
# Variables defined by this module:
#
# BIND_FOUND System has BIND, include and library dirs found
# BIND_INCLUDE_DIR The BIND include directories.
# BIND_LIBRARY The BIND library (if any) required for
# ns_initparse and res_mkquery symbols
find_path(BIND_ROOT_DIR
NAMES include/resolv.h
)
find_path(BIND_INCLUDE_DIR
NAMES resolv.h
HINTS ${BIND_ROOT_DIR}/include
)
if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
# the static resolv library is preferred because
# on some systems, the ns_initparse symbol is not
# exported in the shared library (strangely)
# see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=291609
set(bind_libs none libresolv.a resolv bind)
else ()
set(bind_libs none resolv bind)
endif ()
include(CheckCSourceCompiles)
# Find which library has the res_mkquery and ns_initparse symbols
set(CMAKE_REQUIRED_INCLUDES ${BIND_INCLUDE_DIR})
foreach (bindlib ${bind_libs})
if (NOT ${bindlib} MATCHES "none")
find_library(BIND_LIBRARY
NAMES ${bindlib}
HINTS ${BIND_ROOT_DIR}/lib
)
endif ()
set(CMAKE_REQUIRED_LIBRARIES ${BIND_LIBRARY})
check_c_source_compiles("
#include <arpa/nameser.h>
int main() {
ns_initparse(0, 0, 0);
return 0;
}
" ns_initparse_works_${bindlib})
check_c_source_compiles("
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/nameser.h>
#include <resolv.h>
int main() {
int (*p)() = res_mkquery;
}
" res_mkquery_works_${bindlib})
set(CMAKE_REQUIRED_LIBRARIES)
if (ns_initparse_works_${bindlib} AND res_mkquery_works_${bindlib})
break ()
else ()
set(BIND_LIBRARY BIND_LIBRARY-NOTFOUND)
endif ()
endforeach ()
set(CMAKE_REQUIRED_INCLUDES)
include(FindPackageHandleStandardArgs)
if (ns_initparse_works_none AND res_mkquery_works_none)
# system does not require linking to a BIND library
find_package_handle_standard_args(BIND DEFAULT_MSG
BIND_INCLUDE_DIR
)
else ()
find_package_handle_standard_args(BIND DEFAULT_MSG
BIND_LIBRARY
BIND_INCLUDE_DIR
)
endif ()
mark_as_advanced(
BIND_ROOT_DIR
BIND_LIBRARY
BIND_INCLUDE_DIR
)
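
A minimal consumer of this module, assuming a hypothetical target named ``bro``:

    find_package(BIND REQUIRED)
    include_directories(${BIND_INCLUDE_DIR})
    if (BIND_LIBRARY)
        # empty/NOTFOUND when the libc resolver already provides the symbols
        target_link_libraries(bro ${BIND_LIBRARY})
    endif ()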

cmake/FindBISON.cmake (deleted)

@@ -1,221 +0,0 @@
# - Find bison executable and provides macros to generate custom build rules
# The module defines the following variables:
#
# BISON_EXECUTABLE - path to the bison program
# BISON_VERSION - version of bison
# BISON_FOUND - true if the program was found
#
# If bison is found, the module defines the macros:
# BISON_TARGET(<Name> <YaccInput> <CodeOutput> [VERBOSE <file>]
# [COMPILE_FLAGS <string>] [HEADER <FILE>])
# which will create a custom rule to generate a parser. <YaccInput> is
# the path to a yacc file. <CodeOutput> is the name of the source file
# generated by bison. A header file containing the token list is also
# generated according to bison's -d option by default or if the HEADER
# option is used, the argument is passed to bison's --defines option to
# specify output file. If COMPILE_FLAGS option is specified, the next
# parameter is added to the bison command line. If the VERBOSE option is
# specified, <file> is created and contains verbose descriptions of the
# grammar and parser. The macro defines a set of variables:
# BISON_${Name}_DEFINED - true if the macro ran successfully
# BISON_${Name}_INPUT - The input source file, an alias for <YaccInput>
# BISON_${Name}_OUTPUT_SOURCE - The source file generated by bison
# BISON_${Name}_OUTPUT_HEADER - The header file generated by bison
# BISON_${Name}_OUTPUTS - The source files generated by bison
# BISON_${Name}_COMPILE_FLAGS - Options used in the bison command line
#
# ====================================================================
# Example:
#
# find_package(BISON)
# BISON_TARGET(MyParser parser.y ${CMAKE_CURRENT_BINARY_DIR}/parser.cpp)
# add_executable(Foo main.cpp ${BISON_MyParser_OUTPUTS})
# ====================================================================
#=============================================================================
# Copyright 2009 Kitware, Inc.
# Copyright 2006 Tristan Carel
# Modified 2010 by Jon Siwek, adding HEADER option
#
# Distributed under the OSI-approved BSD License (the "License"):
# CMake - Cross Platform Makefile Generator
# Copyright 2000-2009 Kitware, Inc., Insight Software Consortium
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the names of Kitware, Inc., the Insight Software Consortium,
# nor the names of their contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
FIND_PROGRAM(BISON_EXECUTABLE bison DOC "path to the bison executable")
MARK_AS_ADVANCED(BISON_EXECUTABLE)
IF(BISON_EXECUTABLE)
EXECUTE_PROCESS(COMMAND ${BISON_EXECUTABLE} --version
OUTPUT_VARIABLE BISON_version_output
ERROR_VARIABLE BISON_version_error
RESULT_VARIABLE BISON_version_result
OUTPUT_STRIP_TRAILING_WHITESPACE)
IF(NOT ${BISON_version_result} EQUAL 0)
MESSAGE(SEND_ERROR "Command \"${BISON_EXECUTABLE} --version\" failed with output:\n${BISON_version_error}")
ELSE()
STRING(REGEX REPLACE "^bison \\(GNU Bison\\) ([^\n]+)\n.*" "\\1"
BISON_VERSION "${BISON_version_output}")
ENDIF()
# internal macro
MACRO(BISON_TARGET_option_verbose Name BisonOutput filename)
LIST(APPEND BISON_TARGET_cmdopt "--verbose")
GET_FILENAME_COMPONENT(BISON_TARGET_output_path "${BisonOutput}" PATH)
GET_FILENAME_COMPONENT(BISON_TARGET_output_name "${BisonOutput}" NAME_WE)
ADD_CUSTOM_COMMAND(OUTPUT ${filename}
COMMAND ${CMAKE_COMMAND}
ARGS -E copy
"${BISON_TARGET_output_path}/${BISON_TARGET_output_name}.output"
"${filename}"
DEPENDS
"${BISON_TARGET_output_path}/${BISON_TARGET_output_name}.output"
COMMENT "[BISON][${Name}] Copying bison verbose table to ${filename}"
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
SET(BISON_${Name}_VERBOSE_FILE ${filename})
LIST(APPEND BISON_TARGET_extraoutputs
"${BISON_TARGET_output_path}/${BISON_TARGET_output_name}.output")
ENDMACRO(BISON_TARGET_option_verbose)
# internal macro
MACRO(BISON_TARGET_option_extraopts Options)
SET(BISON_TARGET_extraopts "${Options}")
SEPARATE_ARGUMENTS(BISON_TARGET_extraopts)
LIST(APPEND BISON_TARGET_cmdopt ${BISON_TARGET_extraopts})
ENDMACRO(BISON_TARGET_option_extraopts)
#============================================================
# BISON_TARGET (public macro)
#============================================================
#
MACRO(BISON_TARGET Name BisonInput BisonOutput)
SET(BISON_TARGET_output_header "")
#SET(BISON_TARGET_command_opt "")
SET(BISON_TARGET_cmdopt "")
SET(BISON_TARGET_outputs "${BisonOutput}")
IF(NOT ${ARGC} EQUAL 3 AND
NOT ${ARGC} EQUAL 5 AND
NOT ${ARGC} EQUAL 7 AND
NOT ${ARGC} EQUAL 9)
MESSAGE(SEND_ERROR "Usage")
ELSE()
# Parsing parameters
IF(${ARGC} GREATER 5 OR ${ARGC} EQUAL 5)
IF("${ARGV3}" STREQUAL "VERBOSE")
BISON_TARGET_option_verbose(${Name} ${BisonOutput} "${ARGV4}")
ENDIF()
IF("${ARGV3}" STREQUAL "COMPILE_FLAGS")
BISON_TARGET_option_extraopts("${ARGV4}")
ENDIF()
IF("${ARGV3}" STREQUAL "HEADER")
set(BISON_TARGET_output_header "${ARGV4}")
ENDIF()
ENDIF()
IF(${ARGC} GREATER 7 OR ${ARGC} EQUAL 7)
IF("${ARGV5}" STREQUAL "VERBOSE")
BISON_TARGET_option_verbose(${Name} ${BisonOutput} "${ARGV6}")
ENDIF()
IF("${ARGV5}" STREQUAL "COMPILE_FLAGS")
BISON_TARGET_option_extraopts("${ARGV6}")
ENDIF()
IF("${ARGV5}" STREQUAL "HEADER")
set(BISON_TARGET_output_header "${ARGV6}")
ENDIF()
ENDIF()
IF(${ARGC} EQUAL 9)
IF("${ARGV7}" STREQUAL "VERBOSE")
BISON_TARGET_option_verbose(${Name} ${BisonOutput} "${ARGV8}")
ENDIF()
IF("${ARGV7}" STREQUAL "COMPILE_FLAGS")
BISON_TARGET_option_extraopts("${ARGV8}")
ENDIF()
IF("${ARGV7}" STREQUAL "HEADER")
set(BISON_TARGET_output_header "${ARGV8}")
ENDIF()
ENDIF()
IF(BISON_TARGET_output_header)
# Header's name passed in as argument to be used in --defines option
LIST(APPEND BISON_TARGET_cmdopt
"--defines=${BISON_TARGET_output_header}")
set(BISON_${Name}_OUTPUT_HEADER ${BISON_TARGET_output_header})
ELSE()
# Header's name generated by bison (see option -d)
LIST(APPEND BISON_TARGET_cmdopt "-d")
STRING(REGEX REPLACE "^(.*)(\\.[^.]*)$" "\\2" _fileext "${ARGV2}")
STRING(REPLACE "c" "h" _fileext ${_fileext})
STRING(REGEX REPLACE "^(.*)(\\.[^.]*)$" "\\1${_fileext}"
BISON_${Name}_OUTPUT_HEADER "${ARGV2}")
ENDIF()
LIST(APPEND BISON_TARGET_outputs "${BISON_${Name}_OUTPUT_HEADER}")
ADD_CUSTOM_COMMAND(OUTPUT ${BISON_TARGET_outputs}
${BISON_TARGET_extraoutputs}
COMMAND ${BISON_EXECUTABLE}
ARGS ${BISON_TARGET_cmdopt} -o ${ARGV2} ${ARGV1}
DEPENDS ${ARGV1}
COMMENT "[BISON][${Name}] Building parser with bison ${BISON_VERSION}"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
# define target variables
SET(BISON_${Name}_DEFINED TRUE)
SET(BISON_${Name}_INPUT ${ARGV1})
SET(BISON_${Name}_OUTPUTS ${BISON_TARGET_outputs})
SET(BISON_${Name}_COMPILE_FLAGS ${BISON_TARGET_cmdopt})
SET(BISON_${Name}_OUTPUT_SOURCE "${BisonOutput}")
ENDIF(NOT ${ARGC} EQUAL 3 AND
NOT ${ARGC} EQUAL 5 AND
NOT ${ARGC} EQUAL 7 AND
NOT ${ARGC} EQUAL 9)
ENDMACRO(BISON_TARGET)
#
#============================================================
ENDIF(BISON_EXECUTABLE)
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(BISON DEFAULT_MSG BISON_EXECUTABLE)
# FindBISON.cmake ends here

cmake/FindBinPAC.cmake (deleted)

@@ -1,53 +0,0 @@
# - Try to find BinPAC binary and library
#
# Usage of this module as follows:
#
# find_package(BinPAC)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# BinPAC_ROOT_DIR Set this variable to the root installation of
# BinPAC if the module has problems finding the
# proper installation path.
#
# Variables defined by this module:
#
# BINPAC_FOUND System has BinPAC binary and library
# BinPAC_EXE The binpac executable
# BinPAC_LIBRARY The libbinpac.a library
# BinPAC_INCLUDE_DIR The binpac headers
# look for BinPAC in standard locations or user-provided root
find_path(BinPAC_ROOT_DIR
NAMES include/binpac.h
)
find_file(BinPAC_EXE
NAMES binpac
HINTS ${BinPAC_ROOT_DIR}/bin
)
find_library(BinPAC_LIBRARY
NAMES libbinpac.a
HINTS ${BinPAC_ROOT_DIR}/lib
)
find_path(BinPAC_INCLUDE_DIR
NAMES binpac.h
HINTS ${BinPAC_ROOT_DIR}/include
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(BinPAC DEFAULT_MSG
BinPAC_EXE
BinPAC_LIBRARY
BinPAC_INCLUDE_DIR
)
mark_as_advanced(
BinPAC_ROOT_DIR
BinPAC_EXE
BinPAC_LIBRARY
BinPAC_INCLUDE_DIR
)
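
Typical use pairs the executable with a code-generation rule; a sketch with hypothetical file names (binpac emits ``*_pac.cc``/``*_pac.h`` from a ``.pac`` input):

    find_package(BinPAC REQUIRED)
    include_directories(${BinPAC_INCLUDE_DIR})
    add_custom_command(OUTPUT http_pac.cc http_pac.h
                       COMMAND ${BinPAC_EXE} ${CMAKE_CURRENT_SOURCE_DIR}/http.pac
                       DEPENDS http.pac)
    # compile the generated sources and link ${BinPAC_LIBRARY} into the target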

cmake/FindFLEX.cmake (deleted)

@@ -1,179 +0,0 @@
# - Find flex executable and provides a macro to generate custom build rules
#
# The module defines the following variables:
# FLEX_FOUND - true if the flex executable is found
# FLEX_EXECUTABLE - the path to the flex executable
# FLEX_VERSION - the version of flex
# FLEX_LIBRARIES - The flex libraries
#
# The minimum required version of flex can be specified using the
# standard syntax, e.g. FIND_PACKAGE(FLEX 2.5.13)
#
#
# If flex is found on the system, the module provides the macro:
# FLEX_TARGET(Name FlexInput FlexOutput [COMPILE_FLAGS <string>])
# which creates a custom command to generate the <FlexOutput> file from
# the <FlexInput> file. If COMPILE_FLAGS option is specified, the next
# parameter is added to the flex command line. Name is an alias used to
# get details of this custom command. Indeed the macro defines the
# following variables:
# FLEX_${Name}_DEFINED - true if the macro ran successfully
# FLEX_${Name}_OUTPUTS - the source file generated by the custom rule, an
# alias for FlexOutput
# FLEX_${Name}_INPUT - the flex source file, an alias for ${FlexInput}
#
# Flex scanners often use tokens defined by Bison: the code generated
# by Flex depends on the header generated by Bison. This module also
# defines a macro:
# ADD_FLEX_BISON_DEPENDENCY(FlexTarget BisonTarget)
# which adds the required dependency between a scanner and a parser
# where <FlexTarget> and <BisonTarget> are the first parameters of
# respectively FLEX_TARGET and BISON_TARGET macros.
#
# ====================================================================
# Example:
#
# find_package(BISON)
# find_package(FLEX)
#
# BISON_TARGET(MyParser parser.y ${CMAKE_CURRENT_BINARY_DIR}/parser.cpp)
# FLEX_TARGET(MyScanner lexer.l ${CMAKE_CURRENT_BINARY_DIR}/lexer.cpp)
# ADD_FLEX_BISON_DEPENDENCY(MyScanner MyParser)
#
# include_directories(${CMAKE_CURRENT_BINARY_DIR})
# add_executable(Foo
# Foo.cc
# ${BISON_MyParser_OUTPUTS}
# ${FLEX_MyScanner_OUTPUTS}
# )
# ====================================================================
#=============================================================================
# Copyright 2009 Kitware, Inc.
# Copyright 2006 Tristan Carel
# Modified 2010 by Jon Siwek, backporting for CMake 2.6 compat
#
# Distributed under the OSI-approved BSD License (the "License"):
# CMake - Cross Platform Makefile Generator
# Copyright 2000-2009 Kitware, Inc., Insight Software Consortium
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the names of Kitware, Inc., the Insight Software Consortium,
# nor the names of their contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
FIND_PROGRAM(FLEX_EXECUTABLE flex DOC "path to the flex executable")
MARK_AS_ADVANCED(FLEX_EXECUTABLE)
FIND_LIBRARY(FL_LIBRARY NAMES fl
DOC "path to the fl library")
MARK_AS_ADVANCED(FL_LIBRARY)
SET(FLEX_LIBRARIES ${FL_LIBRARY})
IF(FLEX_EXECUTABLE)
EXECUTE_PROCESS(COMMAND ${FLEX_EXECUTABLE} --version
OUTPUT_VARIABLE FLEX_version_output
ERROR_VARIABLE FLEX_version_error
RESULT_VARIABLE FLEX_version_result
OUTPUT_STRIP_TRAILING_WHITESPACE)
IF(NOT ${FLEX_version_result} EQUAL 0)
IF(FLEX_FIND_REQUIRED)
MESSAGE(SEND_ERROR "Command \"${FLEX_EXECUTABLE} --version\" failed with output:\n${FLEX_version_output}\n${FLEX_version_error}")
ELSE()
MESSAGE("Command \"${FLEX_EXECUTABLE} --version\" failed with output:\n${FLEX_version_output}\n${FLEX_version_error}\nFLEX_VERSION will not be available")
ENDIF()
ELSE()
STRING(REGEX REPLACE "^flex (.*)$" "\\1"
FLEX_VERSION "${FLEX_version_output}")
ENDIF()
#============================================================
# FLEX_TARGET (public macro)
#============================================================
#
MACRO(FLEX_TARGET Name Input Output)
SET(FLEX_TARGET_usage "FLEX_TARGET(<Name> <Input> <Output> [COMPILE_FLAGS <string>])")
IF(${ARGC} GREATER 3)
IF(${ARGC} EQUAL 5)
IF("${ARGV3}" STREQUAL "COMPILE_FLAGS")
SET(FLEX_EXECUTABLE_opts "${ARGV4}")
SEPARATE_ARGUMENTS(FLEX_EXECUTABLE_opts)
ELSE()
MESSAGE(SEND_ERROR ${FLEX_TARGET_usage})
ENDIF()
ELSE()
MESSAGE(SEND_ERROR ${FLEX_TARGET_usage})
ENDIF()
ENDIF()
ADD_CUSTOM_COMMAND(OUTPUT ${Output}
COMMAND ${FLEX_EXECUTABLE}
ARGS ${FLEX_EXECUTABLE_opts} -o${Output} ${Input}
DEPENDS ${Input}
COMMENT "[FLEX][${Name}] Building scanner with flex ${FLEX_VERSION}"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
SET(FLEX_${Name}_DEFINED TRUE)
SET(FLEX_${Name}_OUTPUTS ${Output})
SET(FLEX_${Name}_INPUT ${Input})
SET(FLEX_${Name}_COMPILE_FLAGS ${FLEX_EXECUTABLE_opts})
ENDMACRO(FLEX_TARGET)
#============================================================
#============================================================
# ADD_FLEX_BISON_DEPENDENCY (public macro)
#============================================================
#
MACRO(ADD_FLEX_BISON_DEPENDENCY FlexTarget BisonTarget)
IF(NOT FLEX_${FlexTarget}_OUTPUTS)
MESSAGE(SEND_ERROR "Flex target `${FlexTarget}' does not exists.")
ENDIF()
IF(NOT BISON_${BisonTarget}_OUTPUT_HEADER)
MESSAGE(SEND_ERROR "Bison target `${BisonTarget}' does not exists.")
ENDIF()
SET_SOURCE_FILES_PROPERTIES(${FLEX_${FlexTarget}_OUTPUTS}
PROPERTIES OBJECT_DEPENDS ${BISON_${BisonTarget}_OUTPUT_HEADER})
ENDMACRO(ADD_FLEX_BISON_DEPENDENCY)
#============================================================
ENDIF(FLEX_EXECUTABLE)
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(FLEX FLEX_EXECUTABLE
FLEX_VERSION)
# FindFLEX.cmake ends here

cmake/FindGooglePerftools.cmake (deleted)

@@ -1,44 +0,0 @@
# - Try to find GooglePerftools headers and libraries
#
# Usage of this module as follows:
#
# find_package(GooglePerftools)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# GooglePerftools_ROOT_DIR Set this variable to the root installation of
# GooglePerftools if the module has problems finding
# the proper installation path.
#
# Variables defined by this module:
#
# GOOGLEPERFTOOLS_FOUND System has GooglePerftools libs/headers
# GooglePerftools_LIBRARIES The GooglePerftools libraries
# GooglePerftools_INCLUDE_DIR The location of GooglePerftools headers
find_path(GooglePerftools_ROOT_DIR
NAMES include/google/heap-profiler.h
)
find_library(GooglePerftools_LIBRARIES
NAMES tcmalloc
HINTS ${GooglePerftools_ROOT_DIR}/lib
)
find_path(GooglePerftools_INCLUDE_DIR
NAMES google/heap-profiler.h
HINTS ${GooglePerftools_ROOT_DIR}/include
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GooglePerftools DEFAULT_MSG
GooglePerftools_LIBRARIES
GooglePerftools_INCLUDE_DIR
)
mark_as_advanced(
GooglePerftools_ROOT_DIR
GooglePerftools_LIBRARIES
GooglePerftools_INCLUDE_DIR
)
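
Since tcmalloc is optional, a build would normally gate on the FOUND flag; a sketch (the ``USE_PERFTOOLS``/``OPTLIBS`` names are illustrative):

    find_package(GooglePerftools)
    if (GOOGLEPERFTOOLS_FOUND)
        set(USE_PERFTOOLS true)
        include_directories(${GooglePerftools_INCLUDE_DIR})
        list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES})
    endif ()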

cmake/FindLibGeoIP.cmake (deleted)

@@ -1,68 +0,0 @@
# - Try to find GeoIP headers and libraries
#
# Usage of this module as follows:
#
# find_package(LibGeoIP)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# LibGeoIP_ROOT_DIR Set this variable to the root installation of
# libGeoIP if the module has problems finding the
# proper installation path.
#
# Variables defined by this module:
#
# LIBGEOIP_FOUND System has GeoIP libraries and headers
# LibGeoIP_LIBRARY The GeoIP library
# LibGeoIP_INCLUDE_DIR The location of GeoIP headers
# HAVE_GEOIP_COUNTRY_EDITION_V6 Whether the API supports IPv6 country edition
# HAVE_GEOIP_CITY_EDITION_REV0_V6 Whether the API supports IPv6 city edition
find_path(LibGeoIP_ROOT_DIR
NAMES include/GeoIPCity.h
)
if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
# the static version of the library is preferred on OS X for the
# purposes of making packages (libGeoIP doesn't ship w/ OS X)
set(libgeoip_names libGeoIP.a GeoIP)
else ()
set(libgeoip_names GeoIP)
endif ()
find_library(LibGeoIP_LIBRARY
NAMES ${libgeoip_names}
HINTS ${LibGeoIP_ROOT_DIR}/lib
)
find_path(LibGeoIP_INCLUDE_DIR
NAMES GeoIPCity.h
HINTS ${LibGeoIP_ROOT_DIR}/include
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LibGeoIP DEFAULT_MSG
LibGeoIP_LIBRARY
LibGeoIP_INCLUDE_DIR
)
if (LIBGEOIP_FOUND)
include(CheckCXXSourceCompiles)
set(CMAKE_REQUIRED_INCLUDES ${LibGeoIP_INCLUDE_DIR})
check_cxx_source_compiles("
#include <GeoIPCity.h>
int main() { GEOIP_COUNTRY_EDITION_V6; return 0; }
" HAVE_GEOIP_COUNTRY_EDITION_V6)
check_cxx_source_compiles("
#include <GeoIPCity.h>
int main() { GEOIP_CITY_EDITION_REV0_V6; return 0; }
" HAVE_GEOIP_CITY_EDITION_REV0_V6)
set(CMAKE_REQUIRED_INCLUDES)
endif ()
mark_as_advanced(
LibGeoIP_ROOT_DIR
LibGeoIP_LIBRARY
LibGeoIP_INCLUDE_DIR
)
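
The feature-test results can feed compile definitions so sources can adapt to the installed GeoIP API; a sketch:

    find_package(LibGeoIP)
    if (LIBGEOIP_FOUND)
        include_directories(${LibGeoIP_INCLUDE_DIR})
        if (HAVE_GEOIP_COUNTRY_EDITION_V6)
            add_definitions(-DHAVE_GEOIP_COUNTRY_EDITION_V6)
        endif ()
    endif ()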

cmake/FindLibMagic.cmake (deleted)

@@ -1,52 +0,0 @@
# - Try to find libmagic header and library
#
# Usage of this module as follows:
#
# find_package(LibMagic)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# LibMagic_ROOT_DIR Set this variable to the root installation of
# libmagic if the module has problems finding the
# proper installation path.
#
# Variables defined by this module:
#
# LIBMAGIC_FOUND System has libmagic and magic.h
# LibMagic_LIBRARY The libmagic library
# LibMagic_INCLUDE_DIR The location of magic.h
find_path(LibMagic_ROOT_DIR
NAMES include/magic.h
)
if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
# the static version of the library is preferred on OS X for the
# purposes of making packages (libmagic doesn't ship w/ OS X)
set(libmagic_names libmagic.a magic)
else ()
set(libmagic_names magic)
endif ()
find_library(LibMagic_LIBRARY
NAMES ${libmagic_names}
HINTS ${LibMagic_ROOT_DIR}/lib
)
find_path(LibMagic_INCLUDE_DIR
NAMES magic.h
HINTS ${LibMagic_ROOT_DIR}/include
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LibMagic DEFAULT_MSG
LibMagic_LIBRARY
LibMagic_INCLUDE_DIR
)
mark_as_advanced(
LibMagic_ROOT_DIR
LibMagic_LIBRARY
LibMagic_INCLUDE_DIR
)


@ -1,56 +0,0 @@
# - Try to find openssl include dirs and libraries
#
# Usage of this module as follows:
#
# find_package(OpenSSL)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# OpenSSL_ROOT_DIR Set this variable to the root installation of
# openssl if the module has problems finding the
# proper installation path.
#
# Variables defined by this module:
#
# OPENSSL_FOUND System has openssl, include and library dirs found
# OpenSSL_INCLUDE_DIR The openssl include directories.
# OpenSSL_LIBRARIES The openssl libraries.
# OpenSSL_CRYPTO_LIBRARY The openssl crypto library.
# OpenSSL_SSL_LIBRARY The openssl ssl library.
find_path(OpenSSL_ROOT_DIR
NAMES include/openssl/ssl.h
)
find_path(OpenSSL_INCLUDE_DIR
NAMES openssl/ssl.h
HINTS ${OpenSSL_ROOT_DIR}/include
)
find_library(OpenSSL_SSL_LIBRARY
NAMES ssl ssleay32 ssleay32MD
HINTS ${OpenSSL_ROOT_DIR}/lib
)
find_library(OpenSSL_CRYPTO_LIBRARY
NAMES crypto
HINTS ${OpenSSL_ROOT_DIR}/lib
)
set(OpenSSL_LIBRARIES ${OpenSSL_SSL_LIBRARY} ${OpenSSL_CRYPTO_LIBRARY}
CACHE STRING "OpenSSL SSL and crypto libraries" FORCE)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(OpenSSL DEFAULT_MSG
OpenSSL_LIBRARIES
OpenSSL_INCLUDE_DIR
)
mark_as_advanced(
OpenSSL_ROOT_DIR
OpenSSL_INCLUDE_DIR
OpenSSL_LIBRARIES
OpenSSL_CRYPTO_LIBRARY
OpenSSL_SSL_LIBRARY
)


@ -1,68 +0,0 @@
# - Try to find libpcap include dirs and libraries
#
# Usage of this module as follows:
#
# find_package(PCAP)
#
# Variables used by this module, they can change the default behaviour and need
# to be set before calling find_package:
#
# PCAP_ROOT_DIR Set this variable to the root installation of
# libpcap if the module has problems finding the
# proper installation path.
#
# Variables defined by this module:
#
# PCAP_FOUND System has libpcap, include and library dirs found
# PCAP_INCLUDE_DIR The libpcap include directories.
# PCAP_LIBRARY The libpcap library (possibly includes a thread
# library e.g. required by pf_ring's libpcap)
find_path(PCAP_ROOT_DIR
NAMES include/pcap.h
)
find_path(PCAP_INCLUDE_DIR
NAMES pcap.h
HINTS ${PCAP_ROOT_DIR}/include
)
find_library(PCAP_LIBRARY
NAMES pcap
HINTS ${PCAP_ROOT_DIR}/lib
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(PCAP DEFAULT_MSG
PCAP_LIBRARY
PCAP_INCLUDE_DIR
)
include(CheckCSourceCompiles)
set(CMAKE_REQUIRED_LIBRARIES ${PCAP_LIBRARY})
check_c_source_compiles("int main() { return 0; }" PCAP_LINKS_SOLO)
set(CMAKE_REQUIRED_LIBRARIES)
# check if linking against libpcap also needs to link against a thread library
if (NOT PCAP_LINKS_SOLO)
find_package(Threads)
if (THREADS_FOUND)
set(CMAKE_REQUIRED_LIBRARIES ${PCAP_LIBRARY} ${CMAKE_THREAD_LIBS_INIT})
check_c_source_compiles("int main() { return 0; }" PCAP_NEEDS_THREADS)
set(CMAKE_REQUIRED_LIBRARIES)
endif ()
if (THREADS_FOUND AND PCAP_NEEDS_THREADS)
set(_tmp ${PCAP_LIBRARY} ${CMAKE_THREAD_LIBS_INIT})
list(REMOVE_DUPLICATES _tmp)
set(PCAP_LIBRARY ${_tmp}
CACHE STRING "Libraries needed to link against libpcap" FORCE)
else ()
message(FATAL_ERROR "Couldn't determine how to link against libpcap")
endif ()
endif ()
mark_as_advanced(
PCAP_ROOT_DIR
PCAP_INCLUDE_DIR
PCAP_LIBRARY
)


@ -1,44 +0,0 @@
# A wrapper macro around the standard CMake find_package macro that
# facilitates displaying better error messages by default, or even
# accepting custom error messages on a per package basis.
#
# If a package is not found, the MISSING_PREREQS variable gets set to
# true and either a default or a custom error message is appended to
# MISSING_PREREQ_DESCS.
#
# The caller can use these variables to display a list of any missing
# packages and abort the build/configuration if there were any.
#
# Use as follows:
#
# include(FindRequiredPackage)
# FindRequiredPackage(Perl)
# FindRequiredPackage(FLEX "You need to install flex (Fast Lexical Analyzer)")
#
# if (MISSING_PREREQS)
# foreach (prereq ${MISSING_PREREQ_DESCS})
# message(SEND_ERROR ${prereq})
# endforeach ()
# message(FATAL_ERROR "Configuration aborted due to missing prerequisites")
# endif ()
macro(FindRequiredPackage packageName)
find_package(${packageName})
string(TOUPPER ${packageName} canonPackageName)
if (NOT ${canonPackageName}_FOUND)
set(MISSING_PREREQS true)
set(customDesc)
foreach (descArg ${ARGN})
set(customDesc "${customDesc} ${descArg}")
endforeach ()
if (customDesc)
# append the custom error message that was provided as an argument
list(APPEND MISSING_PREREQ_DESCS ${customDesc})
else ()
list(APPEND MISSING_PREREQ_DESCS
" Could not find prerequisite package '${packageName}'")
endif ()
endif ()
endmacro(FindRequiredPackage)


@ -1,26 +0,0 @@
# Determines at `make install` time whether a file, typically a
# configuration file placed in $PREFIX/etc, should be skipped to avoid
# overwriting an existing file.
#
# _srcfile: the file to install
# _dstfile: the absolute file name after installation
macro(InstallClobberImmune _srcfile _dstfile)
install(CODE "
if (EXISTS ${_dstfile})
message(STATUS \"Skipping: ${_dstfile} (already exists)\")
execute_process(COMMAND \"${CMAKE_COMMAND}\" -E compare_files
${_srcfile} ${_dstfile} RESULT_VARIABLE _diff)
if (NOT \"\${_diff}\" STREQUAL \"0\")
message(STATUS \"Installing: ${_dstfile}.example\")
configure_file(${_srcfile} ${_dstfile}.example COPYONLY)
endif ()
else ()
message(STATUS \"Installing: ${_dstfile}\")
# install() is not scriptable within install(), and
# configure_file() is the next best thing
configure_file(${_srcfile} ${_dstfile} COPYONLY)
# TODO: create additional install_manifest files?
endif ()
")
endmacro(InstallClobberImmune)
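# A hypothetical invocation (both paths are illustrative): install an
# etc/networks.cfg template unless the user already has one in place.
#
#   InstallClobberImmune(${CMAKE_CURRENT_SOURCE_DIR}/etc/networks.cfg
#                        ${CMAKE_INSTALL_PREFIX}/etc/networks.cfg)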


@ -1,42 +0,0 @@
include(InstallClobberImmune)
# This macro can be used to install configuration files which
# users are expected to modify after installation. It will:
#
# - If binary packaging is enabled:
# Install the file in the typical CMake fashion, but append to the
# INSTALLED_CONFIG_FILES cache variable for use with the Mac package's
# pre/post install scripts
# - If binary packaging is not enabled:
# Install the file such that `make install` first checks whether it
# already exists at the destination. See InstallClobberImmune.cmake
#
# _srcfile: the absolute path to the file to install
# _dstdir: absolute path to the directory in which to install the file
# _dstfilename: how to (re)name the file inside _dstdir
macro(InstallPackageConfigFile _srcfile _dstdir _dstfilename)
set(_dstfile ${_dstdir}/${_dstfilename})
if (BINARY_PACKAGING_MODE)
# If packaging mode is enabled, always install the distribution's
# version of the file. The Mac package's pre/post install scripts
# or native functionality of RPMs will take care of not clobbering it.
install(FILES ${_srcfile} DESTINATION ${_dstdir} RENAME ${_dstfilename})
# This cache variable is what the Mac package pre/post install scripts
# use to avoid clobbering user-modified config files
set(INSTALLED_CONFIG_FILES
"${INSTALLED_CONFIG_FILES} ${_dstfile}" CACHE STRING "" FORCE)
# Additionally, the Mac PackageMaker packages don't have any automatic
# handling of configuration file conflicts so install an example file
# that the post install script will cleanup in the case it's extraneous
if (APPLE)
install(FILES ${_srcfile} DESTINATION ${_dstdir}
RENAME ${_dstfilename}.example)
endif ()
else ()
# Have `make install` check at run time whether the file already exists
InstallClobberImmune(${_srcfile} ${_dstfile})
endif ()
endmacro(InstallPackageConfigFile)
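# A hypothetical call matching the (_srcfile, _dstdir, _dstfilename)
# signature (the paths are illustrative):
#
#   InstallPackageConfigFile(${CMAKE_CURRENT_SOURCE_DIR}/etc/broctl.cfg
#                            ${CMAKE_INSTALL_PREFIX}/etc
#                            broctl.cfg)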


@ -1,20 +0,0 @@
This package will install @CMAKE_PROJECT_NAME@ into the following location:
@CMAKE_INSTALL_PREFIX@
You may choose to update your PATH environment variable:
# For Bash
export PATH=@CMAKE_INSTALL_PREFIX@/bin:$PATH
# For CSH
setenv PATH @CMAKE_INSTALL_PREFIX@/bin:$PATH
If you have more than one volume, please choose the volume that
contains the root filesystem as the install destination.
If you have existing configuration files that are modified or
otherwise different from the version included in the package,
this installer will attempt to avoid overwriting them,
but it's also advisable to make your own backups of important
files before proceeding.


@ -1,10 +0,0 @@
if (NOT _MAC_DEPENDENCY_PATHS)
set(_MAC_DEPENDENCY_PATHS)
# As of CMake 2.8.3, Fink and MacPorts search paths are appended to the
# default search prefix paths, but it would be nicer for them to be
# prepended to the defaults, so that is fixed here.
if (APPLE)
list(INSERT CMAKE_SYSTEM_PREFIX_PATH 0 /opt/local) # MacPorts
list(INSERT CMAKE_SYSTEM_PREFIX_PATH 0 /sw) # Fink
endif ()
endif ()


@ -1,34 +0,0 @@
include(CheckCXXSourceCompiles)
include(CheckCSourceCompiles)
# This autoconf variable is obsolete; it's portable to assume C89 and signal
# handlers returning void
set(RETSIGTYPE "void")
set(RETSIGVAL "")
check_c_source_compiles("
#include <sys/types.h>
#include <sys/socket.h>
extern int socket(int, int, int);
extern int connect(int, const struct sockaddr *, int);
extern int send(int, const void *, int, int);
extern int recvfrom(int, void *, int, int, struct sockaddr *, int *);
int main() { return 0; }
" DO_SOCK_DECL)
if (DO_SOCK_DECL)
message(STATUS "socket() and friends need explicit declaration")
endif ()
check_cxx_source_compiles("
#include <stdlib.h>
#include <syslog.h>
extern \"C\" {
int openlog(const char* ident, int logopt, int facility);
int syslog(int priority, const char* message_fmt, ...);
int closelog();
}
int main() { return 0; }
" SYSLOG_INT)
if (SYSLOG_INT)
message(STATUS "syslog prototypes need declaration")
endif ()


@ -1,66 +0,0 @@
if (${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
# alternate malloc is faster for FreeBSD, but needs more testing
# need to add a way to set this from the command line
set(USE_NMALLOC true)
elseif (${CMAKE_SYSTEM_NAME} MATCHES "OpenBSD")
set(USE_NMALLOC true)
elseif (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
elseif (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
set(HAVE_LINUX true)
elseif (${CMAKE_SYSTEM_NAME} MATCHES "Solaris")
set(SOCKET_LIBS nsl socket)
elseif (${CMAKE_SYSTEM_NAME} MATCHES "osf")
# Workaround ip_hl vs. ip_vhl problem in netinet/ip.h
add_definitions(-D__STDC__=2)
elseif (${CMAKE_SYSTEM_NAME} MATCHES "irix")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -xansi -signed -g3")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -xansi -signed -g3")
elseif (${CMAKE_SYSTEM_NAME} MATCHES "ultrix")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std1 -g3")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std1 -g3")
include(CheckCSourceCompiles)
check_c_source_compiles("
#include <sys/types.h>
int main() {
void c(const struct a *);
return 0;
}
" have_ultrix_const)
if (NOT have_ultrix_const)
set(NEED_ULTRIX_CONST_HACK true)
endif ()
elseif (${CMAKE_SYSTEM_NAME} MATCHES "hpux" OR
${CMAKE_SYSTEM_NAME} MATCHES "HP-UX")
include(CheckCSourceCompiles)
set(CMAKE_REQUIRED_FLAGS -Aa)
set(CMAKE_REQUIRED_DEFINITIONS -D_HPUX_SOURCE)
check_c_source_compiles("
#include <sys/types.h>
int main() {
int frob(int, char *);
return 0;
}
" have_ansi_prototypes)
set(CMAKE_REQUIRED_FLAGS)
set(CMAKE_REQUIRED_DEFINITIONS)
if (have_ansi_prototypes)
add_definitions(-D_HPUX_SOURCE)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Aa")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Aa")
endif ()
if (NOT have_ansi_prototypes)
message(FATAL_ERROR "Can't get HPUX compiler to handle ANSI prototypes")
endif ()
endif ()


@ -1,72 +0,0 @@
include(CheckCSourceCompiles)
include(CheckCXXSourceCompiles)
set(CMAKE_REQUIRED_LIBRARIES ${OpenSSL_LIBRARIES})
set(CMAKE_REQUIRED_INCLUDES ${OpenSSL_INCLUDE_DIR})
check_c_source_compiles("
#include <openssl/ssl.h>
int main() { return 0; }
" including_ssl_h_works)
if (NOT including_ssl_h_works)
# On Red Hat we may need to include Kerberos header.
set(CMAKE_REQUIRED_INCLUDES ${OpenSSL_INCLUDE_DIR} /usr/kerberos/include)
check_c_source_compiles("
#include <krb5.h>
#include <openssl/ssl.h>
int main() { return 0; }
" NEED_KRB5_H)
set(CMAKE_REQUIRED_INCLUDES ${OpenSSL_INCLUDE_DIR})
if (NOT NEED_KRB5_H)
message(FATAL_ERROR
"OpenSSL test failure. See CmakeError.log for details.")
else ()
message(STATUS "OpenSSL requires Kerberos header")
include_directories("/usr/kerberos/include")
endif ()
endif ()
# check for OPENSSL_add_all_algorithms_conf function
# and thus OpenSSL >= v0.9.7
check_c_source_compiles("
#include <openssl/evp.h>
int main() {
OPENSSL_add_all_algorithms_conf();
return 0;
}
" openssl_greater_than_0_9_7)
if (NOT openssl_greater_than_0_9_7)
message(FATAL_ERROR "OpenSSL >= v0.9.7 required")
endif ()
check_cxx_source_compiles("
#include <openssl/x509.h>
int main() {
const unsigned char** cpp = 0;
X509** x =0;
d2i_X509(x, cpp, 0);
return 0;
}
" OPENSSL_D2I_X509_USES_CONST_CHAR)
if (NOT OPENSSL_D2I_X509_USES_CONST_CHAR)
# double check that it compiles without const
check_cxx_source_compiles("
#include <openssl/x509.h>
int main() {
unsigned char** cpp = 0;
X509** x =0;
d2i_X509(x, cpp, 0);
return 0;
}
" OPENSSL_D2I_X509_USES_CHAR)
if (NOT OPENSSL_D2I_X509_USES_CHAR)
message(FATAL_ERROR
"Can't determine if openssl_d2i_x509() takes const char parameter")
endif ()
endif ()
set(CMAKE_REQUIRED_INCLUDES)
set(CMAKE_REQUIRED_LIBRARIES)


@ -1,63 +0,0 @@
include(CheckFunctionExists)
include(CheckCSourceCompiles)
include(CheckIncludeFiles)
set(CMAKE_REQUIRED_INCLUDES ${PCAP_INCLUDE_DIR})
set(CMAKE_REQUIRED_LIBRARIES ${PCAP_LIBRARY})
check_include_files(pcap-int.h HAVE_PCAP_INT_H)
check_function_exists(pcap_freecode HAVE_LIBPCAP_PCAP_FREECODE)
if (NOT HAVE_LIBPCAP_PCAP_FREECODE)
set(DONT_HAVE_LIBPCAP_PCAP_FREECODE true)
message(STATUS "No implementation for pcap_freecode()")
endif ()
check_c_source_compiles("
#include <pcap.h>
int main () {
int snaplen;
int linktype;
struct bpf_program fp;
int optimize;
bpf_u_int32 netmask;
char str[10];
char error[1024];
snaplen = 50;
linktype = DLT_EN10MB;
optimize = 1;
netmask = 0L;
str[0] = 'i'; str[1] = 'p'; str[2] = '\\\\0';
(void)pcap_compile_nopcap(
snaplen, linktype, &fp, str, optimize, netmask, &error);
return 0;
}
" LIBPCAP_PCAP_COMPILE_NOPCAP_HAS_ERROR_PARAMETER)
if (NOT LIBPCAP_PCAP_COMPILE_NOPCAP_HAS_ERROR_PARAMETER)
# double check
check_c_source_compiles("
#include <pcap.h>
int main () {
int snaplen;
int linktype;
struct bpf_program fp;
int optimize;
bpf_u_int32 netmask;
char str[10];
snaplen = 50;
linktype = DLT_EN10MB;
optimize = 1;
netmask = 0L;
str[0] = 'i'; str[1] = 'p'; str[2] = '\\\\0';
(void)pcap_compile_nopcap(snaplen, linktype, &fp, str, optimize, netmask);
return 0;
}
" LIBPCAP_PCAP_COMPILE_NOPCAP_NO_ERROR_PARAMETER)
if (NOT LIBPCAP_PCAP_COMPILE_NOPCAP_NO_ERROR_PARAMETER)
message(FATAL_ERROR
"Can't determine if pcap_compile_nopcap takes an error parameter")
endif ()
endif ()
set(CMAKE_REQUIRED_INCLUDES)
set(CMAKE_REQUIRED_LIBRARIES)


@ -1,35 +0,0 @@
function(uninstall_manifest manifestPath)
file(READ "${manifestPath}" files)
string(REGEX REPLACE "\n" ";" files "${files}")
foreach (file ${files})
set(fileName $ENV{DESTDIR}${file})
if (EXISTS "${fileName}" OR IS_SYMLINK "${fileName}")
message(STATUS "Uninstalling: ${fileName}")
execute_process(
COMMAND "@CMAKE_COMMAND@" -E remove "${fileName}"
OUTPUT_VARIABLE rm_out
RESULT_VARIABLE rm_retval
)
if (NOT ${rm_retval} EQUAL 0)
message(FATAL_ERROR "Problem when removing: ${fileName}")
endif ()
else ()
message(STATUS "Does not exist: ${fileName}")
endif ()
endforeach ()
endfunction(uninstall_manifest)
file(GLOB install_manifests @CMAKE_CURRENT_BINARY_DIR@/install_manifest*.txt)
if (install_manifests)
foreach (manifest ${install_manifests})
uninstall_manifest(${manifest})
endforeach ()
else ()
message(FATAL_ERROR "Cannot find any install manifests in: "
"\"@CMAKE_CURRENT_BINARY_DIR@/install_manifest*.txt\"")
endif ()
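# A template like this is typically configured and hooked up to an
# `uninstall` target roughly as follows (the template file name is an
# assumption):
#
#   configure_file(cmake/uninstall.cmake.in
#                  ${CMAKE_CURRENT_BINARY_DIR}/uninstall.cmake @ONLY)
#   add_custom_target(uninstall COMMAND
#       ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/uninstall.cmake)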


@ -1,61 +0,0 @@
#!/bin/sh
# This script is meant to be used by binary packages post-installation.
# Variables between @ symbols are replaced by CMake at configure time.
backupNamesFile=/tmp/bro_install_backups
version=@VERSION@
sampleFiles=""
# check whether it's safe to remove backup configuration files that
# the most recent package install created
if [ -e ${backupNamesFile} ]; then
backupFileList=`cat ${backupNamesFile}`
for backupFile in ${backupFileList}; do
origFileName=`echo ${backupFile} | sed 's/\(.*\)\..*/\1/'`
diff ${origFileName} ${backupFile} > /dev/null 2>&1
if [ $? -eq 0 ]; then
# if the installed version and the backup version don't differ
# then we can remove the backup version and the example file
rm ${backupFile}
rm ${origFileName}.example
else
# The backup file differs from the newly installed version.
# Since we can't tell whether the backup has been modified by
# the user, we restore it to its original location and rename
# the new version appropriately.
sampleFiles="${sampleFiles}\n${origFileName}.example"
mv ${backupFile} ${origFileName}
fi
done
rm ${backupNamesFile}
fi
if [ -n "${sampleFiles}" ]; then
# Use some apple script to display a message to user
/usr/bin/osascript << EOF
tell application "System Events"
activate
display alert "Existing configuration files differ from the ones that would be installed by this package. To avoid overwriting configuration which you may have modified, the following new config files have been installed:\n${sampleFiles}\n\nIf you have previously modified configuration files, please make sure that they are still compatible, else you should update your config files to the new versions."
end tell
EOF
fi
# Set up world-writeable spool and log directories for broctl, making sure
# to set the sticky bit so that unprivileged users can't rename/remove files.
# (CMake/CPack is supposed to install them, but has problems with empty dirs)
if [ -n "@EMPTY_WORLD_DIRS@" ]; then
for dir in @EMPTY_WORLD_DIRS@; do
mkdir -p ${dir}
chmod 777 ${dir}
chmod +t ${dir}
done
fi


@ -1,34 +0,0 @@
#!/bin/sh
# This script is meant to be used by binary packages pre-installation.
# Variables between @ symbols are replaced by CMake at configure time.
configFiles="@INSTALLED_CONFIG_FILES@"
backupNamesFile=/tmp/bro_install_backups
# Checks if a config file exists in a default location and makes a backup
# so that a modified version is not clobbered
backupFile () {
origFile="$1"
if [ -e ${origFile} ]; then
# choose a file suffix that doesn't already exist
ver=1
while [ -e ${origFile}.${ver} ]; do
ver=$(( ver + 1 ))
done
backupFile=${origFile}.${ver}
cp -p ${origFile} ${backupFile}
# the post upgrade script will check whether the installed
# config file actually differs from existing version
# and delete unnecessary backups
echo "${backupFile}" >> ${backupNamesFile}
fi
}
for file in ${configFiles}; do
backupFile "${file}"
done

21
configure vendored

@ -32,6 +32,8 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--disable-broccoli don't build or install the Broccoli library
--disable-broctl don't install Broctl
--disable-auxtools don't build or install auxiliary tools
--disable-python don't try to build python bindings for broccoli
--disable-ruby don't try to build ruby bindings for broccoli
Required Packages in Non-Standard Locations:
--with-openssl=PATH path to OpenSSL install root
@ -49,6 +51,9 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--with-python=PATH path to Python interpreter
--with-python-lib=PATH path to libpython
--with-python-inc=PATH path to Python headers
--with-ruby=PATH path to ruby interpreter
--with-ruby-lib=PATH path to ruby library
--with-ruby-inc=PATH path to ruby headers
--with-swig=PATH path to SWIG executable
Packaging Options (for developers):
@ -144,6 +149,12 @@ while [ $# -ne 0 ]; do
--disable-auxtools)
append_cache_entry INSTALL_AUX_TOOLS BOOL false
;;
--disable-python)
append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
;;
--disable-ruby)
append_cache_entry DISABLE_RUBY_BINDINGS BOOL true
;;
--with-openssl=*)
append_cache_entry OpenSSL_ROOT_DIR PATH $optarg
;;
@ -185,6 +196,16 @@ while [ $# -ne 0 ]; do
append_cache_entry PYTHON_INCLUDE_DIR PATH $optarg
append_cache_entry PYTHON_INCLUDE_PATH PATH $optarg
;;
--with-ruby=*)
append_cache_entry RUBY_EXECUTABLE PATH $optarg
;;
--with-ruby-lib=*)
append_cache_entry RUBY_LIBRARY PATH $optarg
;;
--with-ruby-inc=*)
append_cache_entry RUBY_INCLUDE_DIRS PATH $optarg
append_cache_entry RUBY_INCLUDE_PATH PATH $optarg
;;
--with-swig=*)
append_cache_entry SWIG_EXECUTABLE PATH $optarg
;;
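With the new options in place, a hypothetical invocation that points the
build at a custom ruby and skips the python bindings might look like this
(the interpreter path is illustrative):

    ./configure --with-ruby=/usr/local/bin/ruby --disable-python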


@ -1,18 +0,0 @@
# $Id: OS-fingerprint.bro 1071 2005-03-08 14:09:31Z vern $
#
# Tracks operating system versioning using the "software" framework.
@load software
event OS_version_found(c: connection, host: addr, OS: OS_version)
{
local version: software_version;
version$major = version$minor = version$minor2 = -1;
version$addl = OS$detail;
local sw: software;
sw$name = OS$genre;
sw$version = version;
event software_version_found(c, host, sw, "OS");
}


@ -1,278 +0,0 @@
# $Id: adu.bro 5152 2007-12-04 21:48:56Z vern $
@load conn-id
module adu;
# This script parses application-layer data units (ADUs), or "messages",
# out of the packet streams. Since the analysis is generic, we define
# an ADU simply as all application-layer data in a 5-tuple flow going
# in one direction without any data going the other way. Once we see
# data in the other direction, we finish the current ADU and start
# a new one (going the other way). While this approach is only
# approximate, it can work well for both UDP and TCP.
#
# The script reports ADUs as strings, up to a configurable maximum size, and
# up to a configurable depth into the flow.
#
# Generated events:
#
# - adu_tx(c: connection, a: adu_state) reports an ADU seen from
# c's originator to its responder.
#
# - adu_rx(c: connection, a: adu_state) reports an ADU seen from
# c's responder to the originator.
#
# - adu_done(c: connection) indicates that no more ADUs will be seen
# on connection c. This is useful to know in case your statekeeping
# relies on event connection_state_remove(), which is also used by
# adu.bro.
#
# --- Input configuration -- which ports to look at --------------------
# Right now: everything!
#
redef tcp_content_deliver_all_orig = T;
redef tcp_content_deliver_all_resp = T;
redef udp_content_deliver_all_orig = T;
redef udp_content_deliver_all_resp = T;
# --- Debugging -- should really be a separate policy ------------------
# Comment out to disable debugging output:
#global adu_debug = T;
# Uncomment to enable tests:
#global adu_test = T;
@ifdef (adu_debug)
function DBG(msg: string) { print fmt("DBG[adu.bro]: %s", msg); }
@else
function DBG(msg: string) { }
@endif
export {
# --- Constants --------------------------------------------------------
# The maximum depth in bytes up to which we follow a flow.
# This is counting bytes seen in both directions.
const adu_conn_max_depth = 100000 &redef;
# The maximum message depth that we report.
const adu_max_depth = 3 &redef;
# The maximum message size in bytes that we report.
const adu_max_size = 1000 &redef;
# Whether ADUs are reported beyond content gaps.
const adu_gaps_ok = F &redef;
# --- Types ------------------------------------------------------------
# adu_state records contain the latest ADU and additional flags to help
# the user identify the direction of the message, its depth in the flow,
# etc.
type adu_state: record {
adu: string &default = ""; # the current ADU
# Message counter (>= 1), orig->resp and resp->orig.
depth_tx: count &default = 1;
depth_rx: count &default = 1;
# TCP: seqno tracking to recognize gaps.
seen_tx: count &default = 0;
seen_rx: count &default = 0;
size: count &default = 0; # total connection size in bytes
is_orig: bool &default = F; # whether ADU is orig->resp
ignore: bool &default = F; # ignore future activity on conn
};
# Tell the ADU policy that you do not wish to receive further
# adu_tx/adu_rx events for a given connection. Other policies
# may continue to process the connection.
#
global adu_skip_further_processing: function(cid: conn_id);
}
# --- Globals ----------------------------------------------------------
# A global table that tracks each flow's messages.
global adu_conns: table[conn_id] of adu_state;
# Testing invokes the following events.
global adu_tx: event(c: connection, astate: adu_state);
global adu_rx: event(c: connection, astate: adu_state);
global adu_done: event(c: connection);
# --- Functions --------------------------------------------------------
function adu_skip_further_processing(cid: conn_id)
{
if ( cid !in adu_conns )
return;
adu_conns[cid]$ignore = T;
}
function flow_contents(c: connection, is_orig: bool, seq: count, contents: string)
{
local astate: adu_state;
DBG(fmt("contents %s, %s: %s", id_string(c$id), is_orig, contents));
# Ensure we track the given connection.
if ( c$id !in adu_conns )
adu_conns[c$id] = astate;
else
astate = adu_conns[c$id];
# Forget it if we've been asked to ignore.
#
if ( astate$ignore == T )
return;
# Don't report if flow is too big.
#
if ( astate$size >= adu_conn_max_depth )
return;
# If we have an assembled message, we may now have something
# to report.
if ( |astate$adu| > 0 )
{
# If application-layer data flow is switching
# from resp->orig to orig->resp, report the assembled
# message as a received ADU.
if ( is_orig && ! astate$is_orig )
{
event adu_rx(c, copy(astate));
astate$adu = "";
if ( ++astate$depth_rx > adu_max_depth )
adu_skip_further_processing(c$id);
}
# If application-layer data flow is switching
# from orig->resp to resp->orig, report the assembled
# message as a transmitted ADU.
#
if ( !is_orig && astate$is_orig )
{
event adu_tx(c, copy(astate));
astate$adu = "";
if ( ++astate$depth_tx > adu_max_depth )
adu_skip_further_processing(c$id);
}
}
# Check for content gaps. If we identify one, only continue
# if user allowed it.
#
if ( !adu_gaps_ok && seq > 0 )
{
if ( is_orig )
{
if ( seq > astate$seen_tx + 1 )
return;
else
astate$seen_tx += |contents|;
}
else
{
if ( seq > astate$seen_rx + 1 )
return;
else
astate$seen_rx += |contents|;
}
}
# Append the contents to the end of the currently
# assembled message, if the message hasn't already
# reached the maximum size.
#
if ( |astate$adu| < adu_max_size )
{
astate$adu += contents;
# As a precaution, clip the string to the maximum
# size. A long content string with astate$adu just
# below its maximum allowed size could exceed that
# limit by a lot.
### str_clip(astate$adu, adu_max_size);
}
# Note that this counter is bumped up even if we have
# exceeded the maximum size of an individual message.
#
astate$size += |contents|;
astate$is_orig = is_orig;
}
# --- Event Handlers ---------------------------------------------------
event tcp_contents(c: connection, is_orig: bool, seq: count, contents: string)
{
flow_contents(c, is_orig, seq, contents);
}
event udp_contents(u: connection, is_orig: bool, contents: string)
{
flow_contents(u, is_orig, 0, contents);
}
event connection_state_remove(c: connection)
{
if ( c$id !in adu_conns )
return;
local astate = adu_conns[c$id];
# Forget it if we've been asked to ignore.
#
if ( astate$ignore == T )
return;
# Report the remaining data now, if any.
#
if ( |astate$adu| > 0 ) {
if ( astate$is_orig )
{
if ( astate$depth_tx <= adu_max_depth )
event adu_tx(c, copy(astate));
}
else
{
if ( astate$depth_rx <= adu_max_depth )
event adu_rx(c, copy(astate));
}
}
delete adu_conns[c$id];
event adu_done(c);
}
# --- Tests ------------------------------------------------------------
@ifdef (adu_test)
event adu_tx(c: connection, astate: adu_state)
{
print fmt("%s ---- %s, %d -> ----", network_time(), id_string(c$id), astate$depth_tx);
# print astate$adu;
}
event adu_rx(c: connection, astate: adu_state)
{
print fmt("%s ---- %s, %d <- ----", network_time(), id_string(c$id), astate$depth_rx);
# print astate$adu;
}
@endif
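# A minimal consumer sketch (hypothetical): stop ADU reassembly once the
# first orig->resp message of an HTTP connection has been reported. The
# port test is purely illustrative.
#
# event adu_tx(c: connection, astate: adu_state)
# 	{
# 	if ( c$id$resp_p == 80/tcp )
# 		adu_skip_further_processing(c$id);
# 	}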


@ -1,3 +0,0 @@
# $Id: alarm.bro 340 2004-09-09 06:38:27Z vern $
redef bro_alarm_file = open_log_file("alarm");


@ -1,141 +0,0 @@
@load heavy-analysis
@load OS-fingerprint
@load adu
@load alarm
@load analy
@load anon
@load arp
@load backdoor
@load bittorrent
@load blaster
@load bt-tracker
@load brolite-backdoor
@load capture-events
@load capture-loss
@load capture-state-updates
@load checkpoint
@load clear-passwords
@load conn-flood
@load conn-id
@load conn
@load contents
@load cpu-adapt
@load dce
@load demux
@load detect-protocols-http
@load detect-protocols
@load dhcp
@load dns-info
@load dns-lookup
@load dns
@load dpd
@load drop-adapt
@load dyn-disable
@load file-flush
@load finger
@load firewall
@load flag-irc
@load flag-warez
@load frag
@load ftp
@load gnutella
@load hot-ids
@load hot
@load http-abstract
@load http-anon-server
@load http-anon-useragent
@load http-anon-utils
@load http-body
@load http-detect-passwd
@load http-entity
@load http-event
@load http-header
@load http-identified-files
@load http-reply
@load http-request
@load http-rewriter
@load http
@load icmp
@load ident-rewriter
@load ident
@load inactivity
@load interconn
@load irc-bot-syslog
@load irc-bot
@load irc
@load large-conns
@load listen-clear
@load listen-ssl
@load load-level
@load load-sample
@load log-append
@load login
@load mime-pop
@load mime
@load mt
@load ncp
@load netflow
@load netstats
@load nfs
@load notice-action-filters
@load notice
@load ntp
@load passwords
@load pcap
@load pkt-profile
@load pop3
@load port-name
@load portmapper
@load print-filter
@load print-globals
@load print-resources
@load print-sig-states
@load profiling
@load proxy
@load remote-pcap
@load remote-ping
@load remote-print-id-reply
@load remote-print-id
@load remote-print
@load remote-report-notices
@load remote-send-id
@load remote
@load rotate-logs
@load rsh
@load scan
@load secondary-filter
@load sensor-sshd
@load server-ports
@load service-probe
@load signatures
@load site
@load smb
@load smtp-relay
@load smtp-rewriter
@load smtp
@load snort
@load software
@load ssh
@load ssh-stepping
@load ssl-alerts
@load ssl-ciphers
@load ssl-errors
@load ssl-worm
@load ssl
@load stats
@load stepping
@load synflood
@load targeted-scan
@load tcp
@load tftp
@load trw-impl
@load trw
@load udp-common
@load udp
@load vlan
@load weird
@load worm
@load notice-policy
# The following keeps us running after the bro_init event.
redef PrintFilter::terminate_bro = F;


@ -1,16 +0,0 @@
# Statistical analysis of TCP connections in terms of the packet streams
# in each direction.
@load dns-lookup
@load udp
event conn_stats(c: connection, os: endpoint_stats, rs: endpoint_stats)
{
local id = c$id;
print fmt("%.6f %s %s %s %s %s %s %s %s %s",
c$start_time, c$duration, id$orig_p, id$resp_p,
conn_size(c$orig, tcp), conn_size(c$resp, tcp),
id$orig_h, id$resp_h, os, rs);
}


@ -1,193 +0,0 @@
# $Id: anon.bro 6889 2009-08-21 16:45:17Z vern $
redef anonymize_ip_addr = T;
const orig_addr_anonymization = RANDOM_MD5 &redef;
const resp_addr_anonymization = RANDOM_MD5 &redef;
const other_addr_anonymization = SEQUENTIALLY_NUMBERED &redef;
const preserve_orig_addr: set[addr] = {} &redef;
const preserve_resp_addr: set[addr] = {} &redef;
const preserve_other_addr: set[addr] = {
0.0.0.0,
} &redef;
const preserved_subnet: set[subnet] = {
# 192.150.186/23,
} &redef;
const preserved_net: set[net] = {
# 192.150.186, 192.150.187,
} &redef;
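# A site would typically extend these sets via redef, e.g. to keep its
# own address space unanonymized (the subnet below is illustrative):
#
# redef preserved_subnet += { 10.0.0.0/8, };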
global anon_log = open_log_file("anon") &redef;
global anonymized_args: table[string] of string;
global ip_anon_mapping: set[addr, addr];
event bro_init()
{
for ( n in preserved_net )
preserve_net(n);
}
function anonymize_address(a: addr, id: conn_id): addr
{
if ( a == id$orig_h )
return anonymize_addr(a, ORIG_ADDR);
else if ( a == id$resp_h )
return anonymize_addr(a, RESP_ADDR);
else
return anonymize_addr(a, OTHER_ADDR);
}
event anonymization_mapping(orig: addr, mapped: addr)
{
if ( [orig, mapped] !in ip_anon_mapping )
{
add ip_anon_mapping[orig, mapped];
print anon_log, fmt("%s -> %s", orig, mapped);
}
}
function string_anonymized(from: string, to: string, seed: count)
{
print anon_log, fmt("\"%s\" %d=> \"%s\"", from, seed, to);
}
global num_string_id: count = 0 &redef;
global anonymized_strings: table[string] of record {
s: string;
c: count;
} &redef;
# Hopefully, the total number of strings to anonymize is much less than
# 16^unique_string_length (the hash substring is hexadecimal).
const unique_string_length = 8 &redef;
# const anonymized_string_pattern = /U[0-9a-f]+U/;
global unique_string_set: set[string];
event bro_init()
{
for ( s in anonymized_strings )
add unique_string_set[anonymized_strings[s]$s];
}
function unique_string(s: string, seed: count): string
{
local t = cat("U", sub_bytes(md5_hmac(seed, s),
1, unique_string_length), "U");
if ( t in unique_string_set )
return unique_string(s, seed+1);
anonymized_strings[s] = [$s = t, $c = 1];
add unique_string_set[t];
string_anonymized(s, t, seed);
return t;
}
function anonymize_string(from: string): string
{
if ( from in anonymized_strings )
{
++anonymized_strings[from]$c;
return anonymized_strings[from]$s;
}
local t = unique_string(from, 0);
return t;
}
function anonymize_arg(typ: string, arg: string): string
{
if ( arg == "" )
return ""; # an empty argument is safe
local arg_seed = string_cat(typ, arg);
if ( arg_seed in anonymized_args )
return anonymized_args[arg_seed];
local a = anonymize_string(arg_seed);
anonymized_args[arg_seed] = a;
print anon_log, fmt("anonymize_arg: (%s) {%s} -> %s ",
typ, to_string_literal(arg), to_string_literal(a));
return a;
}
# Does not contain ? and ends with an allowed suffix.
const path_to_file_pat =
/\/[^?]+\.(html|ico|icon|pdf|ps|doc|ppt|htm|js|crl|swf|shtml|h|old|c|cc|java|class|src|cfm|gif|jpg|php|rdf|rss|asp|bmp|owl|phtml|jpeg|jsp|cgi|png|txt|xml|css|avi|tex|dvi)/
;
# Acceptable domain names.
const kosher_dom_pat =
/ar|au|biz|br|ca|cc|cl|cn|co|com|cx|cz|de|ec|es|edu|fi|fm|fr|gov|hn|il|is|it|jp|lv|mx|net|no|nz|org|pe|pl|ru|sk|tv|tw|uk|us|arpa/
;
# Simple filename pattern.
const simple_filename =
/[0-9\-A-Za-z]+\.(html|ico|icon|pdf|ps|doc|ppt|htm|js|crl|swf|shtml|h|old|c|cc|java|class|src|cfm|gif|jpg|php|rdf|rss|asp|bmp|owl|phtml|jpeg|jsp|cgi|png|txt|xml|css|avi|tex|dvi)/
;
function anonymize_path(path: string): string
{
local hashed_path = "";
if ( to_lower(path) != path_to_file_pat )
{
hashed_path = anonymize_arg("path", path);
return hashed_path;
}
local file_parts = split(path, /\./);
local i = 1;
for ( part in file_parts )
{
# This looks broken to me - VP.
hashed_path = fmt("%s.%s", hashed_path, file_parts[i]);
if ( ++i == length(file_parts) )
break;
}
return fmt("%s.%s", anonymize_arg("path", hashed_path), file_parts[i]);
}
function anonymize_host(host: string): string
{
local hashed_host = "";
local host_parts = split(host, /\./);
local i = 1;
for ( hosty in host_parts )
{
if ( i == length(host_parts) )
break;
# Check against "kosher" tld list.
hashed_host = fmt("%s%s.", hashed_host,
anonymize_arg("host", host_parts[i]));
++i;
}
if ( host_parts[i] == kosher_dom_pat )
return string_cat(hashed_host, host_parts[i]);
print anon_log, fmt("anonymize_host: non-kosher domain %s", host);
return string_cat(hashed_host, anonymize_arg("host", host_parts[i]));
}
event bro_done()
{
for ( s in anonymized_strings )
{
print anon_log, fmt("appearance: %d: \"%s\" => \"%s\"",
anonymized_strings[s]$c, s, anonymized_strings[s]$s);
}
}


@ -1,160 +0,0 @@
# $Id: arp.bro 4909 2007-09-24 02:26:36Z vern $
@load notice
module ARP;
export {
redef enum Notice += {
ARPSourceMAC_Mismatch, # source MAC doesn't match mappings
ARPAddlMAC_Mapping, # another MAC->addr seen beyond just one
ARPUnsolicitedReply, # could be poisoning; or just gratuitous
# ARPRequestProvidesTargetAddr, # request includes non-triv addr
# MAC/addr pair seen in request/reply different from
# that in the cache.
ARPCacheInconsistency,
# ARP reply gives different value than previously seen.
ARPMappingChanged,
};
const arp_log = open_log_file("arp") &redef;
}
redef capture_filters += { ["arp"] = "arp" };
# Abbreviations taken from RFC 826:
#
# SHA: source hardware address
# SPA: source protocol address (i.e., IP address)
# THA: target hardware address
# TPA: target protocol address
# ARP requests indexed on SHA/SPA/TPA (no THA, since that's what is
# being queried).
global arp_requests: set[string, addr, addr] &create_expire = 1 min;
# ARP responses we've seen: indexed by IP address, yielding MAC address.
global ARP_cache: table[addr] of string;
# Bad ARPs can occur when:
# - type/size pairs are not OK for HW and L3 addresses (Ethernet=6, IP=4)
# - opcode is neither request (1) nor reply (2)
# - MAC src address != ARP sender MAC address
event bad_arp(SPA: addr, SHA: string, TPA: addr, THA: string,
explanation: string)
{
print arp_log, fmt("%.06f bad-arp %s(%s) ? %s(%s): %s",
network_time(), SPA, SHA, TPA, THA, explanation);
}
# The first of these maps a MAC address to the last protocol address seen
# for it. The second tracks every protocol address seen.
global mac_addr_map: table[string] of addr;
global mac_addr_associations: table[string] of set[addr];
# A somewhat general notion of broadcast MAC/IP addresses.
const broadcast_mac_addrs = { "00:00:00:00:00:00", "ff:ff:ff:ff:ff:ff", };
const broadcast_addrs = { 0.0.0.0, 255.255.255.255, };
# Called to note that we've seen an association between a MAC address
# and an IP address. Note that this is *not* an association advertised
# in an ARP reply (those are tracked in ARP_cache), but instead the
# pairing of hardware address + protocol address as expressed in
# an ARP request or reply header.
function mac_addr_association(mac_addr: string, a: addr)
{
# Ignore placeholders.
if ( mac_addr in broadcast_mac_addrs || a in broadcast_addrs )
return;
local is_addl = F;
if ( mac_addr in mac_addr_associations )
is_addl = a !in mac_addr_associations[mac_addr];
else
mac_addr_associations[mac_addr] = set();
print arp_log, fmt("%.06f association %s -> %s%s", network_time(),
mac_addr, a, is_addl ? " <addl>" : "");
mac_addr_map[mac_addr] = a;
add mac_addr_associations[mac_addr][a];
if ( a in ARP_cache && ARP_cache[a] != mac_addr )
NOTICE([$note=ARPCacheInconsistency, $src=a,
$msg=fmt("mapping for %s to %s doesn't match cache of %s",
mac_addr, a, ARP_cache[a])]);
}
# Returns the IP address associated with a MAC address, if we've seen one.
# Otherwise just returns the MAC address.
function addr_from_mac(mac_addr: string): string
{
return mac_addr in mac_addr_map ?
fmt("%s", mac_addr_map[mac_addr]) : mac_addr;
}
event arp_request(mac_src: string, mac_dst: string, SPA: addr, SHA: string,
TPA: addr, THA: string)
{
mac_addr_association(SHA, SPA);
local msg = fmt("%s -> %s who-has %s",
addr_from_mac(mac_src), addr_from_mac(mac_dst), TPA);
local mismatch = SHA != mac_src;
if ( mismatch )
NOTICE([$note=ARPSourceMAC_Mismatch, $src=SPA, $msg=msg]);
# It turns out that some hosts fill in the THA field even though
# that doesn't make sense. (The RFC specifically allows this,
# however.) Perhaps there's an attack that can be launched
# doing so, but it's hard to see what it might be, so for now
# we don't bother notice'ing these.
# if ( THA !in broadcast_addrs )
# NOTICE([$note=ARPRequestProvidesTargetAddr, $src=SPA,
# $msg=fmt("%s: %s", msg, THA)]);
print arp_log, fmt("%.06f %s%s", network_time(), msg,
mismatch ? " <source-mismatch>" : "");
add arp_requests[SHA, SPA, TPA];
}
event arp_reply(mac_src: string, mac_dst: string, SPA: addr, SHA: string,
TPA: addr, THA: string)
{
mac_addr_association(SHA, SPA);
mac_addr_association(THA, TPA);
local msg = fmt("%s -> %s: %s is-at %s",
addr_from_mac(mac_src), addr_from_mac(mac_dst),
SPA, SHA);
local unsolicited = [THA, TPA, SPA] !in arp_requests;
delete arp_requests[THA, TPA, SPA];
if ( unsolicited )
NOTICE([$note=ARPUnsolicitedReply, $src=SPA,
$msg=fmt("%s: request[%s, %s, %s]", msg, THA, TPA, SPA)]);
local mismatch = SHA != mac_src;
if ( mismatch )
NOTICE([$note=ARPSourceMAC_Mismatch, $src=SPA, $msg=msg]);
local mapping_changed = SPA in ARP_cache && ARP_cache[SPA] != SHA;
if ( mapping_changed )
NOTICE([$note=ARPMappingChanged, $src=SPA,
$msg=fmt("%s: was %s", msg, ARP_cache[SPA])]);
print arp_log, fmt("%.06f %s%s%s%s", network_time(), msg,
unsolicited ? " <unsolicited>" : "",
mismatch ? " <source-mismatch>" : "",
mapping_changed ?
fmt(" <changed from %s>", ARP_cache[SPA]) : "");
ARP_cache[SPA] = SHA;
}


@ -1,559 +0,0 @@
# $Id: backdoor.bro 4909 2007-09-24 02:26:36Z vern $
# Looks for a variety of applications running on ports other than
# their usual ports.
#
# Note that this script by itself does *not* change capture_filters
# to add in the extra ports to look at. You need to specify that
# separately.
# Some tcpdump filters can be used to replace or work together with
# some detection algorithms. They could be used with the "secondary
# filter" for more efficient (but in some cases potentially less reliable)
# matching:
#
# - looking for "SSH-1." or "SSH-2." at the beginning of the packet;
# somewhat weaker than ssh-sig in that ssh-sig only looks for such
# pattern in the first packet of a connection:
#
# tcp[(tcp[12]>>2):4] = 0x5353482D and
# (tcp[((tcp[12]>>2)+4):2] = 0x312e or tcp[((tcp[12]>>2)+4):2] = 0x322e)
#
# - looking for pkts with 8k+4 (<=128) bytes of data (combined with ssh-len);
# only effective for ssh 1.x:
#
# (ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12]>>2)) & 0xFF87 = 4
#
# - looking for packets with <= 512 bytes of data that ends with a NUL
# (can be potentially combined with rlogin-sig or rlogin-sig-1byte):
#
# (tcp[(ip[2:2] - ((ip[0]&0x0f)<<2))-1] == 0) and
# ((ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12]>>2)) != 0) and
# ((ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12]>>2)) <= 512)
#
# - looking for telnet negotiation (can be combined with telnet-sig(-3byte)):
#
# (tcp[(tcp[12]>>2):2] > 0xfffa) and
# (tcp[(tcp[12]>>2):2] < 0xffff) and
# ((ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12] >> 2)) >= 3)
#
# - looking for packets with <= 20 bytes of data (combined with small-pkt):
#
# (ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12]>>2)) <= 20
#
# - looking for FTP servers by the initial "220-" or "220 " sent by the server:
#
# tcp[(tcp[12]>>2):4] = 0x3232302d or tcp[(tcp[12]>>2):4] = 0x32323020
#
# - looking for root backdoors by seeing a server payload of exactly "# ":
#
# tcp[(tcp[12]>>2):2] = 0x2320 and
# (ip[2:2] - ((ip[0]&0x0f)<<2) - (tcp[12]>>2)) == 2
#
# - looking for Napster by the initial "GET" or "SEND" sent by the originator:
#
# ((ip[2:2]-((ip[0]&0x0f)<<2)-(tcp[12]>>2))=4 and
# tcp[(tcp[12]>>2):4]=0x53454e44) or
# ((ip[2:2]-((ip[0]&0x0f)<<2)-(tcp[12]>>2))=3 and
# tcp[(tcp[12]>>2):2]=0x4745 and tcp[(tcp[12]>>2)+2]=0x54)
#
# - looking for Gnutella handshaking "GNUTELLA "
#
# tcp[(tcp[12]>>2):4] = 0x474e5554 and
# tcp[(4+(tcp[12]>>2)):4] = 0x454c4c41 and
# tcp[8+(tcp[12]>>2)] = 0x20
#
# - looking for KaZaA via "GIVE " (not present in all connections)
#
# tcp[(tcp[12]>>2):4] = 0x47495645 and
# tcp[(4+(tcp[12]>>2)):1] = 0x20
#
@load site
@load port-name
@load demux
@load notice
redef enum Notice += { BackdoorFound, };
# Set to dump the packets that trigger the backdoor detector to a file.
const dump_backdoor_packets = F &redef;
redef backdoor_stat_period = 60 sec;
redef backdoor_stat_backoff = 2.0;
const ssh_min_num_pkts = 8 &redef;
const ssh_min_ssh_pkts_ratio = 0.6 &redef;
const backdoor_min_num_lines = 2 &redef;
const backdoor_min_normal_line_ratio = 0.5 &redef;
const backdoor_min_bytes = 10 &redef;
const backdoor_min_7bit_ascii_ratio = 0.75 &redef;
type rlogin_conn_info : record {
o_num_null: count;
o_len: count;
r_num_null: count;
r_len: count;
};
const backdoor_demux_disabled = T &redef;
const backdoor_demux_skip_tags: set[string] &redef;
const ftp_backdoor_sigs = "ftp-sig";
const ssh_backdoor_sigs = { "ssh-sig", "ssh-len-v1.x", "ssh-len-v2.x" };
const rlogin_backdoor_sigs = { "rlogin-sig", "rlogin-sig-1byte" };
const root_backdoor_sigs = "root-bd-sig";
const telnet_backdoor_sigs = { "telnet-sig", "telnet-sig-3byte" };
const napster_backdoor_sigs = "napster-sig";
const gnutella_backdoor_sigs = "gnutella-sig";
const kazaa_backdoor_sigs = "kazaa-sig";
const http_backdoor_sigs = "http-sig";
const http_proxy_backdoor_sigs = "http-proxy-sig";
const smtp_backdoor_sigs = "smtp-sig";
const irc_backdoor_sigs = "irc-sig";
const gaobot_backdoor_sigs = "gaobot-sig";
# List of backdoors, so you can use it when defining sets and tables
# with values over all of them.
const backdoor_sigs = {
ftp_backdoor_sigs, ssh_backdoor_sigs, rlogin_backdoor_sigs,
root_backdoor_sigs, telnet_backdoor_sigs,
napster_backdoor_sigs, gnutella_backdoor_sigs, kazaa_backdoor_sigs,
http_backdoor_sigs, http_proxy_backdoor_sigs,
smtp_backdoor_sigs, irc_backdoor_sigs, gaobot_backdoor_sigs,
};
# List of address-port pairs that if present in a backdoor are ignored.
# Note that these can be either the client and its source port (unusual)
# or the server and its service port (the common case).
const backdoor_ignore_host_port_pairs: set[addr, port] &redef;
const backdoor_ignore_ports: table[string, port] of bool = {
# The following ignore backdoors that are detected on their
# usual ports. The definitions for ftp-sig, telnet-sig and
# telnet-sig-3byte are somehwat broad since those backdoors
# are also frequently triggered for other similar protocols.
[ftp_backdoor_sigs, [ftp, smtp, 587/tcp ]] = T,
[ssh_backdoor_sigs, ssh] = T,
[rlogin_backdoor_sigs , [512/tcp, rlogin, 514/tcp]] = T,
[root_backdoor_sigs, [telnet, 512/tcp, rlogin, 514/tcp]] = T,
[telnet_backdoor_sigs, [telnet, ftp, smtp, 143/tcp, 110/tcp]] = T,
# The following don't have well-known ports (well, Napster does
# somewhat, as shown below), hence the definitions are F rather
# than T.
[napster_backdoor_sigs, [6688/tcp, 6699/tcp]] = F,
[gnutella_backdoor_sigs, 6346/tcp] = F,
[kazaa_backdoor_sigs, 1214/tcp] = F,
[http_backdoor_sigs, [http, 8000/tcp, 8080/tcp]] = T,
[smtp_backdoor_sigs, [smtp, 587/tcp]] = T,
# Skip FTP, as "USER foo" generates false positives. There's
# also a lot of IRC on 7000/tcp.
[irc_backdoor_sigs, [ftp, 6666/tcp, 6667/tcp, 7000/tcp]] = T,
# The following are examples of wildcards, and since they're defined
# to be F, they don't affect the policy unless redefined.
["*", http] = F, # entry for "any backdoor, service http"
["ssh-sig", 0/tcp] = F, # entry for "ssh-sig, any port"
} &redef &default = F;
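# As a sketch, a site running SSH on an alternate port could suppress
# ssh-sig reports for it with a redef such as (the port is illustrative):
#
# redef backdoor_ignore_ports += {
# 	["ssh-sig", 2222/tcp] = T,
# };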
# Indexed by the backdoor, indicates which backdoors residing on
# a local (remote) host should be ignored.
const backdoor_ignore_local: set[string] &redef;
const backdoor_ignore_remote: set[string] &redef;
# Indexed by the source (destination) address and the backdoor.
# Also indexed by the /24 and /16 versions of the source address.
# backdoor "*" means "all backdoors".
const backdoor_ignore_src_addrs: table[string, addr] of bool &redef &default=F;
const backdoor_ignore_dst_addrs: table[string, addr] of bool &redef &default=F;
const backdoor_standard_ports = {
telnet, rlogin, 512/tcp, 514/tcp, ftp, ssh, smtp, 143/tcp,
110/tcp, 6667/tcp,
} &redef;
const backdoor_annotate_standard_ports = T &redef;
const backdoor_ignore_hosts: set[addr] &redef;
const backdoor_ignore_src_nets: set[subnet] &redef;
const backdoor_ignore_dst_nets: set[subnet] &redef;
# Most backdoors are enabled by default, but a few are disabled by
# default (T below) because they generated too many false positives
# (or, for HTTP, too many uninteresting true positives).
const ftp_sig_disabled = F &redef;
const gaobot_sig_disabled = F &redef;
const gnutella_sig_disabled = F &redef;
const http_proxy_sig_disabled = T &redef;
const http_sig_disabled = T &redef;
const irc_sig_disabled = F &redef;
const kazaa_sig_disabled = F &redef;
const napster_sig_disabled = F &redef;
const rlogin_sig_1byte_disabled = T &redef;
const rlogin_sig_disabled = T &redef;
const root_backdoor_sig_disabled = T &redef;
const smtp_sig_disabled = F &redef;
# Note, for the following there's a corresponding variable
# interconn_ssh_len_disabled in interconn.bro.
const ssh_len_disabled = T &redef;
const ssh_sig_disabled = F &redef;
const telnet_sig_3byte_disabled = T &redef;
const telnet_sig_disabled = T &redef;
global ssh_len_conns: set[conn_id];
global rlogin_conns: table[conn_id] of rlogin_conn_info;
global root_backdoor_sig_conns: set[conn_id];
global did_sig_conns: table[conn_id] of set[string];
const BACKDOOR_UNKNOWN = 0;
const BACKDOOR_YES = 1;
const BACKDOOR_NO = 2;
const BACKDOOR_SIG_FOUND = 3;
global telnet_sig_conns: table[conn_id] of count;
global telnet_sig_3byte_conns: table[conn_id] of count;
global smtp_sig_conns: table[conn_id] of count;
global irc_sig_conns: table[conn_id] of count;
global gaobot_sig_conns: table[conn_id] of count;
const backdoor_log = open_log_file("backdoor") &redef;
function ignore_backdoor_conn(c: connection, bd: string): bool
{
local oa = c$id$orig_h;
local ra = c$id$resp_h;
local op = c$id$orig_p;
local rp = c$id$resp_p;
if ( backdoor_ignore_ports[bd, op] ||
backdoor_ignore_ports[bd, rp] ||
# Check port wildcards.
backdoor_ignore_ports[bd, 0/tcp] ||
(ra in local_nets && bd in backdoor_ignore_local) ||
(ra !in local_nets && bd in backdoor_ignore_remote) ||
backdoor_ignore_src_addrs[bd, oa] ||
backdoor_ignore_src_addrs[bd, mask_addr(oa, 16)] ||
backdoor_ignore_src_addrs[bd, mask_addr(oa, 24)] ||
backdoor_ignore_dst_addrs[bd, ra] ||
backdoor_ignore_dst_addrs[bd, mask_addr(ra, 16)] ||
backdoor_ignore_dst_addrs[bd, mask_addr(ra, 24)] )
return T;
if ( [oa, op] in backdoor_ignore_host_port_pairs ||
[ra, rp] in backdoor_ignore_host_port_pairs )
return T;
if ( bd != "*" )
# Evaluate again, but for wildcarding the backdoor.
return ignore_backdoor_conn(c, "*");
else
return F;
}
function log_backdoor(c: connection, tag: string): bool
{
if ( ignore_backdoor_conn(c, tag) )
return F;
local id = c$id;
if ( backdoor_annotate_standard_ports &&
(id$orig_p in backdoor_standard_ports ||
id$resp_p in backdoor_standard_ports) )
append_addl(c, fmt("[%s]", tag));
else if ( id$orig_h in backdoor_ignore_hosts ||
id$resp_h in backdoor_ignore_hosts ||
id$orig_h in backdoor_ignore_src_nets ||
id$resp_h in backdoor_ignore_dst_nets )
return F;
else
{
print backdoor_log, fmt("%.6f %s > %s %s",
c$start_time,
endpoint_id(id$orig_h, id$orig_p),
endpoint_id(id$resp_h, id$resp_p),
tag);
NOTICE([$note=BackdoorFound, $msg=tag, $conn=c]);
if ( dump_backdoor_packets )
{
mkdir("backdoor-packets");
local fname = fmt("backdoor-packets/%s:%.2f",
tag, current_time());
dump_current_packet(fname);
}
if ( backdoor_demux_disabled ||
tag in backdoor_demux_skip_tags )
{
if ( active_connection(c$id) )
skip_further_processing(c$id);
}
else
demux_conn(id, tag, "orig", "resp");
}
return T;
}
event new_connection(c: connection)
{
local id = c$id;
if ( ! rlogin_sig_disabled || ! rlogin_sig_1byte_disabled )
{
local i: rlogin_conn_info;
i$o_num_null = i$o_len = i$r_num_null = i$r_len = 0;
rlogin_conns[id] = i;
}
}
event backdoor_remove_conn(c: connection)
{
local id = c$id;
delete ssh_len_conns[id];
delete telnet_sig_conns[id];
delete telnet_sig_3byte_conns[id];
delete rlogin_conns[id];
delete root_backdoor_sig_conns[id];
delete smtp_sig_conns[id];
delete irc_sig_conns[id];
delete gaobot_sig_conns[id];
delete did_sig_conns[id];
}
event root_backdoor_signature_found(c: connection)
{
if ( root_backdoor_sig_disabled ||
ignore_backdoor_conn(c, "root-bd-sig") )
return;
local id = c$id;
# For root backdoors, don't ignore standard ports. This is because
# we shouldn't see such a backdoor even on 23/tcp or 513/tcp!
if ( id !in root_backdoor_sig_conns )
{
add root_backdoor_sig_conns[id];
log_backdoor(c, "root-bd-sig");
}
}
function signature_found(c: connection, sig_disabled: bool, sig_name: string)
{
if ( sig_disabled )
return;
if ( ignore_backdoor_conn(c, sig_name) )
return;
if ( c$id !in did_sig_conns )
did_sig_conns[c$id] = set();
if ( sig_name !in did_sig_conns[c$id] )
{
add did_sig_conns[c$id][sig_name];
log_backdoor(c, sig_name);
}
}
event ftp_signature_found(c: connection)
{
signature_found(c, ftp_sig_disabled, "ftp-sig");
}
event napster_signature_found(c: connection)
{
signature_found(c, napster_sig_disabled, "napster-sig");
}
event gnutella_signature_found(c: connection)
{
signature_found(c, gnutella_sig_disabled, "gnutella-sig");
}
event kazaa_signature_found(c: connection)
{
signature_found(c, kazaa_sig_disabled, "kazaa-sig");
}
event http_signature_found(c: connection)
{
signature_found(c, http_sig_disabled, "http-sig");
}
event http_proxy_signature_found(c: connection)
{
signature_found(c, http_proxy_sig_disabled, "http-proxy-sig");
}
event ssh_signature_found(c: connection, is_orig: bool)
{
signature_found(c, ssh_sig_disabled, "ssh-sig");
}
event smtp_signature_found(c: connection)
{
signature_found(c, smtp_sig_disabled, "smtp-sig");
}
event irc_signature_found(c: connection)
{
signature_found(c, irc_sig_disabled, "irc-sig");
}
event gaobot_signature_found(c: connection)
{
signature_found(c, gaobot_sig_disabled, "gaobot-sig");
}
event telnet_signature_found(c: connection, is_orig: bool, len: count)
{
local id = c$id;
if ( ignore_backdoor_conn(c, "telnet-sig") )
return;
if ( ! telnet_sig_disabled && id !in telnet_sig_conns )
telnet_sig_conns[id] = BACKDOOR_SIG_FOUND;
if ( ! telnet_sig_3byte_disabled && len == 3 &&
id !in telnet_sig_3byte_conns )
telnet_sig_3byte_conns[id] = BACKDOOR_SIG_FOUND;
}
event rlogin_signature_found(c: connection, is_orig: bool,
num_null: count, len: count)
{
local id = c$id;
if ( (rlogin_sig_disabled && rlogin_sig_1byte_disabled) ||
ignore_backdoor_conn(c, "rlogin-sig") )
return;
local ri = rlogin_conns[id];
if ( is_orig && ri$o_num_null == 0 )
ri$o_num_null = num_null;
else if ( ! is_orig && ri$r_num_null == 0 )
{
ri$r_num_null = num_null;
ri$r_len = len;
}
else
return;
if ( ri$o_num_null == 0 || ri$r_num_null == 0 )
return;
if ( ! rlogin_sig_1byte_disabled && ri$r_len == 1 )
log_backdoor(c, "rlogin-sig-1byte");
if ( ! rlogin_sig_disabled )
log_backdoor(c, "rlogin-sig");
}
function ssh_len_stats(c: connection, os: backdoor_endp_stats,
rs: backdoor_endp_stats) : bool
{
if ( ssh_len_disabled || c$id in ssh_len_conns )
return F;
if ( os$num_pkts == 0 || rs$num_pkts == 0 )
return F;
# XXX: only use ssh-len for partial connections
local is_partial = os$is_partial || rs$is_partial;
if ( ! is_partial )
return F;
local num_pkts = os$num_pkts + rs$num_pkts;
if ( num_pkts < ssh_min_num_pkts )
return F;
local num_8k0_pkts = os$num_8k0_pkts + rs$num_8k0_pkts;
local num_8k4_pkts = os$num_8k4_pkts + rs$num_8k4_pkts;
local id = c$id;
if ( num_8k0_pkts >= num_pkts * ssh_min_ssh_pkts_ratio )
{
add ssh_len_conns[id];
log_backdoor(c, "ssh-len-v2.x");
}
else if ( num_8k4_pkts >= num_pkts * ssh_min_ssh_pkts_ratio )
{
add ssh_len_conns[id];
log_backdoor(c, "ssh-len-v1.x");
}
return T;
}
function telnet_stats(c: connection, os: backdoor_endp_stats,
rs: backdoor_endp_stats) : bool
{
local num_lines = os$num_lines + rs$num_lines;
local num_normal_lines = os$num_normal_lines + rs$num_normal_lines;
if ( num_lines < backdoor_min_num_lines ||
num_normal_lines < num_lines * backdoor_min_normal_line_ratio )
return F;
local num_bytes = os$num_bytes + rs$num_bytes;
local num_7bit_ascii = os$num_7bit_ascii + rs$num_7bit_ascii;
if ( num_bytes < backdoor_min_bytes ||
num_7bit_ascii < num_bytes * backdoor_min_7bit_ascii_ratio )
return F;
local id = c$id;
if ( id in telnet_sig_conns &&
telnet_sig_conns[id] != BACKDOOR_YES )
{
telnet_sig_conns[id] = BACKDOOR_YES;
log_backdoor(c, "telnet-sig");
}
if ( id in telnet_sig_3byte_conns &&
telnet_sig_3byte_conns[id] != BACKDOOR_YES )
{
telnet_sig_3byte_conns[id] = BACKDOOR_YES;
log_backdoor(c, "telnet-sig-3byte");
}
return T;
}
event backdoor_stats(c: connection,
os: backdoor_endp_stats, rs: backdoor_endp_stats)
{
telnet_stats(c, os, rs);
ssh_len_stats(c, os, rs);
}


@ -1,277 +0,0 @@
# $Id:$
#
# bittorrent.bro - policy script for analyzing BitTorrent traffic
# ---------------------------------------------------------------
# This code contributed by Nadi Sarrar.
@load dpd
@load weird
module BitTorrent;
export {
# Whether to log the length of PDUs.
global log_pdu_length = T &redef;
}
redef capture_filters += { ["bittorrent"] = "tcp" };
type bt_peer_state: enum {
choked, # peer won't receive any responses to requests (initial state)
unchoked # peer may do requests
};
type bt_peer_info: record {
# Total of pure peer wire protocol overhead data (w/o pieces).
protocol_total: count &default = 0;
# State of the peer - choked or unchoked.
state: bt_peer_state &default = choked;
# Total number of seconds the peer was unchoked.
unchoked: interval &default = 0 secs;
# Time of the last received unchoke message.
time_last_unchoked: time;
};
type bt_peer_conn: record {
id: count;
orig: bt_peer_info;
resp: bt_peer_info;
weird: bool &default = F;
};
global bittorrent_log = open_log_file("bittorrent") &redef;
global bt_peer_conns : table[conn_id] of bt_peer_conn;
global peer_conn_count = 0;
function record_peer_protocol_traffic(c: connection, is_orig: bool,
protocol_len: count): count
{
if ( c$id in bt_peer_conns )
{
local pc = bt_peer_conns[c$id];
if ( is_orig )
pc$orig$protocol_total += protocol_len;
else
pc$resp$protocol_total += protocol_len;
return pc$id;
}
return 0;
}
function record_choke(pi: bt_peer_info, now: time)
{
if ( pi$state == unchoked )
{
pi$state = choked;
pi$unchoked += now - pi$time_last_unchoked;
}
}
function record_unchoke(pi: bt_peer_info, now: time)
{
if ( pi$state == choked )
{
pi$state = unchoked;
pi$time_last_unchoked = now;
}
}
function lookup_bt_peer(id: conn_id): bt_peer_conn
{
if ( id in bt_peer_conns )
return bt_peer_conns[id];
local orig: bt_peer_info;
local resp: bt_peer_info;
local pc: bt_peer_conn;
pc$orig = orig;
pc$resp = resp;
pc$id = ++peer_conn_count;
bt_peer_conns[id] = pc;
return pc;
}
function bt_log_id(id: conn_id, cid: count, tag: string, is_orig: bool): string
{
return fmt("%.6f P%d %s %s:%d %s %s:%d",
network_time(), cid, tag, id$orig_h, id$orig_p,
is_orig ? ">" : "<", id$resp_h, id$resp_p);
}
function pdu_log_len(len: count): string
{
return log_pdu_length ? fmt("[PDU-len:%d]", len) : "";
}
function log_pdu(c: connection, is_orig: bool, tag: string, len: count): count
{
local cid = record_peer_protocol_traffic(c, is_orig, len);
print bittorrent_log,
fmt("%s %s", bt_log_id(c$id, cid, tag, is_orig),
pdu_log_len(len));
return cid;
}
function log_pdu_str(c: connection, is_orig: bool, tag: string, len: count,
str: string)
{
local cid = record_peer_protocol_traffic(c, is_orig, len);
print bittorrent_log,
fmt("%s %s %s", bt_log_id(c$id, cid, tag, is_orig),
pdu_log_len(len), str);
}
function log_pdu_str_n(c: connection, is_orig: bool, tag: string, len: count,
n: count, str: string)
{
local cid = record_peer_protocol_traffic(c, is_orig, len);
print bittorrent_log,
fmt("%s %s %s", bt_log_id(c$id, cid, tag, is_orig),
pdu_log_len(n), str);
}
event bittorrent_peer_handshake(c: connection, is_orig: bool, reserved: string,
info_hash: string, peer_id: string)
{
local pc = lookup_bt_peer(c$id);
log_pdu_str(c, is_orig, "handshake", 68,
fmt("[peer_id:%s info_hash:%s reserved:%s]",
bytestring_to_hexstr(peer_id),
bytestring_to_hexstr(info_hash),
bytestring_to_hexstr(reserved)));
}
event bittorrent_peer_keep_alive(c: connection, is_orig: bool)
{
log_pdu(c, is_orig, "keep-alive", 4);
}
event bittorrent_peer_choke(c: connection, is_orig: bool)
{
local cid = log_pdu(c, is_orig, "choke", 5);
if ( cid > 0 )
{
local pc = bt_peer_conns[c$id];
record_choke(is_orig ? pc$resp : pc$orig, network_time());
}
}
event bittorrent_peer_unchoke(c: connection, is_orig: bool)
{
local cid = log_pdu(c, is_orig, "unchoke", 5);
if ( cid > 0 )
{
local pc = bt_peer_conns[c$id];
record_unchoke(is_orig ? pc$resp : pc$orig, network_time());
}
}
event bittorrent_peer_interested(c: connection, is_orig: bool)
{
log_pdu(c, is_orig, "interested", 5);
}
event bittorrent_peer_not_interested(c: connection, is_orig: bool)
{
log_pdu(c, is_orig, "not-interested", 5);
}
event bittorrent_peer_have(c: connection, is_orig: bool, piece_index: count)
{
log_pdu(c, is_orig, "have", 9);
}
event bittorrent_peer_bitfield(c: connection, is_orig: bool, bitfield: string)
{
log_pdu_str(c, is_orig, "bitfield", 5 + byte_len(bitfield),
fmt("[bitfield:%s]",
bytestring_to_hexstr(bitfield)));
}
event bittorrent_peer_request(c: connection, is_orig: bool, index: count,
begin: count, length: count)
{
log_pdu_str(c, is_orig, "request", 17,
fmt("[index:%d begin:%d length:%d]", index, begin, length));
}
event bittorrent_peer_piece(c: connection, is_orig: bool, index: count,
begin: count, piece_length: count)
{
log_pdu_str_n(c, is_orig, "piece", 13, 13 + piece_length,
fmt("[index:%d begin:%d piece_length:%d]",
index, begin, piece_length));
}
event bittorrent_peer_cancel(c: connection, is_orig: bool, index: count,
begin: count, length: count)
{
log_pdu_str(c, is_orig, "cancel", 7,
fmt("[index:%d begin:%d length:%d]",
index, begin, length));
}
event bittorrent_peer_port(c: connection, is_orig: bool, listen_port: port)
{
log_pdu_str(c, is_orig, "port", 5,
fmt("[listen_port:%s]", listen_port));
}
event bittorrent_peer_unknown(c: connection, is_orig: bool, message_id: count,
data: string)
{
log_pdu_str(c, is_orig, "<unknown>", 5 + byte_len(data),
fmt("[message_id:%d]", message_id));
}
event bittorrent_peer_weird(c: connection, is_orig: bool, msg: string)
{
local pc = lookup_bt_peer(c$id);
pc$weird = T;
print bittorrent_log,
fmt("%s [%s]", bt_log_id(c$id, pc$id, "<weird>", is_orig), msg);
event conn_weird(msg, c);
}
function log_close(c: connection, pc: bt_peer_conn, is_orig: bool)
{
local endp = is_orig ? c$orig : c$resp;
local peer_i = is_orig ? pc$orig : pc$resp;
local status =
pc$weird ?
fmt("size:%d", endp$size) :
fmt("unchoked:%.06f size_protocol:%d size_pieces:%d",
peer_i$unchoked, peer_i$protocol_total,
endp$size - peer_i$protocol_total);
print bittorrent_log,
fmt("%s [duration:%.06f %s]",
bt_log_id(c$id, pc$id, "<closed>", is_orig),
c$duration, status);
}
event connection_state_remove(c: connection)
{
if ( c$id !in bt_peer_conns )
return;
local pc = bt_peer_conns[c$id];
delete bt_peer_conns[c$id];
record_choke(pc$orig, c$start_time + c$duration);
record_choke(pc$resp, c$start_time + c$duration);
log_close(c, pc, T);
log_close(c, pc, F);
}
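# Usage sketch for a site policy: load the analyzer but keep PDU lengths
# out of bittorrent.log (log_pdu_length is declared &redef in the export
# section above):
#
# @load bittorrent
# redef BitTorrent::log_pdu_length = F;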

View file

@ -1,52 +0,0 @@
# $Id: blaster.bro 5952 2008-07-13 19:45:15Z vern $
#
# Identifies W32.Blaster-infected hosts by observing their scanning
# activity.
@load notice
@load site
# Which hosts have scanned which addresses via 135/tcp.
global w32b_scanned: table[addr] of set[addr] &write_expire = 5min;
global w32b_reported: set[addr] &persistent;
const W32B_port = 135/tcp;
const W32B_MIN_ATTEMPTS = 50 &redef;
redef enum Notice += {
W32B_SourceLocal,
W32B_SourceRemote,
};
event connection_attempt(c: connection)
{
if ( c$id$resp_p != W32B_port )
return;
local ip = c$id$orig_h;
if ( ip in w32b_reported )
return;
if ( ip in w32b_scanned )
{
add (w32b_scanned[ip])[c$id$resp_h];
if ( length(w32b_scanned[ip]) >= W32B_MIN_ATTEMPTS )
{
if ( is_local_addr(ip) )
NOTICE([$note=W32B_SourceLocal, $conn=c,
$msg=fmt("W32.Blaster local source: %s",
ip)]);
else
NOTICE([$note=W32B_SourceRemote, $conn=c,
$msg=fmt("W32.Blaster remote source: %s",
ip)]);
add w32b_reported[ip];
}
}
else
		w32b_scanned[ip] = set(c$id$resp_h) &mergeable;
}
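# Tuning sketch (illustrative value; W32B_MIN_ATTEMPTS is &redef above):
#
# redef W32B_MIN_ATTEMPTS = 100;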

View file

@ -1,55 +0,0 @@
# $Id: brolite-backdoor.bro 2956 2006-05-14 01:08:34Z vern $
# Sample file for running backdoor detector
#
# Note, this can consume significant processing resources when running
# on live traffic.
#
# To run bro with this script using a Bro Lite setup:
#
# rename this script to hostname.bro
# run: $BROHOME/etc/bro.rc start
# or bro -i interface brolite-backdoor.bro
@load site
@load backdoor
@load weird
# By default, do backdoor detection on everything except standard HTTP
# and SMTP ports.
redef capture_filters += [ ["tcp"] = "tcp" ];
redef restrict_filters +=
[ ["not-http"] = "not (port 80 or port 8000 or port 8080)" ];
redef restrict_filters += [ ["not-smtp"] = "not (port 25 or port 587)" ];
redef use_tagging = T;
# Set if you want to dump packets that trigger the detections.
redef dump_backdoor_packets = T;
# Disable (set to T) if you don't care about this traffic.
# redef gnutella_sig_disabled = T;
# redef kazaa_sig_disabled = T;
redef napster_sig_disabled = T; # too many false positives
# Ignore outgoing, only report incoming backdoors.
redef backdoor_ignore_remote += {
ftp_backdoor_sigs, ssh_backdoor_sigs, rlogin_backdoor_sigs,
http_backdoor_sigs, http_proxy_backdoor_sigs, smtp_backdoor_sigs,
};
# Set these to send mail on backdoor alarms.
# redef mail_dest = "youremail@yourhost.dom";
# redef notice_action_filters += {
# [BackdoorFound] = send_email_notice,
#};
# Tuning: use more aggressive timeouts to reduce CPU and memory, as these
# have little effect on backdoor analysis.
redef tcp_SYN_timeout = 1 sec;
redef tcp_attempt_delay = 1 sec;
redef tcp_inactivity_timeout = 1 min;
redef udp_inactivity_timeout = 5 secs;
redef icmp_inactivity_timeout = 5 secs;

View file

@ -1,82 +0,0 @@
# $Id: brolite-sigs.bro 3856 2006-12-02 00:18:57Z vern $
# Bro Lite signature configuration file
# General policy - these scripts are more infrastructural than service
# oriented, so in general avoid changing anything here.
# Set global constant. This can be used in ifdef statements to determine
# if signatures are enabled.
const use_signatures = T;
@load snort # basic definitions for signatures
@load signatures # the signature policy engine
@load sig-functions # addl. functions added for signature accuracy
@load sig-action # actions related to particular signatures
# Flag HTTP worm sources such as Code Red.
@load worm
# Do worm processing
redef notice_action_filters += { [RemoteWorm] = file_notice };
# Ports that need to be captured for signatures to see a useful
# cross section of traffic.
redef capture_filters += {
["sig-http"] =
"tcp port 80 or tcp port 8080 or tcp port 8000 or tcp port 8001",
["sig-ftp"] = "port ftp",
["sig-telnet"] = "port telnet",
["sig-portmapper"] = "port 111",
["sig-smtp"] = "port smtp",
["sig-imap"] = "port 143",
["sig-snmp"] = "port 161 or port 162",
["sig-dns"] = "port 53",
# rsh/rlogin/rexec
["sig-rfoo"] = "port 512 or port 513 or port 515",
# Range of TCP ports for general RPC traffic. This can also
# occur on other ports, but these should catch a lot without
	# a major performance hit. We skip ports associated with
# HTTP, SSH and M$.
["sig-rpc"] = "tcp[2:2] > 32770 and tcp[2:2] < 32901 and tcp[0:2] != 80 and tcp[0:2] != 22 and tcp[0:2] != 139",
};
### Why is this called "tcp3"?
# Catch outbound M$ scanning. Returns filter listing local addresses
# along with the interesting ports.
function create_tcp3_filter(): string
{
local local_addrs = "";
local firsttime = T;
for ( l in local_nets )
{
if ( firsttime )
{
local_addrs = fmt("src net %s", l);
firsttime = F;
}
else
local_addrs = fmt("%s or src net %s", local_addrs, l);
}
local MS_scan_ports =
"dst port 135 or dst port 137 or dst port 139 or dst port 445";
if ( local_addrs == "" )
return MS_scan_ports;
else
return fmt("(%s) and (%s)", local_addrs, MS_scan_ports);
}
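# For illustration: with local_nets = { 10.0.0.0/8 }, the function above
# returns "(src net 10.0.0.0/8) and (dst port 135 or dst port 137 or
# dst port 139 or dst port 445)"; with no local_nets configured, it
# returns just the port clause.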
# Create and apply the filter.
redef capture_filters += { ["tcp3"] = create_tcp3_filter()};
# Turn on ICMP analysis.
redef capture_filters += { ["icmp"] = "icmp"};
# Load the addendum signatures. These are utility signatures that do not
# produce event messages.
redef signature_files += "sig-addendum";

View file

@ -1,195 +0,0 @@
# Bro Lite base configuration file.
# General policy - these scripts are more infrastructural than service
# oriented, so in general avoid changing anything here.
@load site # defines local and neighbor networks from static config
@load tcp # initialize BPF filter for SYN/FIN/RST TCP packets
@load weird # initialize generic mechanism for unusual events
@load conn # access and record connection events
@load hot # defines certain forms of sensitive access
@load frag # process TCP fragments
@load print-resources # on exit, print resource usage information
# Scan detection policy.
@load scan # generic scan detection mechanism
@load trw # additional, more sensitive scan detection
#@load drop # include if installation has ability to drop hostile remotes
# Application level policy - these scripts operate on the specific service.
@load http # general http analyzer, low level of detail
@load http-request # detailed analysis of http requests
@load http-reply	# detailed analysis of http replies
# Track software versions; required for some signature matching. Also
# can be used by http and ftp policies.
@load software
@load ftp # FTP analysis
@load portmapper # record and analyze RPC portmapper requests
@load tftp # identify and log TFTP sessions
@load login # rlogin/telnet analyzer
@load irc # IRC analyzer
@load blaster # blaster worm detection
@load stepping # "stepping stone" detection
@load synflood # synflood attacks detection
@load smtp # record and analyze email traffic - somewhat expensive
@load notice-policy # tuning of notices to downgrade some alarms
# off by default
#@load icmp # icmp analysis
# Tuning of memory consumption.
@load inactivity # time out connections for certain services more quickly
# @load print-globals # on exit, print the size of global script variables
# Record system statistics to the notice file
@load stats
# udp analysis - potentially expensive, depending on a site's traffic profile
#@load udp.all
#@load remove-multicast
# Prints the pcap filter and immediately exits. Not used during
# normal operation.
#@load print-filter
## End policy script loading.
## General configuration.
@load rotate-logs
redef log_rotate_base_time = "0:00";
redef log_rotate_interval = 24 hr;
# Set additional policy prefixes.
@prefixes += lite
## End basic configuration.
## Scan configuration.
@ifdef ( Scan::analyze_all_services )
redef Scan::analyze_all_services = T;
# The following turns off scan detection.
#redef Scan::suppress_scan_checks = T;
# Be a bit more aggressive than default (though the defaults
# themselves should be fixed).
redef Scan::report_outbound_peer_scan = { 100, 1000, };
# These services are skipped for scan detection due to excessive
# background noise.
redef Scan::skip_services += {
http, # Avoid Code Red etc. overload
27374/tcp, # Massive scanning in Jan 2002
1214/tcp, # KaZaa scans
12345/tcp, # Massive scanning in Apr 2002
445/tcp, # Massive distributed scanning Oct 2002
135/tcp, # These days, NetBIOS scanning is endemic
137/udp, # NetBIOS
139/tcp, # NetBIOS
1025/tcp,
6129/tcp, # Dameware
3127/tcp, # MyDoom worms worms worms!
2745/tcp, # Bagel worm
1433/tcp, # Distributed scanning, April 2004
5000/tcp, # Distributed scanning, May 2004
5554/tcp, # More worm food, May 2004
9898/tcp, # Worms attacking worms. ugh - May 2004
3410/tcp, # More worm food, June 2004
3140/tcp, # Dyslexic worm food, June 2004
27347/tcp, # Can't kids type anymore?
1023/tcp, # Massive scanning, July 2004
17300/tcp, # Massive scanning, July 2004
};
@endif
@ifdef ( ICMP::detect_scans )
# Whether to detect ICMP scans.
redef ICMP::detect_scans = F;
redef ICMP::scan_threshold = 100;
@endif
@ifdef ( TRW::TRWAddressScan )
# remove logging TRW scan events
redef notice_action_filters += {
[TRW::TRWAddressScan] = ignore_notice,
};
@endif
# Note: default scan configuration is conservative in terms of memory use and
# might miss slow scans. Consider uncommenting these based on your site's scan
# traffic.
#redef distinct_peers &create_expire = 30 mins;
#redef distinct_ports &create_expire = 30 mins;
#redef distinct_low_ports &create_expire= 30 mins;
## End scan configuration.
## Additional IRC checks.
redef IRC::hot_words += /.*exe/ ;
## Dynamic Protocol Detection configuration
#
# This is off by default, as it requires a more powerful Bro host.
# Uncomment next line to activate.
# const use_dpd = T;
@ifdef ( use_dpd )
@load dpd
@load irc-bot
@load dyn-disable
@load detect-protocols
@load detect-protocols-http
@load proxy
@load ssh
# By default, DPD looks at all traffic except port 80.
# For lightly loaded networks, comment out the restrict_filters line.
# For heavily loaded networks, try adding addition ports (e.g., 25) to
# the restrict filters.
redef capture_filters += [ ["tcp"] = "tcp" ];
redef restrict_filters += [ ["not-http"] = "not (port 80)" ];
@endif
@ifdef ( ProtocolDetector::ServerFound )
# Report servers on non-standard ports only for local addresses.
redef notice_policy += {
[$pred(a: notice_info) =
{ return a$note == ProtocolDetector::ServerFound &&
! is_local_addr(a$src); },
$result = NOTICE_FILE,
$priority = 1],
# Report protocols on non-standard ports only for local addresses
# (unless it's IRC).
[$pred(a: notice_info) =
{ return a$note == ProtocolDetector::ProtocolFound &&
! is_local_addr(a$dst) &&
a$sub != "IRC"; },
$result = NOTICE_FILE,
$priority = 1],
};
@endif
# The following is used to transfer state between Bro's when one
# takes over from another.
#
# NOTE: not implemented in the production version, so ignored for now.
@ifdef ( remote_peers_clear )
redef remote_peers_clear += {
[127.0.0.1, 55555/tcp] = [$hand_over = T],
[127.0.0.1, 0/tcp] = [$hand_over = T]
};
@endif
# Use tagged log files for notices.
redef use_tagging = T;

View file

@ -1,190 +0,0 @@
# $Id:$
#
# bt-tracker.bro - analysis of BitTorrent tracker traffic
# ------------------------------------------------------------------------------
# This code contributed by Nadi Sarrar.
@load dpd
@load weird
module BitTorrent;
export {
# Whether to log tracker URIs.
global log_tracker_request_uri = F &redef;
}
redef capture_filters += { ["bittorrent"] = "tcp", };
global bt_tracker_log = open_log_file("bt-tracker") &redef;
global bt_tracker_conns: table[conn_id] of count;
global tracker_conn_count: count = 0;
function bt_log_tag(id: conn_id, cid: count, tag: string, is_orig: bool): string
{
return fmt("%.6f T%d %s %s:%d %s %s:%d",
network_time(), cid, tag, id$orig_h, id$orig_p,
is_orig ? ">" : "<", id$resp_h, id$resp_p);
}
event bt_tracker_request(c: connection, uri: string,
headers: bt_tracker_headers)
{
# Parse and validate URI.
local pair = split1(uri, /\?/);
local keys = split(pair[2], /&/);
local info_hash = "";
local peer_ide = "";
local peer_port = 0/udp;
local uploaded = -1;
local downloaded = -1;
local left = -1;
local compact = T;
local peer_event = "empty";
for ( idx in keys )
{
local keyval = split1(keys[idx], /=/);
if ( length(keyval) != 2 )
next;
local key = to_lower(keyval[1]);
local val = keyval[2];
if ( key == "info_hash" )
info_hash = unescape_URI(val);
else if ( key == "peer_id" )
peer_ide = unescape_URI(val);
else if ( key == "port" )
peer_port = to_port(to_count(val), tcp);
else if ( key == "uploaded" )
uploaded = to_int(val);
else if ( key == "downloaded" )
downloaded = to_int(val);
else if ( key == "left" )
left = to_int(val);
else if ( key == "compact" )
compact = (to_int(val) == 1);
else if ( key == "event" )
{
val = to_lower(val);
if ( val == /started|stopped|completed/ )
peer_event = val;
}
}
if ( info_hash == "" || peer_ide == "" || peer_port == 0/udp )
{ # Does not look like BitTorrent.
disable_analyzer(c$id, current_analyzer());
delete bt_tracker_conns[c$id];
return;
}
if ( peer_port != 0/tcp )
expect_connection(to_addr("0.0.0.0"), c$id$orig_h,
peer_port, ANALYZER_BITTORRENT, 1 min);
local id: count;
if ( c$id in bt_tracker_conns )
id = bt_tracker_conns[c$id];
else
{
id = ++tracker_conn_count;
bt_tracker_conns[c$id] = id;
}
print bt_tracker_log,
fmt("%s [peer_id:%s info_hash:%s port:%s event:%s up:%d down:%d left:%d compact:%s]%s",
bt_log_tag(c$id, id, "request", T),
bytestring_to_hexstr(peer_ide),
bytestring_to_hexstr(info_hash),
peer_port, peer_event,
uploaded, downloaded, left,
compact ? "yes" : "no",
log_tracker_request_uri ? fmt(" GET %s", uri) : "");
}
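# Illustrative request (hypothetical values): a tracker GET such as
#   /announce?info_hash=%01%23...&peer_id=-XY0001-abcdefghijk&port=6881&uploaded=0&downloaded=0&left=4096&event=started
# is parsed by the loop above into peer_port = 6881/tcp, the uploaded/
# downloaded/left counters, and peer_event = "started".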
function benc_status(benc: bittorrent_benc_dir, tag: string): string
{
if ( tag !in benc || ! benc[tag]?$i )
return "";
local fmt_tag = sub(tag, / /, "_");
	return fmt("%s:%d ", fmt_tag, benc[tag]$i);
}
event bt_tracker_response(c: connection, status: count,
headers: bt_tracker_headers,
peers: bittorrent_peer_set,
benc: bittorrent_benc_dir)
{
if ( c$id !in bt_tracker_conns )
return;
local id = bt_tracker_conns[c$id];
for ( peer in peers )
expect_connection(c$id$orig_h, peer$h, peer$p,
ANALYZER_BITTORRENT, 1 min);
if ( "failure reason" in benc )
{
print bt_tracker_log,
fmt("%s [failure_reason:\"%s\"]",
bt_log_tag(c$id, id, "response", F),
benc["failure reason"]?$s ?
benc["failure reason"]$s : "");
return;
}
print bt_tracker_log,
fmt("%s [%s%s%s%s%speers:%d]",
bt_log_tag(c$id, id, "response", F),
benc_status(benc, "warning message"),
benc_status(benc, "complete"),
benc_status(benc, "incomplete"),
benc_status(benc, "interval"),
benc_status(benc, "min interval"),
length(peers));
}
event bt_tracker_response_not_ok(c: connection, status: count,
headers: bt_tracker_headers)
{
if ( c$id in bt_tracker_conns )
{
local id = bt_tracker_conns[c$id];
print bt_tracker_log,
fmt("%s [status:%d]",
bt_log_tag(c$id, id, "response", F), status);
}
}
event bt_tracker_weird(c: connection, is_orig: bool, msg: string)
{
local id = (c$id in bt_tracker_conns) ? bt_tracker_conns[c$id] : 0;
print bt_tracker_log,
fmt("%s [%s]", bt_log_tag(c$id, id, "<weird>", is_orig), msg);
event conn_weird(msg, c);
}
event connection_state_remove(c: connection)
{
if ( c$id !in bt_tracker_conns )
return;
local id = bt_tracker_conns[c$id];
delete bt_tracker_conns[c$id];
print bt_tracker_log,
fmt("%s [duration:%.06f total:%d]",
# Ideally the direction here wouldn't be T or F
# but both, displayed as "<>".
bt_log_tag(c$id, id, "<closed>", T), c$duration,
c$orig$size + c$resp$size);
}

View file

@ -1,9 +0,0 @@
#! $Id: capture-events.bro 4674 2007-07-30 22:00:43Z vern $
#
# Captures all events to events.bst.
#
event bro_init()
{
capture_events("events.bst");
}

View file

@ -1,74 +0,0 @@
# $Id:$
# Logs evidence regarding the degree to which the packet capture process
# suffers from measurement loss.
#
# By default, only reports loss computed in terms of number of "gap events"
# (ACKs for a sequence number that's above a gap). You can also get an
# estimate in terms of number of bytes missing; this however is sometimes
# heavily affected by miscomputations due to broken packets with incorrect
# sequence numbers. (These packets also affect the first estimator, but
# only to a quite minor degree.)
@load notice
module CaptureLoss;
export {
redef enum Notice += {
CaptureLossReport, # interval report
CaptureLossSummary, # end-of-run summary
};
# Whether to also report byte-weighted estimates.
global report_byte_based_estimates = F &redef;
# Whether to generate per-interval reports even if there
# was no evidence of loss.
global report_if_none = F &redef;
# Whether to generate a summary even if there was no
# evidence of loss.
global summary_if_none = F &redef;
}
# Redefine this to be non-zero to get per-interval reports.
redef gap_report_freq = 0 sec;
event gap_report(dt: interval, info: gap_info)
{
if ( info$gap_events > 0 || report_if_none )
{
local msg = report_byte_based_estimates ?
fmt("gap-dt=%.6f acks=%d bytes=%d gaps=%d gap-bytes=%d",
dt, info$ack_events, info$ack_bytes,
info$gap_events, info$gap_bytes) :
fmt("gap-dt=%.6f acks=%d gaps=%d",
dt, info$ack_events, info$gap_events);
NOTICE([$note=CaptureLossReport, $msg=msg]);
}
}
event bro_done()
{
local g = get_gap_summary();
local gap_rate =
g$ack_events == 0 ? 0.0 :
(1.0 * g$gap_events) / (1.0 * g$ack_events);
local gap_bytes =
g$ack_bytes == 0 ? 0.0 :
(1.0 * g$gap_bytes) / (1.0 * g$ack_bytes);
if ( gap_rate == 0.0 && gap_bytes == 0.0 && ! summary_if_none )
return;
local msg = report_byte_based_estimates ?
fmt("estimated rate = %g / %g (events/bytes)",
gap_rate, gap_bytes) :
fmt("estimated rate = %g", gap_rate);
NOTICE([$note=CaptureLossSummary, $msg=msg]);
}
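# Usage sketch (hypothetical site policy loaded after this script):
#
# redef gap_report_freq = 1 min;		# non-zero enables per-interval reports
# redef CaptureLoss::report_if_none = T;	# report even lossless intervals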

View file

@ -1,9 +0,0 @@
#! $Id: capture-events.bro 6 2004-04-30 00:31:26Z jason $
#
# Captures all operations on &synchronized variables to state-updates.bst.
#
event bro_init()
{
capture_state_updates("state-updates.bst");
}

View file

@ -1,54 +0,0 @@
# $Id: checkpoint.bro 6724 2009-06-07 09:23:03Z vern $
#
# Checkpoints Bro's persistent state at regular intervals and scans
# the state directory for external updates.
const state_rescan_interval = 15 secs &redef;
const state_checkpoint_interval = 15 min &redef;
# Services for which the internal connection state is stored.
const persistent_services = {
21/tcp, # ftp
22/tcp, # ssh
23/tcp, # telnet
513/tcp, # rlogin
} &redef;
# The first timer fires immediately. This flag lets us ignore it.
global state_ignore_first = T;
event state_checkpoint()
{
if ( state_ignore_first )
state_ignore_first = F;
else if ( ! bro_is_terminating() )
checkpoint_state();
if ( state_checkpoint_interval > 0 secs )
schedule state_checkpoint_interval { state_checkpoint() };
}
event state_rescan()
{
rescan_state();
if ( state_rescan_interval > 0 secs )
schedule state_rescan_interval { state_rescan() };
}
event bro_init()
{
if ( state_checkpoint_interval > 0 secs )
schedule state_checkpoint_interval { state_checkpoint() };
if ( state_rescan_interval > 0 secs )
schedule state_rescan_interval { state_rescan() };
}
event connection_established(c: connection)
{
# Buggy?
# if ( c$id$resp_p in persistent_services )
# make_connection_persistent(c);
}
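# Tuning sketch (illustrative values; both constants are &redef above):
#
# redef state_checkpoint_interval = 1 hr;
# redef state_rescan_interval = 1 min;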

View file

@ -1,36 +0,0 @@
# $Id: clear-passwords.bro 4758 2007-08-10 06:49:23Z vern $
# Monitoring for use of cleartext passwords.
@load ftp
@load login
@load pop3
@load irc
const passwd_file = open_log_file("passwords") &redef;
# ftp, login and pop3 call login_{success,failure}, which in turn
# calls account_tried(), so we can snarf all at once here:
event account_tried(c: connection, user: string, passwd: string)
{
print passwd_file, fmt("%s account name '%s', password '%s': %s",
is_local_addr(c$id$orig_h) ? "local" : "remote",
user, passwd, id_string(c$id));
}
# IRC raises a different event on login, so we hook into it here:
event irc_join_message(c: connection, info_list: irc_join_list)
{
for ( l in info_list)
{
print passwd_file, fmt("IRC JOIN name '%s', password '%s'",
l$nick, l$password);
}
}
# Raised if IRC user tries to become operator:
event irc_oper_message(c: connection, user: string, password: string)
{
print passwd_file, fmt("IRC OPER name '%s', password '%s'",
user, password);
}

View file

@ -1,71 +0,0 @@
# $Id$
#
# Script which alarms if the number of connections per time interval
# exceeds a threshold.
#
# This script is mainly meant as a demonstration; it hasn't been hardened
# with/for operational use.
@load notice
module ConnFlood;
export {
redef enum Notice += {
ConnectionFloodStart, ConnectionFloodEnd,
};
	# Thresholds to report (conns/sec).
const thresholds: set[count] =
{ 1000, 2000, 4000, 6000, 8000, 10000, 20000, 50000 }
&redef;
# Average over this time interval.
const avg_interval = 10 sec &redef;
}
global conn_counter = 0;
global last_thresh = 0;
# Note: replace with connection_attempt if too expensive.
event new_connection(c: connection)
{
++conn_counter;
}
event check_flood()
{
local thresh = 0;
	local rate = double_to_count(conn_counter / interval_to_double(avg_interval));
# Find the largest threshold reached this interval.
for ( i in thresholds )
{
if ( rate >= i && rate > thresh )
thresh = i;
}
# Report if larger than last reported threshold.
if ( thresh > last_thresh )
{
NOTICE([$note=ConnectionFloodStart, $n=thresh,
$msg=fmt("flood begins at rate %d conns/sec", rate)]);
last_thresh = thresh;
}
# If no threshold was reached, the flood is over.
else if ( thresh == 0 && last_thresh > 0 )
{
NOTICE([$note=ConnectionFloodEnd, $n=thresh,
$msg=fmt("flood ends at rate %d conns/sec", rate)]);
last_thresh = 0;
}
conn_counter = 0;
schedule avg_interval { check_flood() };
}
event bro_init()
{
schedule avg_interval { check_flood() };
}
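# Tuning sketch (illustrative values; both constants are exported &redef
# above):
#
# redef ConnFlood::avg_interval = 30 sec;
# redef ConnFlood::thresholds += { 100000 };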

View file

@ -1,24 +0,0 @@
# $Id: conn-id.bro 45 2004-06-09 14:29:49Z vern $
# Simple functions for generating ASCII connection identifiers.
@load port-name
function id_string(id: conn_id): string
{
return fmt("%s > %s",
endpoint_id(id$orig_h, id$orig_p),
endpoint_id(id$resp_h, id$resp_p));
}
function reverse_id_string(id: conn_id): string
{
return fmt("%s < %s",
endpoint_id(id$orig_h, id$orig_p),
endpoint_id(id$resp_h, id$resp_p));
}
function directed_id_string(id: conn_id, is_orig: bool): string
{
return is_orig ? id_string(id) : reverse_id_string(id);
}
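# Usage sketch (hypothetical handler): tag new connections with their
# ASCII identifier.
#
# event new_connection(c: connection)
#	{
#	print fmt("new: %s", directed_id_string(c$id, T));
#	}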

View file

@ -1,425 +0,0 @@
# $Id: conn.bro 6782 2009-06-28 02:19:03Z vern $
@load notice
@load hot
@load port-name
@load netstats
@load conn-id
redef enum Notice += {
SensitiveConnection, # connection marked "hot"
};
const conn_closed = { TCP_CLOSED, TCP_RESET };
global have_FTP = F; # if true, we've loaded ftp.bro
global have_SMTP = F; # if true, we've loaded smtp.bro
# TODO: Do we have a nicer way of doing this?
export { global FTP::is_ftp_data_conn: function(c: connection): bool; }
# Whether to include connection state history in the logs generated
# by record_connection.
const record_state_history = F &redef;
# Whether to translate the local address in SensitiveConnection notices
# to a hostname. Meant as a demonstration of the "when" construct.
const xlate_hot_local_addr = F &redef;
# Whether to use DPD for generating the service field in the summaries.
# Default off, because it changes the format of conn.log in a way
# potentially incompatible with existing scripts.
const dpd_conn_logs = F &redef;
# Maps a given port on a given server's address to an RPC service.
# If we haven't loaded portmapper.bro, then it will be empty
# (and, ideally, queries to it would be optimized away ...).
global RPC_server_map: table[addr, port] of string;
const conn_file = open_log_file("conn") &redef;
function conn_state(c: connection, trans: transport_proto): string
{
local os = c$orig$state;
local rs = c$resp$state;
local o_inactive = os == TCP_INACTIVE || os == TCP_PARTIAL;
local r_inactive = rs == TCP_INACTIVE || rs == TCP_PARTIAL;
if ( trans == tcp )
{
if ( rs == TCP_RESET )
{
if ( os == TCP_SYN_SENT || os == TCP_SYN_ACK_SENT ||
(os == TCP_RESET &&
c$orig$size == 0 && c$resp$size == 0) )
return "REJ";
else if ( o_inactive )
return "RSTRH";
else
return "RSTR";
}
else if ( os == TCP_RESET )
return r_inactive ? "RSTOS0" : "RSTO";
else if ( rs == TCP_CLOSED && os == TCP_CLOSED )
return "SF";
else if ( os == TCP_CLOSED )
return r_inactive ? "SH" : "S2";
else if ( rs == TCP_CLOSED )
return o_inactive ? "SHR" : "S3";
else if ( os == TCP_SYN_SENT && rs == TCP_INACTIVE )
return "S0";
else if ( os == TCP_ESTABLISHED && rs == TCP_ESTABLISHED )
return "S1";
else
return "OTH";
}
else if ( trans == udp )
{
if ( os == UDP_ACTIVE )
return rs == UDP_ACTIVE ? "SF" : "S0";
else
return rs == UDP_ACTIVE ? "SHR" : "OTH";
}
else
return "OTH";
}
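# For illustration: an unanswered SYN maps to "S0"; a responder RST to
# that SYN maps to "REJ"; a connection cleanly closed by both sides maps
# to "SF".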
function conn_size(e: endpoint, trans: transport_proto): string
{
if ( e$size > 0 || (trans == tcp && e$state == TCP_CLOSED) )
return fmt("%d", e$size);
else
### should return 0 for TCP_RESET that went through TCP_CLOSED
return "?";
}
function service_name(c: connection): string
{
local p = c$id$resp_p;
if ( p in port_names )
return port_names[p];
else
return "other";
}
const state_graphic = {
["OTH"] = "?>?", ["REJ"] = "[",
["RSTO"] = ">]", ["RSTOS0"] = "}]", ["RSTR"] = ">[", ["RSTRH"] = "<[",
["S0"] = "}", ["S1"] = ">", ["S2"] = "}2", ["S3"] = "}3",
["SF"] = ">", ["SH"] = ">h", ["SHR"] = "<h",
};
function full_id_string(c: connection): string
{
local id = c$id;
local trans = get_port_transport_proto(id$orig_p);
local state = conn_state(c, trans);
local state_gr = state_graphic[state];
local service = service_name(c);
if ( state == "S0" || state == "S1" || state == "REJ" )
return fmt("%s %s %s/%s %s", id$orig_h, state_gr,
id$resp_h, service, c$addl);
else
return fmt("%s %sb %s %s/%s %sb %.1fs %s",
id$orig_h, conn_size(c$orig, trans),
state_gr, id$resp_h, service,
conn_size(c$resp, trans), c$duration, c$addl);
}
# The sets are indexed by the complete hot messages.
global hot_conns_reported: table[conn_id] of set[string];
# Low-level routine that generates the actual SensitiveConnection
# notice associated with a "hot" connection.
function do_hot_notice(c: connection, dir: string, host: string)
{
NOTICE([$note=SensitiveConnection, $conn=c,
$msg=fmt("hot: %s %s local host: %s",
full_id_string(c), dir, host)]);
}
# Generate a SensitiveConnection notice with the local hostname
# translated. Mostly intended as a demonstration of using "when".
function gen_hot_notice_with_hostnames(c: connection)
{
local id = c$id;
local inbound = is_local_addr(id$resp_h);
local dir = inbound ? "to" : "from";
local local_addr = inbound ? id$orig_h : id$resp_h;
add_notice_tag(c);
when ( local hostname = lookup_addr(local_addr) )
do_hot_notice(c, dir, hostname);
timeout 5 sec
{ do_hot_notice(c, dir, fmt("%s", local_addr)); }
}
function log_hot_conn(c: connection)
{
if ( c$id !in hot_conns_reported )
hot_conns_reported[c$id] = set() &mergeable;
local msg = full_id_string(c);
if ( msg !in hot_conns_reported[c$id] )
{
if ( xlate_hot_local_addr )
gen_hot_notice_with_hostnames(c);
else
NOTICE([$note=SensitiveConnection, $conn=c,
$msg=fmt("hot: %s", full_id_string(c))]);
add hot_conns_reported[c$id][msg];
}
}
function determine_service_non_DPD(c: connection) : string
{
if ( length(c$service) != 0 )
{
for ( i in c$service )
return i; # return first;
}
else if ( have_FTP && FTP::is_ftp_data_conn(c) )
return port_names[20/tcp];
else if ( [c$id$resp_h, c$id$resp_p] in RPC_server_map )
# Alternatively, perhaps this should be stored in $addl
# rather than $service, so the port number remains
# visible .... ?
return RPC_server_map[c$id$resp_h, c$id$resp_p];
else if ( c$orig$state == TCP_INACTIVE )
{
# We're seeing a half-established connection. Use the
# service of the originator if it's well-known and the
# responder isn't.
if ( c$id$resp_p !in port_names && c$id$orig_p in port_names )
return port_names[c$id$orig_p];
}
return service_name(c);
}
function determine_service(c: connection) : string
{
if ( ! dpd_conn_logs )
return determine_service_non_DPD(c);
if ( [c$id$resp_h, c$id$resp_p] in RPC_server_map )
add c$service[RPC_server_map[c$id$resp_h, c$id$resp_p]];
if ( length(c$service) == 0 )
{
# Empty service set. Use port as a hint.
if ( c$orig$state == TCP_INACTIVE )
{
# We're seeing a half-established connection. Use the
# service of the originator if it's well-known and the
# responder isn't.
if ( c$id$resp_p !in port_names &&
c$id$orig_p in port_names )
return fmt("%s?", port_names[c$id$orig_p]);
}
if ( c$id$resp_p in port_names )
return fmt("%s?", port_names[c$id$resp_p]);
return "other";
}
local service = "";
for ( s in c$service )
{
if ( sub_bytes(s, 0, 1) != "-" )
service = service == "" ? s : cat(service, ",", s);
}
return service != "" ? to_lower(service) : "other";
}
function record_connection(f: file, c: connection)
{
local id = c$id;
local local_init = is_local_addr(id$orig_h);
local local_addr = local_init ? id$orig_h : id$resp_h;
local remote_addr = local_init ? id$resp_h : id$orig_h;
local flags = local_init ? "L" : "X";
local trans = get_port_transport_proto(id$orig_p);
local duration: string;
# Do this first so we see the tag in addl.
if ( c$hot > 0 )
log_hot_conn(c);
if ( trans == tcp )
{
if ( c$orig$state in conn_closed || c$resp$state in conn_closed )
duration = fmt("%.06f", c$duration);
else
duration = "?";
}
else
duration = fmt("%.06f", c$duration);
local addl = c$addl;
@ifdef ( estimate_flow_size_and_remove )
# Annotate connection with separately-estimated size, if present.
local orig_est = estimate_flow_size_and_remove(id, T);
local resp_est = estimate_flow_size_and_remove(id, F);
if ( orig_est$have_est )
addl = fmt("%s olower=%.0fMB oupper=%.0fMB oincon=%s", addl,
orig_est$lower / 1e6, orig_est$upper / 1e6,
orig_est$num_inconsistent);
if ( resp_est$have_est )
addl = fmt("%s rlower=%.0fMB rupper=%.0fMB rincon=%s", addl,
resp_est$lower / 1e6, resp_est$upper / 1e6,
resp_est$num_inconsistent);
@endif
local service = determine_service(c);
local log_msg =
fmt("%.6f %s %s %s %s %d %d %s %s %s %s %s",
c$start_time, duration, id$orig_h, id$resp_h, service,
id$orig_p, id$resp_p, trans,
conn_size(c$orig, trans), conn_size(c$resp, trans),
conn_state(c, trans), flags);
if ( record_state_history )
log_msg = fmt("%s %s", log_msg,
c$history == "" ? "X" : c$history);
if ( addl != "" )
log_msg = fmt("%s %s", log_msg, addl);
print f, log_msg;
}
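# Example summary line (illustrative values), following the fmt() above:
#
# 1158055555.424549 2.102560 10.0.0.1 192.168.0.9 http 49152 80 tcp 512 1024 SF L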
event connection_established(c: connection)
{
Hot::check_hot(c, Hot::CONN_ESTABLISHED);
if ( c$hot > 0 )
log_hot_conn(c);
}
event partial_connection(c: connection)
{
if ( c$orig$state == TCP_PARTIAL && c$resp$state == TCP_INACTIVE )
# This appears to be a stealth scan. Don't do hot-checking
# as there wasn't an established connection.
;
else
{
Hot::check_hot(c, Hot::CONN_ESTABLISHED);
Hot::check_hot(c, Hot::APPL_ESTABLISHED); # assume it's been established
}
if ( c$hot > 0 )
log_hot_conn(c);
}
event connection_attempt(c: connection)
{
Hot::check_spoof(c);
Hot::check_hot(c, Hot::CONN_ATTEMPTED);
}
event connection_finished(c: connection)
{
if ( c$orig$size == 0 || c$resp$size == 0 )
# Hard to get excited about this - not worth logging again.
c$hot = 0;
else
Hot::check_hot(c, Hot::CONN_FINISHED);
}
event connection_partial_close(c: connection)
{
if ( c$orig$size == 0 || c$resp$size == 0 )
# Hard to get excited about this - not worth logging again.
c$hot = 0;
else
Hot::check_hot(c, Hot::CONN_FINISHED);
}
event connection_half_finished(c: connection)
{
Hot::check_hot(c, Hot::CONN_ATTEMPTED);
}
event connection_rejected(c: connection)
{
Hot::check_hot(c, Hot::CONN_REJECTED);
}
event connection_reset(c: connection)
{
Hot::check_hot(c, Hot::CONN_FINISHED);
}
event connection_pending(c: connection)
{
if ( c$orig$state in conn_closed &&
(c$resp$state == TCP_INACTIVE || c$resp$state == TCP_PARTIAL) )
# This is a stray FIN or RST - don't bother reporting.
return;
if ( c$orig$state == TCP_RESET || c$resp$state == TCP_RESET )
# We already reported this connection when the RST
# occurred.
return;
Hot::check_hot(c, Hot::CONN_FINISHED);
}
function connection_gone(c: connection, gone_type: string)
{
if ( c$orig$size == 0 || c$resp$size == 0 )
{
if ( c$orig$state == TCP_RESET && c$resp$state == TCP_INACTIVE)
# A bare RST, no other context. Ignore it.
return;
# Hard to get excited about this - not worth logging again,
# per connection_finished().
c$hot = 0;
}
else
Hot::check_hot(c, Hot::CONN_TIMEOUT);
}
event connection_state_remove(c: connection) &priority = -10
{
local os = c$orig$state;
local rs = c$resp$state;
if ( os == TCP_ESTABLISHED && rs == TCP_ESTABLISHED )
# It was still active, no summary generated.
connection_gone(c, "remove");
else if ( (os == TCP_CLOSED || rs == TCP_CLOSED) &&
(os == TCP_ESTABLISHED || rs == TCP_ESTABLISHED) )
# One side has closed, the other hasn't - it's in state S2
# or S3, hasn't been reported yet.
connection_gone(c, "remove");
record_connection(conn_file, c);
delete hot_conns_reported[c$id];
}

View file

@ -1,40 +0,0 @@
# $Id: contents.bro 47 2004-06-11 07:26:32Z vern $
redef capture_filters += { ["contents"] = "tcp" };
# Keeps track of to which given contents files we've written.
global contents_files: set[string];
event new_connection_contents(c: connection)
{
local id = c$id;
local orig_file =
fmt("contents.%s.%d-%s.%d",
id$orig_h, id$orig_p, id$resp_h, id$resp_p);
local resp_file =
fmt("contents.%s.%d-%s.%d",
id$resp_h, id$resp_p, id$orig_h, id$orig_p);
local orig_f: file;
local resp_f: file;
if ( orig_file !in contents_files )
{
add contents_files[orig_file];
orig_f = open(orig_file);
}
else
orig_f = open_for_append(orig_file);
if ( resp_file !in contents_files )
{
add contents_files[resp_file];
resp_f = open(resp_file);
}
else
resp_f = open_for_append(resp_file);
set_contents_file(id, CONTENTS_ORIG, orig_f);
set_contents_file(id, CONTENTS_RESP, resp_f);
}

View file

@ -1,62 +0,0 @@
# $Id: cpu-adapt.bro 1904 2005-12-14 03:27:15Z vern $
#
# Adjust load level based on cpu load.
@load load-level
# We increase the load-level if the average CPU load (percentage) is
# above this limit.
global cpu_upper_limit = 70.0 &redef;
# We decrease the load-level if the average CPU load is below this limit.
global cpu_lower_limit = 30.0 &redef;
# Time interval over which we average the CPU load.
global cpu_interval = 1 min &redef;
global cpu_last_proc_time = 0 secs;
global cpu_last_wall_time: time = 0;
event cpu_measure_load()
{
local res = resource_usage();
local proc_time = res$user_time + res$system_time;
local wall_time = current_time();
if ( cpu_last_proc_time > 0 secs )
{
local dproc = proc_time - cpu_last_proc_time;
local dwall = wall_time - cpu_last_wall_time;
local load = dproc / dwall * 100.0;
print ll_file, fmt("%.6f CPU load %.02f", network_time(), load);
# Second test is for whether we have any room to change
# things. It shouldn't be hardwired to "xxx10" ....
if ( load > cpu_upper_limit &&
current_load_level != LoadLevel10 )
{
print ll_file, fmt("%.6f CPU load above limit: %.02f",
network_time(), load);
increase_load_level();
}
else if ( load < cpu_lower_limit &&
current_load_level != LoadLevel1 )
{
print ll_file, fmt("%.6f CPU load below limit: %.02f",
network_time(), load);
decrease_load_level();
}
}
cpu_last_proc_time = proc_time;
cpu_last_wall_time = wall_time;
schedule cpu_interval { cpu_measure_load() };
}
event bro_init()
{
schedule cpu_interval { cpu_measure_load() };
}
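# Tuning sketch (illustrative values; all three globals are &redef above):
#
# redef cpu_upper_limit = 60.0;
# redef cpu_lower_limit = 40.0;
# redef cpu_interval = 30 sec;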

View file

@ -1,8 +0,0 @@
# $Id:$
redef capture_filters += { ["dce"] = "port 135" };
global dce_ports = { 135/tcp } &redef;
redef dpd_config += { [ANALYZER_DCE_RPC] = [$ports = dce_ports] };
# No default implementation for events.

View file

@ -1,41 +0,0 @@
# $Id: demux.bro 4758 2007-08-10 06:49:23Z vern $
const demux_dir = log_file_name("xscript") &redef;
global created_demux_dir = F;
# Table of which connections we're demuxing.
global demuxed_conn: set[conn_id];
# tag: identifier to use for the reason for demuxing
# otag: identifier to use for originator side of the connection
# rtag: identifier to use for responder side of the connection
function demux_conn(id: conn_id, tag: string, otag: string, rtag: string): bool
{
if ( id in demuxed_conn || ! active_connection(id) )
return F;
if ( ! created_demux_dir )
{
mkdir(demux_dir);
created_demux_dir = T;
}
local orig_file =
fmt("%s/%s.%s.%s.%d-%s.%d", demux_dir, otag, tag,
id$orig_h, id$orig_p, id$resp_h, id$resp_p);
local resp_file =
fmt("%s/%s.%s.%s.%d-%s.%d", demux_dir, rtag, tag,
id$resp_h, id$resp_p, id$orig_h, id$orig_p);
set_contents_file(id, CONTENTS_ORIG, open(orig_file));
set_contents_file(id, CONTENTS_RESP, open(resp_file));
add demuxed_conn[id];
return T;
}
event connection_finished(c: connection)
{
delete demuxed_conn[c$id];
}
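# Usage sketch (hypothetical trigger): demux both directions of any
# telnet connection once it is established.
#
# event connection_established(c: connection)
#	{
#	if ( c$id$resp_p == 23/tcp )
#		demux_conn(c$id, "telnet", "keys", "replies");
#	}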

View file

@ -1,156 +0,0 @@
# $Id: detect-protocols-http.bro,v 1.1.4.2 2006/05/31 00:16:21 sommer Exp $
#
# Identifies protocols that use HTTP.
@load detect-protocols
module DetectProtocolHTTP;
export {
# Defines characteristics of a protocol. All attributes must match
# to trigger the detection. We match patterns against lower-case
# versions of the data.
type protocol : record {
url: pattern &optional;
client_header: pattern &optional;
client_header_content: pattern &optional;
server_header: pattern &optional;
server_header_content: pattern &optional;
};
const protocols: table[string] of protocol = {
["Kazaa"] = [$url=/^\/\.hash=.*/, $server_header=/^x-kazaa.*/],
["Gnutella"] = [$url=/^\/(uri-res|gnutella).*/,
$server_header=/^x-gnutella-.*/],
["Gnutella_"] = [$url=/^\/(uri-res|gnutella).*/,
$server_header=/^x-(content-urn|features).*/],
["Gnutella__"] = [$url=/^\/(uri-res|gnutella).*/,
$server_header=/^content-type/,
$server_header_content=/.*x-gnutella.*/],
["BitTorrent"] = [$url=/^.*\/(scrape|announce)\?.*info_hash.*/],
["SOAP"] = [$client_header=/^([:print:]+-)?(soapaction|methodname|messagetype).*/],
["Squid"] = [$server_header=/^x-squid.*/],
} &redef;
}
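# Extension sketch (hypothetical entry, e.g. in a site policy): match a
# custom client header.
#
# redef DetectProtocolHTTP::protocols += {
#	["MyAgent"] = [$client_header=/^x-myagent-.*/],
# };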
# Bit masks.
const url_found = 1;
const client_header_found = 2;
const server_header_found = 4;
type index : record {
id: conn_id;
pid: string;
};
# Maps to characteristics found so far.
# FIXME: An integer would suffice for the bit-field
# if we had bit-operations ...
global conns: table[index] of set[count] &read_expire = 1 hrs;
function check_match(c: connection, pid: string, mask: set[count])
{
conns[[$id=c$id, $pid=pid]] = mask;
local p = protocols[pid];
if ( p?$url && url_found !in mask )
return;
if ( p?$client_header && client_header_found !in mask )
return;
if ( p?$server_header && server_header_found !in mask )
return;
# All found.
ProtocolDetector::found_protocol(c, ANALYZER_HTTP, pid);
}
event http_request(c: connection, method: string, original_URI: string,
unescaped_URI: string, version: string)
{
for ( pid in protocols )
{
local p = protocols[pid];
if ( ! p?$url )
next;
local mask: set[count];
local idx = [$id=c$id, $pid=pid];
if ( idx in conns )
mask = conns[idx];
if ( url_found in mask )
# Already found a match.
next;
# FIXME: There are people putting NULs into the URLs
# (BitTorrent), which to_lower() does not like. Not sure
# what the right fix is, though.
unescaped_URI = subst_string(unescaped_URI, "\x00", "");
if ( to_lower(unescaped_URI) == p$url )
{
add mask[url_found];
check_match(c, pid, mask);
}
}
}
event http_header(c: connection, is_orig: bool, name: string, value: string)
{
if ( name == /[sS][eE][rR][vV][eE][rR]/ )
{
# Try to extract the server software.
local s = split1(strip(value), /[[:space:]\/]/);
if ( s[1] == /[-a-zA-Z0-9_]+/ )
ProtocolDetector::found_protocol(c, ANALYZER_HTTP, s[1]);
}
for ( pid in protocols )
{
local p = protocols[pid];
local mask: set[count];
local idx = [$id=c$id, $pid=pid];
if ( idx in conns )
mask = conns[idx];
if ( p?$client_header && is_orig )
{
if ( client_header_found in mask )
return;
if ( to_lower(name) == p$client_header )
{
if ( p?$client_header_content )
if ( to_lower(value) !=
p$client_header_content )
return;
add mask[client_header_found];
check_match(c, pid, mask);
}
}
if ( p?$server_header && ! is_orig )
{
if ( server_header_found in mask )
return;
if ( to_lower(name) == p$server_header )
{
if ( p?$server_header_content )
if ( to_lower(value) !=
p$server_header_content )
return;
add mask[server_header_found];
check_match(c, pid, mask);
}
}
}
}

View file

@ -1,258 +0,0 @@
# $Id: detect-protocols.bro,v 1.1.4.4 2006/05/31 18:07:27 sommer Exp $
#
# Finds connections with protocols on non-standard ports using the DPM
# framework.
@load site
@load conn-id
@load notice
module ProtocolDetector;
export {
redef enum Notice += {
ProtocolFound, # raised for each connection found
ServerFound, # raised once per dst host/port/protocol tuple
};
# Table of (protocol, resp_h, resp_p) tuples known to be uninteresting
# in the given direction. For all other protocols detected on
# non-standard ports, we raise a ProtocolFound notice. (More specific
# filtering can then be done via notice_filters.)
#
	# Use 0.0.0.0 to wildcard-match any resp_h.
type dir: enum { NONE, INCOMING, OUTGOING, BOTH };
const valids: table[count, addr, port] of dir = {
# A couple of ports commonly used for benign HTTP servers.
# For now we want to see everything.
# [ANALYZER_HTTP, 0.0.0.0, 81/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 82/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 83/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 88/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 8001/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 8090/tcp] = OUTGOING,
# [ANALYZER_HTTP, 0.0.0.0, 8081/tcp] = OUTGOING,
#
# [ANALYZER_HTTP, 0.0.0.0, 6346/tcp] = BOTH, # Gnutella
# [ANALYZER_HTTP, 0.0.0.0, 6347/tcp] = BOTH, # Gnutella
# [ANALYZER_HTTP, 0.0.0.0, 6348/tcp] = BOTH, # Gnutella
} &redef;
# Set of analyzers for which we suppress ServerFound notices
# (but not ProtocolFound). Along with avoiding clutter in the
# log files, this also saves memory because for these we don't
# need to remember which servers we already have reported, which
# for some can be a lot.
const suppress_servers: set [count] = {
# ANALYZER_HTTP
} &redef;
# We consider a connection to use a protocol X if the analyzer for X
# is still active (i) after an interval of minimum_duration, or (ii)
# after a payload volume of minimum_volume, or (iii) at the end of the
# connection.
const minimum_duration = 30 secs &redef;
const minimum_volume = 4e3 &redef; # bytes
# How often to check the size of the connection.
const check_interval = 5 secs;
# Entry point for other analyzers to report that they recognized
# a certain (sub-)protocol.
global found_protocol: function(c: connection, analyzer: count,
protocol: string);
# Table keeping reported (server, port, analyzer) tuples (and their
# reported sub-protocols).
global servers: table[addr, port, string] of set[string]
&read_expire = 14 days;
}
# Table that tracks currently active dynamic analyzers per connection.
global conns: table[conn_id] of set[count];
# Table of reports by other analyzers about the protocol used in a connection.
global protocols: table[conn_id] of set[string];
type protocol : record {
a: string; # analyzer name
sub: string; # "sub-protocols" reported by other sources
};
function get_protocol(c: connection, a: count) : protocol
{
local str = "";
if ( c$id in protocols )
{
for ( p in protocols[c$id] )
str = |str| > 0 ? fmt("%s/%s", str, p) : p;
}
return [$a=analyzer_name(a), $sub=str];
}
function fmt_protocol(p: protocol) : string
{
return p$sub != "" ? fmt("%s (via %s)", p$sub, p$a) : p$a;
}
function do_notice(c: connection, a: count, d: dir)
{
if ( d == BOTH )
return;
if ( d == INCOMING && is_local_addr(c$id$resp_h) )
return;
if ( d == OUTGOING && ! is_local_addr(c$id$resp_h) )
return;
local p = get_protocol(c, a);
local s = fmt_protocol(p);
NOTICE([$note=ProtocolFound,
$msg=fmt("%s %s on port %s", id_string(c$id), s, c$id$resp_p),
$sub=s, $conn=c, $n=a]);
# We report multiple ServerFound's per host if we find a new
# sub-protocol.
local known = [c$id$resp_h, c$id$resp_p, p$a] in servers;
local newsub = F;
if ( known )
newsub = (p$sub != "" &&
p$sub !in servers[c$id$resp_h, c$id$resp_p, p$a]);
if ( (! known || newsub) && a !in suppress_servers )
{
NOTICE([$note=ServerFound,
$msg=fmt("%s: %s server on port %s%s", c$id$resp_h, s,
c$id$resp_p, (known ? " (update)" : "")),
$p=c$id$resp_p, $sub=s, $conn=c, $src=c$id$resp_h, $n=a]);
if ( ! known )
servers[c$id$resp_h, c$id$resp_p, p$a] = set();
add servers[c$id$resp_h, c$id$resp_p, p$a][p$sub];
}
}
function report_protocols(c: connection)
{
# We only report the connection if both sides have transferred data.
if ( c$resp$size == 0 || c$orig$size == 0 )
{
delete conns[c$id];
delete protocols[c$id];
return;
}
local analyzers = conns[c$id];
for ( a in analyzers )
{
if ( [a, c$id$resp_h, c$id$resp_p] in valids )
do_notice(c, a, valids[a, c$id$resp_h, c$id$resp_p]);
else if ( [a, 0.0.0.0, c$id$resp_p] in valids )
do_notice(c, a, valids[a, 0.0.0.0, c$id$resp_p]);
else
do_notice(c, a, NONE);
append_addl(c, analyzer_name(a));
}
delete conns[c$id];
delete protocols[c$id];
}
event ProtocolDetector::check_connection(c: connection)
{
if ( c$id !in conns )
return;
local duration = network_time() - c$start_time;
local size = c$resp$size + c$orig$size;
if ( duration >= minimum_duration || size >= minimum_volume )
report_protocols(c);
else
{
local delay = min_interval(minimum_duration - duration,
check_interval);
schedule delay { ProtocolDetector::check_connection(c) };
}
}
event connection_state_remove(c: connection)
{
if ( c$id !in conns )
{
delete protocols[c$id];
return;
}
# Reports all analyzers that have remained to the end.
report_protocols(c);
}
event protocol_confirmation(c: connection, atype: count, aid: count)
{
# Don't report anything running on a well-known port.
if ( atype in dpd_config && c$id$resp_p in dpd_config[atype]$ports )
return;
if ( c$id in conns )
{
local analyzers = conns[c$id];
add analyzers[atype];
}
else
{
conns[c$id] = set(atype);
local delay = min_interval(minimum_duration, check_interval);
schedule delay { ProtocolDetector::check_connection(c) };
}
}
# event connection_analyzer_disabled(c: connection, analyzer: count)
# {
# if ( c$id !in conns )
# return;
#
# delete conns[c$id][analyzer];
# }
function append_proto_addl(c: connection)
{
for ( a in conns[c$id] )
append_addl(c, fmt_protocol(get_protocol(c, a)));
}
function found_protocol(c: connection, analyzer: count, protocol: string)
{
# Don't report anything running on a well-known port.
if ( analyzer in dpd_config &&
c$id$resp_p in dpd_config[analyzer]$ports )
return;
if ( c$id !in protocols )
protocols[c$id] = set();
add protocols[c$id][protocol];
}
event connection_state_remove(c: connection)
{
if ( c$id !in conns )
return;
append_proto_addl(c);
}
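# Local policy sketch (illustrative): treat outbound HTTP on 8000/tcp as
# expected so it is not reported, mirroring the commented entries in the
# valids table above.
#
# redef ProtocolDetector::valids += {
#	[ANALYZER_HTTP, 0.0.0.0, 8000/tcp] = ProtocolDetector::OUTGOING,
# };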

View file

@ -1,525 +0,0 @@
# $Id: dhcp.bro 4054 2007-08-14 21:45:58Z pclin $
@load dpd
@load weird
module DHCP;
export {
# Set to false to disable printing to dhcp.log.
const logging = T &redef;
}
# Type of states in DHCP client. See Figure 5 in RFC 2131.
# Each state name is prefixed with DHCP_ to avoid name conflicts.
type dhcp_state: enum {
DHCP_INIT_REBOOT,
DHCP_INIT,
DHCP_SELECTING,
DHCP_REQUESTING,
DHCP_REBINDING,
DHCP_BOUND,
DHCP_RENEWING,
DHCP_REBOOTING,
# This state is not in Figure 5. Client has been externally configured.
DHCP_INFORM,
};
global dhcp_log: file;
# Source port 68: client -> server; source port 67: server -> client.
global dhcp_ports: set[port] = { 67/udp, 68/udp } &redef;
redef dpd_config += { [ANALYZER_DHCP_BINPAC] = [$ports = dhcp_ports] };
# Default handling for peculiarities in DHCP analysis.
redef Weird::weird_action += {
["DHCP_no_type_option"] = Weird::WEIRD_FILE,
["DHCP_wrong_op_type"] = Weird::WEIRD_FILE,
["DHCP_wrong_msg_type"] = Weird::WEIRD_FILE,
};
# Types of DHCP messages, identified from the 'options' field. See RFC 1533.
global dhcp_msgtype_name: table[count] of string = {
[1] = "DHCP_DISCOVER",
[2] = "DHCP_OFFER",
[3] = "DHCP_REQUEST",
[4] = "DHCP_DECLINE",
[5] = "DHCP_ACK",
[6] = "DHCP_NAK",
[7] = "DHCP_RELEASE",
[8] = "DHCP_INFORM",
};
# Type of DHCP client state, inferred from the messages. See RFC 2131, fig 5.
global dhcp_state_name: table[dhcp_state] of string = {
[DHCP_INIT_REBOOT] = "INIT-REBOOT",
[DHCP_INIT] = "INIT",
[DHCP_SELECTING] = "SELECTING",
[DHCP_REQUESTING] = "REQUESTING",
[DHCP_REBINDING] = "REBINDING",
[DHCP_BOUND] = "BOUND",
[DHCP_RENEWING] = "RENEWING",
[DHCP_REBOOTING] = "REBOOTING",
[DHCP_INFORM] = "INFORM",
};
type dhcp_session_info: record {
state: dhcp_state; # the state of a DHCP client
seq: count; # sequence of session in the trace
lease: interval; # lease time of an IP address
h_addr: string; # hardware/MAC address of the client
};
# Track the DHCP session info of each client, indexed by the transaction ID.
global dhcp_session: table[count] of dhcp_session_info
&default = record($state = DHCP_INIT_REBOOT, $seq = 0, $lease = 0 sec,
$h_addr = "")
&write_expire = 5 min
;
# We need the following table to track some DHCPINFORM messages since they
# use xid = 0 (I do not know why), starting from the second pair of INFORM
# and ACK. Since the client address is ready before DHCPINFORM, we can use
# it as the index to find its corresponding xid.
global session_xid: table[addr] of count &read_expire = 30 sec;
# Count how many DHCP sessions have been detected, for use in dhcp_session_seq.
global pkt_cnt: count = 0;
global session_cnt: count = 0;
# Record the address of client that sends a DHCPINFORM message with xid = 0.
global recent_client: addr;
global BROADCAST_ADDR = 255.255.255.255;
global NULL_ADDR = 0.0.0.0;
# Used to detect if an ACK is duplicated. They are used only in dhcp_ack().
# We put them here since the Bro scripting language lacks the equivalent of "static" variables.
global ack_from: addr;
global duplicated_ack: bool;
function warning_wrong_state(msg_type: count): string
{
return fmt("%s not sent in a correct state.",
dhcp_msgtype_name[msg_type]);
}
function dhcp_message(c: connection, seq: count, show_conn: bool): string
{
local conn_info = fmt("%.06f #%d", network_time(), seq);
if ( show_conn )
return fmt("%s %s > %s", conn_info,
endpoint_id(c$id$orig_h, c$id$orig_p),
endpoint_id(c$id$resp_h, c$id$resp_p));
return conn_info;
}
function new_dhcp_session(xid: count, state: dhcp_state, h_addr: string)
: dhcp_session_info
{
local session: dhcp_session_info;
session$state = state;
session$seq = ++session_cnt;
session$lease = 0 sec;
session$h_addr = h_addr;
dhcp_session[xid] = session;
return session;
}
event bro_init()
{
if ( logging )
dhcp_log = open_log_file("dhcp");
}
event dhcp_discover(c: connection, msg: dhcp_msg, req_addr: addr)
{
local old_session = T;
if ( msg$xid !in dhcp_session )
{
local session =
new_dhcp_session(msg$xid, DHCP_SELECTING, msg$h_addr);
old_session = F;
}
if ( logging )
{
if ( old_session &&
dhcp_session[msg$xid]$state == DHCP_SELECTING )
print dhcp_log, fmt("%s DISCOVER (duplicated)",
dhcp_message(c, dhcp_session[msg$xid]$seq, F));
else
print dhcp_log,
fmt("%s DISCOVER (xid = %x, client state = %s)",
dhcp_message(c, dhcp_session[msg$xid]$seq, T),
msg$xid, dhcp_state_name[dhcp_session[msg$xid]$state]);
}
}
event dhcp_offer(c: connection, msg: dhcp_msg, mask: addr,
router: dhcp_router_list, lease: interval, serv_addr: addr)
{
local standalone = msg$xid !in dhcp_session;
# An OFFER within a known session is only expected in the SELECTING state.
local err_state =
! standalone && dhcp_session[msg$xid]$state != DHCP_SELECTING;
if ( logging )
{
# Note that no OFFER messages are considered duplicated,
# since they may come from multiple DHCP servers in a session.
if ( standalone )
print dhcp_log, fmt("%s OFFER (standalone)",
dhcp_message(c, ++session_cnt, T));
else if ( err_state )
print dhcp_log, fmt("%s OFFER (in error state %s)",
dhcp_message(c, dhcp_session[msg$xid]$seq, T),
dhcp_state_name[dhcp_session[msg$xid]$state]);
else
print dhcp_log, fmt("%s OFFER (client state = %s)",
dhcp_message(c, dhcp_session[msg$xid]$seq, T),
dhcp_state_name[DHCP_SELECTING]);
}
}
event dhcp_request(c: connection, msg: dhcp_msg,
req_addr: addr, serv_addr: addr)
{
local log_info: string;
if ( msg$xid in dhcp_session )
{
local state = dhcp_session[msg$xid]$state;
if ( state == DHCP_REBOOTING )
recent_client = req_addr;
else
recent_client = c$id$orig_h;
session_xid[recent_client] = msg$xid;
# Track the client even when logging is off; only the printing
# below depends on the logging flag.
if ( ! logging )
return;
if ( state == DHCP_RENEWING || state == DHCP_REBINDING ||
state == DHCP_REQUESTING || state == DHCP_REBOOTING )
print dhcp_log, fmt("%s REQUEST (duplicated)",
dhcp_message(c, dhcp_session[msg$xid]$seq, F));
else
{
log_info = dhcp_message(c, dhcp_session[msg$xid]$seq, T);
print dhcp_log, fmt("%s REQUEST (in error state %s)",
log_info,
dhcp_state_name[dhcp_session[msg$xid]$state]);
}
}
else
{
local d_state = DHCP_REBOOTING;
if ( c$id$resp_h != BROADCAST_ADDR )
d_state = DHCP_RENEWING;
else if ( msg$ciaddr != NULL_ADDR )
d_state = DHCP_REBINDING;
else if ( serv_addr != NULL_ADDR )
d_state = DHCP_REQUESTING;
local session = new_dhcp_session(msg$xid, d_state, msg$h_addr);
if ( session$state == DHCP_REBOOTING )
recent_client = req_addr;
else
recent_client = c$id$orig_h;
session_xid[recent_client] = msg$xid;
if ( logging )
{
log_info = dhcp_message(c, session$seq, T);
if ( req_addr != NULL_ADDR )
log_info = fmt("%s REQUEST %As",
log_info, req_addr);
else
log_info = fmt("%s REQUEST", log_info);
print dhcp_log, fmt("%s (xid = %x, client state = %s)",
log_info, msg$xid,
dhcp_state_name[session$state]);
}
}
}
event dhcp_decline(c: connection, msg: dhcp_msg)
{
local old_session = msg$xid in dhcp_session;
local err_state = F;
if ( old_session )
{
if ( dhcp_session[msg$xid]$state == DHCP_REQUESTING )
dhcp_session[msg$xid]$state = DHCP_INIT;
else
err_state = T;
}
else
new_dhcp_session(msg$xid, DHCP_INIT, "");
if ( ! logging )
return;
if ( old_session )
{
if ( err_state )
print dhcp_log, fmt("%s DECLINE (in error state %s)",
dhcp_message(c, dhcp_session[msg$xid]$seq, T),
dhcp_state_name[dhcp_session[msg$xid]$state]);
else
print dhcp_log, fmt("%s DECLINE (duplicated)",
dhcp_message(c, dhcp_session[msg$xid]$seq, F));
}
else
print dhcp_log, fmt("%s DECLINE (xid = %x)",
dhcp_message(c, ++session_cnt, T), msg$xid);
}
event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr,
router: dhcp_router_list, lease: interval, serv_addr: addr)
{
local log_info: string;
if ( msg$xid == 0 )
{ # An ACK for a DHCPINFORM message with xid = 0.
local xid =
c$id$orig_h in session_xid ?
# An ACK to the client.
session_xid[c$id$orig_h]
:
# Assume ACK from a relay agent to the server.
session_xid[recent_client];
local seq: count;
if ( xid > 0 )
{
duplicated_ack = dhcp_session[xid]$state != DHCP_INFORM;
dhcp_session[xid]$state = DHCP_BOUND;
seq = dhcp_session[xid]$seq;
}
else
{
# This is a weird situation. We arbitrarily set
# duplicated_ack to false to have more information
# shown.
duplicated_ack = F;
seq = session_cnt;
}
if ( ! logging )
return;
log_info = dhcp_message(c, seq, F);
if ( c$id$orig_h in session_xid )
{
if ( duplicated_ack )
print dhcp_log, fmt("%s ACK (duplicated)",
log_info);
else
print dhcp_log,
fmt("%s ACK (client state = %s)",
log_info,
dhcp_state_name[DHCP_BOUND]);
}
else
print dhcp_log,
fmt("%s ACK (relay agent at = %As)",
log_info, c$id$orig_h);
return;
}
if ( msg$xid in dhcp_session )
{
local last_state = dhcp_session[msg$xid]$state;
local from_reboot_state = last_state == DHCP_REBOOTING;
if ( last_state == DHCP_REQUESTING ||
last_state == DHCP_REBOOTING ||
last_state == DHCP_RENEWING ||
last_state == DHCP_REBINDING ||
last_state == DHCP_INFORM )
{
dhcp_session[msg$xid]$state = DHCP_BOUND;
dhcp_session[msg$xid]$lease = lease;
}
if ( ! logging )
return;
if ( last_state == DHCP_BOUND )
{
log_info = dhcp_message(c, dhcp_session[msg$xid]$seq, F);
if ( c$id$orig_h == ack_from )
log_info = fmt("%s ACK (duplicated)",
log_info);
else
# Not a duplicated ACK.
log_info = fmt("%s ACK (relay agent at = %As)",
log_info, c$id$orig_h);
}
else
{
ack_from = c$id$orig_h;
# If in a reboot state, we had better
# explicitly show the originator address
# and the destination address of the ACK,
# because the client initially has a
# zero address.
if ( from_reboot_state )
log_info = dhcp_message(c, dhcp_session[msg$xid]$seq, T);
else
log_info = dhcp_message(c, dhcp_session[msg$xid]$seq, F);
if ( last_state != DHCP_INFORM &&
lease > 0 sec )
log_info = fmt("%s ACK (lease time = %s, ",
log_info, lease);
else
log_info = fmt("%s ACK (", log_info);
log_info = fmt("%sclient state = %s)",
log_info,
dhcp_state_name[dhcp_session[msg$xid]$state]);
}
print dhcp_log, log_info;
}
else if ( logging )
print dhcp_log, fmt("%s ACK (standalone)",
dhcp_message(c, ++session_cnt, T));
}
event dhcp_nak(c: connection, msg: dhcp_msg)
{
if ( msg$xid in dhcp_session )
{
local last_state = dhcp_session[msg$xid]$state;
if ( last_state == DHCP_REQUESTING ||
last_state == DHCP_REBOOTING ||
last_state == DHCP_RENEWING ||
last_state == DHCP_REBINDING )
dhcp_session[msg$xid]$state = DHCP_INIT;
if ( logging )
print dhcp_log, fmt("%s NAK (client state = %s)",
dhcp_message(c, dhcp_session[msg$xid]$seq, F),
dhcp_state_name[dhcp_session[msg$xid]$state]);
}
else if ( logging )
print dhcp_log, fmt("%s NAK (standalone)",
dhcp_message(c, ++session_cnt, T));
}
event dhcp_release(c: connection, msg: dhcp_msg)
{
local old_session = msg$xid in dhcp_session;
if ( ! old_session )
# We assume the client goes back to DHCP_INIT
# because the RFC does not specify which state to go to.
new_dhcp_session(msg$xid, DHCP_INIT, "");
if ( ! logging )
return;
if ( old_session )
{
if ( dhcp_session[msg$xid]$state == DHCP_INIT )
print dhcp_log, fmt("%s RELEASE (duplicated)",
dhcp_message(c, dhcp_session[msg$xid]$seq, F));
else
print dhcp_log, fmt("%s RELEASE, (client state = %s)",
dhcp_message(c, dhcp_session[msg$xid]$seq, F),
dhcp_state_name[dhcp_session[msg$xid]$state]);
}
else
print dhcp_log, fmt("%s RELEASE (xid = %x, IP addr = %As)",
dhcp_message(c, session_cnt, T), msg$xid, c$id$orig_h);
}
event dhcp_inform(c: connection, msg: dhcp_msg)
{
recent_client = c$id$orig_h;
if ( msg$xid == 0 )
{
# Oops! Try to associate message with transaction ID 0 with
# a previous session.
local xid: count;
local seq: count;
if ( c$id$orig_h in session_xid )
{
xid = session_xid[c$id$orig_h];
dhcp_session[xid]$state = DHCP_INFORM;
seq = dhcp_session[xid]$seq;
}
else
{
# Weird: xid = 0 and no previous INFORM-ACK dialog.
xid = 0;
seq = ++session_cnt;
# Just record that an INFORM message has appeared,
# although the xid is not useful.
session_xid[c$id$orig_h] = 0;
}
if ( logging )
print dhcp_log,
fmt("%s INFORM (xid = %x, client state = %s)",
dhcp_message(c, seq, T),
xid, dhcp_state_name[DHCP_INFORM]);
return;
}
if ( msg$xid in dhcp_session )
{
if ( logging )
{
if ( dhcp_session[msg$xid]$state == DHCP_INFORM )
print dhcp_log, fmt("%s INFORM (duplicated)",
dhcp_message(c, dhcp_session[msg$xid]$seq, F));
else
print dhcp_log,
fmt("%s INFORM (duplicated, client state = %s)",
dhcp_message(c, dhcp_session[msg$xid]$seq, F),
dhcp_state_name[dhcp_session[msg$xid]$state]);
}
return;
}
local session = new_dhcp_session(msg$xid, DHCP_INFORM, msg$h_addr);
# Associate this transaction ID with the host so we can identify
# subsequent pairs of INFORM/ACK if client uses xid=0.
session_xid[c$id$orig_h] = msg$xid;
if ( logging )
print dhcp_log, fmt("%s INFORM (xid = %x, client state = %s)",
dhcp_message(c, session$seq, T),
msg$xid, dhcp_state_name[session$state]);
}

View file

@ -1,81 +0,0 @@
# $Id: dns-info.bro 3919 2007-01-14 00:27:09Z vern $
# Types, errors, and fields for analyzing DNS data. A helper file
# for dns.bro.
const PTR = 12;
const EDNS = 41;
const ANY = 255;
const query_types = {
[1] = "A", [2] = "NS", [3] = "MD", [4] = "MF",
[5] = "CNAME", [6] = "SOA", [7] = "MB", [8] = "MG",
[9] = "MR", [10] = "NULL", [11] = "WKS", [PTR] = "PTR",
[13] = "HINFO", [14] = "MINFO", [15] = "MX", [16] = "TXT",
[17] = "RP", [18] = "AFSDB", [19] = "X25", [20] = "ISDN",
[21] = "RT", [22] = "NSAP", [23] = "NSAP-PTR", [24] = "SIG",
[25] = "KEY", [26] = "PX" , [27] = "GPOS", [28] = "AAAA",
[29] = "LOC", [30] = "EID", [31] = "NIMLOC", [32] = "NB",
[33] = "SRV", [34] = "ATMA", [35] = "NAPTR", [36] = "KX",
[37] = "CERT", [38] = "A6", [39] = "DNAME", [40] = "SINK",
[EDNS] = "EDNS", [42] = "APL", [43] = "DS", [44] = "SINK",
[45] = "SSHFP", [46] = "RRSIG", [47] = "NSEC", [48] = "DNSKEY",
[49] = "DHCID", [99] = "SPF", [100] = "DINFO", [101] = "UID",
[102] = "GID", [103] = "UNSPEC", [249] = "TKEY", [250] = "TSIG",
[251] = "IXFR", [252] = "AXFR", [253] = "MAILB", [254] = "MAILA",
[32768] = "TA", [32769] = "DLV",
[ANY] = "*",
} &default = function(n: count): string { return fmt("query-%d", n); };
const DNS_code_types = {
[0] = "X0",
[1] = "Xfmt",
[2] = "Xsrv",
[3] = "Xnam",
[4] = "Ximp",
[5] = "X[",
} &default = function(n: count): string { return "?"; };
# Used for non-TSIG/EDNS types.
const base_error = {
[0] = "NOERROR", # No Error
[1] = "FORMERR", # Format Error
[2] = "SERVFAIL", # Server Failure
[3] = "NXDOMAIN", # Non-Existent Domain
[4] = "NOTIMP", # Not Implemented
[5] = "REFUSED", # Query Refused
[6] = "YXDOMAIN", # Name Exists when it should not
[7] = "YXRRSET", # RR Set Exists when it should not
[8] = "NXRRSet", # RR Set that should exist does not
[9] = "NOTAUTH", # Server Not Authoritative for zone
[10] = "NOTZONE", # Name not contained in zone
[11] = "unassigned-11", # available for assignment
[12] = "unassigned-12", # available for assignment
[13] = "unassigned-13", # available for assignment
[14] = "unassigned-14", # available for assignment
[15] = "unassigned-15", # available for assignment
[16] = "BADVERS", # for EDNS, collision w/ TSIG
[17] = "BADKEY", # Key not recognized
[18] = "BADTIME", # Signature out of time window
[19] = "BADMODE", # Bad TKEY Mode
[20] = "BADNAME", # Duplicate key name
[21] = "BADALG", # Algorithm not supported
[22] = "BADTRUNC", # draft-ietf-dnsext-tsig-sha-05.txt
[3842] = "BADSIG", # 16 <= number collision with EDNS(16);
# this is a translation from TSIG(16)
} &default = function(n: count): string { return "?"; };
# This deciphers EDNS Z field values.
const edns_zfield = {
[0] = "NOVALUE", # regular entry
[32768] = "DNS_SEC_OK", # accepts DNS Sec RRs
} &default = function(n: count): string { return "?"; };
const dns_class = {
[1] = "C_INTERNET",
[2] = "C_CSNET",
[3] = "C_CHAOS",
[4] = "C_HESOD",
[254] = "C_NONE",
[255] = "C_ANY",
} &default = function(n: count): string { return "?"; };

View file

@ -1,65 +0,0 @@
# $Id: dns-lookup.bro 340 2004-09-09 06:38:27Z vern $
@load notice
redef enum Notice += {
DNS_MappingChanged, # some sort of change WRT previous Bro lookup
};
const dns_interesting_changes = {
"unverified", "old name", "new name", "mapping",
} &redef;
function dump_dns_mapping(msg: string, dm: dns_mapping): bool
{
if ( msg in dns_interesting_changes ||
127.0.0.1 in dm$addrs )
{
local req = dm$req_host == "" ?
fmt("%As", dm$req_addr) : dm$req_host;
NOTICE([$note=DNS_MappingChanged,
$msg=fmt("DNS %s: %s/%s %s-> %As", msg, req,
dm$hostname, dm$valid ?
"" : "(invalid) ", dm$addrs),
$sub=msg]);
return T;
}
else
return F;
}
event dns_mapping_valid(dm: dns_mapping)
{
dump_dns_mapping("valid", dm);
}
event dns_mapping_unverified(dm: dns_mapping)
{
dump_dns_mapping("unverified", dm);
}
event dns_mapping_new_name(dm: dns_mapping)
{
dump_dns_mapping("new name", dm);
}
event dns_mapping_lost_name(dm: dns_mapping)
{
dump_dns_mapping("lost name", dm);
}
event dns_mapping_name_changed(old_dm: dns_mapping, new_dm: dns_mapping)
{
if ( dump_dns_mapping("old name", old_dm) )
dump_dns_mapping("new name", new_dm);
}
event dns_mapping_altered(dm: dns_mapping,
old_addrs: set[addr], new_addrs: set[addr])
{
if ( dump_dns_mapping("mapping", dm) )
NOTICE([$note=DNS_MappingChanged,
$msg=fmt("changed addresses: %As -> %As", old_addrs, new_addrs),
$sub="changed addresses"]);
}

View file

@ -1,675 +0,0 @@
# $Id: dns.bro 6724 2009-06-07 09:23:03Z vern $
@load notice
@load weird
@load udp-common
@load dns-info
module DNS;
export {
# Lookups of hosts in here are flagged ...
const sensitive_lookup_hosts: set[addr] &redef;
# ... unless the lookup comes from one of these hosts.
const okay_to_lookup_sensitive_hosts: set[addr] &redef;
# Start considering whether we're seeing PTR scanning if we've seen
# at least this many rejected PTR queries.
const report_rejected_PTR_thresh = 100 &redef;
# Generate a PTR_scan event if at any point (once we're above
# report_rejected_PTR_thresh) we see this many more distinct
# rejected PTR requests than distinct answered PTR requests.
const report_rejected_PTR_factor = 2.0 &redef;
# The following sources are allowed to do PTR scanning.
const allow_PTR_scans: set[addr] &redef;
# Annotations that if returned for a PTR lookup actually indicate
# a rejected query; for example, "illegal-address.lbl.gov".
const actually_rejected_PTR_anno: set[string] &redef;
# Hosts allowed to do zone transfers.
const zone_transfers_okay: set[addr] &redef;
# Set to false to disable printing to dns.log.
const logging = T &redef;
redef enum Notice += {
SensitiveDNS_Lookup, # DNS lookup of sensitive hostname/addr
DNS_PTR_Scan, # A set of PTR lookups
DNS_PTR_Scan_Summary, # Summary of a set of PTR lookups
ResolverInconsistency, # DNS answer changed
ZoneTransfer, # a DNS zone transfer request was seen
};
# This is a list of domains that have a history of providing
# more RRs in response than they are supposed to. There is
# some danger here in that record inconsistencies will not be
# identified for these domains...
const bad_domain_resp: set[string] &redef;
# Same idea, except that it applies to a list of host names.
const bad_host_resp: set[string] &redef;
# Turn resolver consistency checking on/off.
const resolver_consist_check = F &redef;
# Should queries be checked against 'bad' domains?
const check_domain_list = T;
# List of 'bad' domains.
const hostile_domain_list: set[string] &redef;
# Used for PTR scan detection. Exported so their timeouts can be
# adjusted.
global distinct_PTR_requests:
table[addr, string] of count &default = 0 &write_expire = 5 min;
global distinct_rejected_PTR_requests:
table[addr] of count &default = 0 &write_expire = 5 min;
global distinct_answered_PTR_requests:
table[addr] of count &default = 0 &write_expire = 5 min;
}
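# Example site tuning, as one might do from a local policy script
# (commented out; all addresses and values are illustrative only):
#
# redef DNS::zone_transfers_okay += { 10.0.0.53 };
# redef DNS::allow_PTR_scans += { 10.0.0.99 };
# redef DNS::resolver_consist_check = T;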
redef capture_filters += {
["dns"] = "port 53",
["netbios-ns"] = "udp port 137",
};
# DPM configuration.
global dns_ports = { 53/udp, 53/tcp, 137/udp } &redef;
redef dpd_config += { [ANALYZER_DNS] = [$ports = dns_ports] };
global dns_udp_ports = { 53/udp, 137/udp } &redef;
global dns_tcp_ports = { 53/tcp } &redef;
redef dpd_config += { [ANALYZER_DNS_UDP_BINPAC] = [$ports = dns_udp_ports] };
redef dpd_config += { [ANALYZER_DNS_TCP_BINPAC] = [$ports = dns_tcp_ports] };
# Default handling for peculiarities in DNS analysis. You can redef these
# again in your site-specific script if you want different behavior.
redef Weird::weird_action += {
["DNS_AAAA_neg_length"] = Weird::WEIRD_FILE,
["DNS_Conn_count_too_large"] = Weird::WEIRD_FILE,
["DNS_NAME_too_long"] = Weird::WEIRD_FILE,
["DNS_RR_bad_length"] = Weird::WEIRD_FILE,
["DNS_RR_length_mismatch"] = Weird::WEIRD_FILE,
["DNS_RR_unknown_type"] = Weird::WEIRD_FILE,
["DNS_label_forward_compress_offset"] = Weird::WEIRD_FILE,
["DNS_label_len_gt_name_len"] = Weird::WEIRD_FILE,
["DNS_label_len_gt_pkt"] = Weird::WEIRD_FILE,
["DNS_label_too_long"] = Weird::WEIRD_FILE,
["DNS_name_too_long"] = Weird::WEIRD_FILE,
["DNS_truncated_RR_rdlength_lt_len"] = Weird::WEIRD_FILE,
["DNS_truncated_ans_too_short"] = Weird::WEIRD_FILE,
["DNS_truncated_len_lt_hdr_len"] = Weird::WEIRD_FILE,
["DNS_truncated_quest_too_short"] = Weird::WEIRD_FILE,
};
type dns_session_info: record {
id: count;
is_zone_transfer: bool;
last_active: time; # when we last saw activity
# Indexed by query id, returns string annotation corresponding to
# queries for which no answer seen yet.
pending_queries: table[count] of string;
};
# Indexed by client and server.
global dns_sessions: table[addr, addr, count] of dns_session_info;
global num_dns_sessions = 0;
const PTR_pattern = /[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\.in-addr\.arpa/;
# Keeps track of for which addresses we processed a PTR_scan event.
global did_PTR_scan_event: table[addr] of count &default = 0;
# The following definitions relate to tracking when DNS records
# change and whether they do so in a consistent fashion.
type dns_response_record: record {
dns_name: string; # domain name in question
dns_type: count; # type of query
num_resp: count; # number of responses
resp_count: count; # how many responses have been registered
addrs: set[addr]; # addresses in response
};
global dns_history: table[string, count, count] of dns_response_record;
global did_zone_transfer_notice: table[addr] of count &default = 0;
# Sample known irregular domains.
redef bad_domain_resp += { "instacontent.net", "mirror-image.net", };
# Sample hostile domains.
redef hostile_domain_list += { "undernet.org", "afraid.org", };
global dns_log : file;
event bro_init()
{
if ( logging )
dns_log = open_log_file("dns");
}
event remove_name(name: string, qtype: count, id: count)
{
if ( [name, qtype, id] in dns_history )
{
# We need to remove the dns_history record along with its
# associated set of addresses.
local drr = dns_history[name, qtype, id];
local a: addr;
for ( a in drr$addrs )
delete drr$addrs[a];
delete dns_history[name, qtype, id];
}
else if ( logging )
print dns_log, fmt("ERROR in history session removal: %s/%d doesn't exist", name, qtype);
}
# Returns the second-level domain, so for example an argument of "a.b.c.d"
# returns "c.d".
function second_level_domain(name: string): string
{
local split_on_dots = split(name, /\./);
local num_dots = length(split_on_dots);
if ( num_dots <= 1 )
return name;
return fmt("%s.%s", split_on_dots[num_dots-1], split_on_dots[num_dots]);
}
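# For example (illustrative): second_level_domain("www.lbl.gov") yields
# "lbl.gov", while second_level_domain("localhost") is returned unchanged.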
function insert_name(c: connection, msg: dns_msg, ans: dns_answer, a: addr)
{
local drr: dns_response_record;
if ( [ans$query, ans$qtype, msg$id] !in dns_history )
{ # add record
drr$dns_name = ans$query;
drr$dns_type = ans$qtype;
# Here we modify the expected number of addresses to allow
# for the answer RRs along with any additional RRs that are
# provided.
drr$num_resp = msg$num_answers+msg$num_addl;
drr$resp_count = 0;
add drr$addrs[a];
dns_history[ans$query, ans$qtype, msg$id] = drr;
if ( ans$TTL < 0 sec )
# Strangely enough, the spec allows this,
# though it's hard to see why! But because
# of that, we don't generate a Weird, we
# just change the TTL to 0.
ans$TTL = 0 sec;
# Check the TTL, but allow a smidgen of skew to avoid
# possible race conditions.
schedule ans$TTL + 1 sec
{ remove_name(ans$query, ans$qtype, msg$id) };
}
else
{ # extract record and do some counting
drr = dns_history[ans$query, ans$qtype, msg$id];
# In some broken records, the number of reported records is 0.
# That makes the test below fail; to 'fix' it, set it to 1 ...
if ( drr$num_resp == 0 )
drr$num_resp = 1;
# Check if we have filled in the expected number of responses
# already - it should be > current responder count to allow
# for resolver timeouts. Addresses are only added if they
# are not already present. This comes at a slight performance
# cost.
if ( a !in drr$addrs )
{
add drr$addrs[a];
++drr$resp_count;
dns_history[ans$query, ans$qtype, msg$id]=drr;
}
if ( drr$num_resp >= drr$resp_count )
return;
if ( second_level_domain(ans$query) in bad_domain_resp )
return;
if ( ans$query in bad_host_resp )
return;
# Too many responses to the request, or some other
# inconsistency has been introduced.
NOTICE([$note=ResolverInconsistency, $conn=c,
$msg=fmt("address inconsistency for %s, %s", ans$query, a),
$dst=a]);
}
}
event expire_DNS_session(orig: addr, resp: addr, trans_id: count)
{
if ( [orig, resp, trans_id] in dns_sessions )
{
local session = dns_sessions[orig, resp, trans_id];
local last_active = session$last_active;
if ( network_time() > last_active + dns_session_timeout ||
done_with_network )
{
# Flush out any pending requests.
if ( logging )
{
for ( query in session$pending_queries )
print dns_log, fmt("%0.6f #%d %s",
network_time(), session$id,
session$pending_queries[query]);
print dns_log, fmt("%.06f #%d finish",
network_time(), session$id);
}
delete dns_sessions[orig, resp, trans_id];
}
else
schedule dns_session_timeout {
expire_DNS_session(orig, resp, trans_id)
};
}
}
function lookup_DNS_session(c: connection, trans_id: count): dns_session_info
{
local id = c$id;
local orig = id$orig_h;
local resp = id$resp_h;
if ( [orig, resp, trans_id] !in dns_sessions )
{
local session: dns_session_info;
session$id = ++num_dns_sessions;
session$last_active = network_time();
session$is_zone_transfer = F;
if ( logging )
print dns_log, fmt("%.06f #%d %s start",
c$start_time, session$id, id_string(id));
dns_sessions[orig, resp, trans_id] = session;
schedule 15 sec { expire_DNS_session(orig, resp, trans_id) };
append_addl(c, fmt("#%d", session$id));
return session;
}
else
return dns_sessions[orig, resp, trans_id];
}
event sensitive_addr_lookup(c: connection, a: addr, is_query: bool)
{
local orig = c$id$orig_h;
local resp = c$id$resp_h;
local holding = 0;
if ( orig in okay_to_lookup_sensitive_hosts )
return;
local session_id: string;
if ( [orig, resp, holding] in dns_sessions )
session_id = fmt("#%d", dns_sessions[orig, resp, holding]$id);
else
session_id = "#?";
local id = fmt("%s > %s (%s)", orig, resp, session_id);
if ( is_query )
NOTICE([$note=SensitiveDNS_Lookup, $conn=c,
$msg=fmt("%s PTR lookup of %s", id, a),
$sub="PTR lookup"]);
else
NOTICE([$note=SensitiveDNS_Lookup, $conn=c,
$msg=fmt("%s name lookup of %s", id, a),
$sub="name lookup"]);
}
function DNS_query_annotation(c: connection, msg: dns_msg, query: string,
qtype: count, is_zone_xfer: bool): string
{
local anno: string;
if ( (qtype == PTR || qtype == ANY) && query == PTR_pattern )
{
# convert PTR text to more readable form.
local a = ptr_name_to_addr(query);
if ( a in sensitive_lookup_hosts && ! is_zone_xfer )
event sensitive_addr_lookup(c, a, T);
anno = fmt("?%s %As", query_types[qtype], a);
}
else
anno = fmt("%s %s", query_types[qtype], query);
if ( ! is_zone_xfer &&
(msg$num_answers > 0 || msg$num_auth > 0 || msg$num_addl > 0) )
anno = fmt("%s <query addl = %d/%d/%d>", anno,
msg$num_answers, msg$num_auth, msg$num_addl);
return anno;
}
event dns_zone_transfer_request(c: connection, session: dns_session_info,
msg: dns_msg, query: string)
{
session$is_zone_transfer = T;
if ( ! is_tcp_port(c$id$orig_p) )
event conn_weird("UDP_zone_transfer", c);
local src = c$id$orig_h;
if ( src !in zone_transfers_okay &&
++did_zone_transfer_notice[src] == 1 )
{
NOTICE([$note=ZoneTransfer, $src=src, $conn=c,
$msg=fmt("transfer of %s requested by %s", query, src)]);
}
}
event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count)
{
local id = c$id;
local orig = id$orig_h;
local resp = id$resp_h;
local session = lookup_DNS_session(c, msg$id);
local anno = DNS_query_annotation(c, msg, query, qtype, F);
local report = fmt("%.06f #%d %s", network_time(), session$id, c$id$orig_h);
local q: string;
if ( query_types[qtype] == "AXFR" )
{
event dns_zone_transfer_request(c, session, msg, query);
q = DNS_query_annotation(c, msg, query, qtype, T);
report = fmt("%s ?%s", report, q);
}
else
report = fmt("%s <query ?%s> %s Trunc:%s Recurs:%s",
report, query_types[qtype], query, msg$TC, msg$RD);
if ( logging )
print dns_log, fmt("%s", report);
# Check to see if this is a host or MX lookup for a designated
# hostile domain.
if ( check_domain_list &&
(query_types[qtype] == "A" || query_types[qtype] == "MX") &&
second_level_domain(query) in hostile_domain_list )
{
NOTICE([$note=SensitiveDNS_Lookup, $conn=c,
$msg=fmt("%s suspicious domain lookup: %s", id, query)]);
}
session$pending_queries[msg$id] = anno;
session$last_active = network_time();
}
event dns_rejected(c: connection, msg: dns_msg,
query: string, qtype: count, qclass: count)
{
local session = lookup_DNS_session(c, msg$id);
local code = DNS_code_types[msg$rcode];
local id = msg$id;
if ( id in session$pending_queries )
{
if ( logging )
print dns_log, fmt("%.06f #%d %s %s", network_time(),
session$id,
session$pending_queries[id],
code);
delete session$pending_queries[id];
}
else if ( logging )
{
if ( c$start_time == network_time() )
print dns_log, fmt("%.06f #%d [?%s] %s", network_time(),
session$id, query, code);
else
print dns_log, fmt("%.06f #%d %s", network_time(),
session$id, code);
}
}
event PTR_scan_summary(src: addr)
{
NOTICE([$note=DNS_PTR_Scan_Summary, $src=src,
$msg=fmt("%s totaled %d/%d un/successful PTR lookups", src,
distinct_rejected_PTR_requests[src],
distinct_answered_PTR_requests[src]),
$sub="final summary"]);
}
event PTR_scan(src: addr)
{
++did_PTR_scan_event[src];
if ( src !in allow_PTR_scans && src !in okay_to_lookup_sensitive_hosts )
{
NOTICE([$note=DNS_PTR_Scan, $src=src,
$msg=fmt("%s has made %d/%d un/successful PTR lookups",
src, distinct_rejected_PTR_requests[src],
distinct_answered_PTR_requests[src]),
$sub="scan detected"]);
schedule 1 day { PTR_scan_summary(src) };
}
}
function check_PTR_scan(src: addr)
{
if ( src !in did_PTR_scan_event &&
distinct_rejected_PTR_requests[src] >=
distinct_answered_PTR_requests[src] * report_rejected_PTR_factor )
event PTR_scan(src);
}
function DNS_answer(c: connection, msg: dns_msg,
ans: dns_answer, annotation: string)
{
local is_answer = ans$answer_type == DNS_ANS;
local session = lookup_DNS_session(c, msg$id);
local report =
fmt("%.06f #%d %s", network_time(), session$id, c$id$orig_h);
local id = msg$id;
local query: string;
if ( id in session$pending_queries )
{
query = fmt("%s = <ans %s>", session$pending_queries[id],
query_types[ans$qtype]);
delete session$pending_queries[id];
report = fmt("%s %s", report, query);
}
else if ( session$is_zone_transfer )
{ # need to provide the query directly.
query = fmt("<ans %s>", query_types[ans$qtype]);
report = fmt("%s ?%s", report, query);
}
else
{
# No corresponding query. This can happen if it's
# already been deleted because we've already processed
# an answer to it; or if the session itself was timed
# out prior to this answer being generated. In the
# first case, we don't want to provide the query again;
# in the second, we do. We can determine that we're
# likely in the second case if either (1) this session
# was just now created, or (2) we're now processing the
# sole answer to the original query.
#
# However, for now we punt.
#
# if ( c$start_time == network_time() ||
# (is_answer && msg$num_answers == 1) )
# {
# query = DNS_query_annotation(c, msg, ans$query, ans$qtype, F);
# report = fmt("%s [?%s]", report, query);
# }
# else
# query = "";
query = fmt("<ans %s>", query_types[ans$qtype]);
report = fmt("%s %s", report, query);
}
# Append a bunch of additional annotation.
report = fmt("%s %s RCode:%s AA=%s TR=%s %s/%s/%s/%s",
report, annotation, base_error[msg$rcode], msg$AA, msg$TC,
msg$num_queries, msg$num_answers, msg$num_auth, msg$num_addl );
local src = c$id$orig_h;
if ( msg$rcode != 0 )
{
if ( /\?(PTR|\*.*in-addr).*/ in query )
##### should check for private address
{
if ( ++distinct_PTR_requests[src, query] == 1 &&
++distinct_rejected_PTR_requests[src] >=
report_rejected_PTR_thresh )
check_PTR_scan(src);
}
report = fmt("%s %s", report, DNS_code_types[msg$rcode]);
}
else if ( is_answer )
{
if ( /\?(PTR|\*.*in-addr).*/ in query )
{
if ( annotation in actually_rejected_PTR_anno )
{
if ( ++distinct_PTR_requests[src, query] == 1 &&
++distinct_rejected_PTR_requests[src] >=
report_rejected_PTR_thresh )
check_PTR_scan(src);
}
else
{
if ( ++distinct_PTR_requests[src, query] == 1 )
++distinct_answered_PTR_requests[src];
}
}
}
if ( logging )
print dns_log, fmt("%s TTL=%g", report, ans$TTL);
### Note, DNS_AUTH and DNS_ADDL not processed.
session$last_active = network_time();
}
event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr)
{
if ( a in sensitive_lookup_hosts )
event sensitive_addr_lookup(c, a, F);
DNS_answer(c, msg, ans, fmt("%As", a));
if ( resolver_consist_check )
insert_name(c, msg, ans, a );
}
event dns_NS_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string)
{
DNS_answer(c, msg, ans, fmt("%s", name));
}
event dns_CNAME_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string)
{
DNS_answer(c, msg, ans, fmt("%s %s", query_types[ans$qtype], name));
}
event dns_PTR_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string)
{
DNS_answer(c, msg, ans, fmt("%s", name));
}
event dns_SOA_reply(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa)
{
DNS_answer(c, msg, ans, fmt("%s", soa$mname));
}
event dns_MX_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string,
preference: count)
{
DNS_answer(c, msg, ans, fmt("%s/%d", name, preference));
}
event dns_EDNS(c: connection, msg: dns_msg, ans: dns_answer)
{
DNS_answer(c, msg, ans, "<---?--->");
}
# From here on down we need to modify the way that data is recorded. The
# standard resource record format is no longer universally applicable, in
# that we may see modified structs or some number of value pairs that
# require more flexibility in reporting.
event dns_EDNS_addl(c: connection, msg: dns_msg, ans: dns_edns_additional)
{
local session = lookup_DNS_session(c, msg$id);
local report =
fmt("%.06f #%d %s", network_time(), session$id, c$id$orig_h);
if ( ans$is_query == 1 )
report = fmt("%s <addl_edns ?>", report);
else
report = fmt("%s <addl_edns> ", report);
if ( logging )
print dns_log, fmt("%s pldsize:%s RCode:%s VER:%s Z:%s",
report, ans$payload_size,
base_error[ans$extended_rcode],
ans$version, edns_zfield[ans$z_field]);
}
event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional)
{
local session = lookup_DNS_session(c, msg$id);
local report =
fmt("%.06f #%d %s", network_time(), session$id, c$id$orig_h);
# Error handling with this is a little odd: number collision with EDNS.
# We set the collided value to the first private space number. gross.
local trans_error_num = (ans$rr_error == 16) ? 3842 : ans$rr_error;
if ( ans$is_query == 1 )
report = fmt("%s <addl_tsig ?> ", report);
else
report = fmt("%s <addl_tsig> ", report);
if ( logging )
print dns_log, fmt("%s name:%s alg:%s origID:%s RCode:%s",
report, ans$query, ans$alg_name,
ans$orig_id, base_error[trans_error_num]);
}

View file

@ -1,74 +0,0 @@
# $Id: drop-adapt.bro 6940 2009-11-14 00:38:53Z robin $
#
# Adjust load level based on packet drops.
#
@load load-level
# Increase load-level if packet drops are successively 'count' times
# above 'threshold' percent.
const drop_increase_count = 5 &redef;
const drop_increase_threshold = 5.0 &redef;
# Same for decreasing load-level.
const drop_decrease_count = 15 &redef;
const drop_decrease_threshold = 0.0 &redef;
# Minimum time to wait after a load-level increase before new decrease.
const drop_decrease_wait = 20 mins &redef;
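# Example tuning (commented out; values illustrative): react faster to
# sustained drops by lowering the trigger count and threshold:
#
# redef drop_increase_count = 3;
# redef drop_increase_threshold = 2.0;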
global drop_last_stat: net_stats;
global drop_have_stats = F;
global drop_above = 0;
global drop_below = 0;
global drop_last_increase: time = 0;
event net_stats_update(t: time, ns: net_stats)
{
if ( drop_have_stats )
{
local new_recvd = ns$pkts_recvd - drop_last_stat$pkts_recvd;
local new_dropped =
ns$pkts_dropped - drop_last_stat$pkts_dropped;
drop_last_stat = ns;
# Guard against an interval in which no packets were received.
if ( new_recvd == 0 )
return;
local p = new_dropped * 100.0 / new_recvd;
if ( p >= 0 )
{
if ( p >= drop_increase_threshold )
{
if ( ++drop_above >= drop_increase_count )
{
increase_load_level();
drop_above = 0;
drop_last_increase = t;
}
}
else
drop_above = 0;
if ( t - drop_last_increase < drop_decrease_wait )
return;
if ( p <= drop_decrease_threshold )
{
if ( ++drop_below >= drop_decrease_count )
{
decrease_load_level();
drop_below = 0;
}
}
else
drop_below = 0;
}
}
else
{
drop_have_stats = T;
drop_last_stat = ns;
}
}

View file

@ -1,340 +0,0 @@
# $Id:$
#
# drop.bro implements a drop/restore policy termed "catch-and-release"
# whereby the first time an address is dropped, it is restored a while after
# the last connection attempt seen. If a connection attempt is subsequently
# seen, however, then the system is blocked again, and for a longer time.
#
# This policy has significant benefits when using Bro to update router
# ACLs for which:
# - The router has a limited number of ACLs slots.
# - You care about possible reuse of IP addresses by now-benign hosts,
# so don't want blocks to last forever.
#
# Original code by Jim Mellander, LBNL.
# Updated by Brian Tierney, LBNL and by Robin Sommer, ICSI.
@load site
module Drop;
export {
redef enum Notice += {
# Connectivity with given address has been dropped.
AddressDropped,
# A request to drop connectivity has been ignored.
AddressDropIgnored,
# Connectivity with given address has been restored.
AddressRestored,
AddressAlreadyDropped, # host is already dropped
# Previously dropped host connects again.
AddressSeenAgain,
# Previous offenders re-dropped or re-restored.
RepeatAddressDropped,
RepeatAddressRestored,
};
# True if we have the capability to drop hosts at all.
const can_drop_connectivity = F &redef;
# True if we never want to drop local addresses.
const dont_drop_locals = T &redef;
# True if we should use the catch-and-release scheme. If not then
# we simply drop addresses via the drop_connectivity_script and
# never restore them (they must be restored out-of-band).
const use_catch_release = F &redef;
# Catch-and-release parameters.
# Interval to wait for release following inactivity after
# first offense.
global drop_time = 5 min &redef;
# For repeat offenders: if the total time a host has already been
# dropped reaches persistent_offender_time, we drop the host for
# long_drop_time. Setting persistent_offender_time to zero disables
# this functionality.
const persistent_offender_time = 2 hr &redef;
global long_drop_time = 12 hr &redef;
# Scripts to perform the actual dropping/restore. They get the
# IP address as their first argument.
const drop_connectivity_script = "drop-connectivity" &redef;
const restore_connectivity_script = "restore-connectivity" &redef;
const root_servers = {
a.root-servers.net, b.root-servers.net, c.root-servers.net,
d.root-servers.net, e.root-servers.net, f.root-servers.net,
g.root-servers.net, h.root-servers.net, i.root-servers.net,
j.root-servers.net, k.root-servers.net, l.root-servers.net,
m.root-servers.net,
} &redef;
const gtld_servers = {
a.gtld-servers.net, b.gtld-servers.net, c.gtld-servers.net,
d.gtld-servers.net, e.gtld-servers.net, f.gtld-servers.net,
g.gtld-servers.net, h.gtld-servers.net, i.gtld-servers.net,
j.gtld-servers.net, k.gtld-servers.net, l.gtld-servers.net,
m.gtld-servers.net,
} &redef;
const never_shut_down = {
root_servers, gtld_servers,
} &redef;
const never_drop_nets: set[subnet] &redef;
# Drop the connectivity for the address. "msg" gives a reason.
# It returns a copy of the NOTICE generated for the drop, which
# gives more information about the kind of dropping performed.
# If the notice type is NoticeNone, the drop was not successful
# (e.g., because this Bro instance is not configured to do drops.)
global drop_address: function(a: addr, msg: string) : notice_info;
# The following events are used to communicate information about the
# drops, in particular for C&R in the cluster setting.
# Address has been dropped.
global address_dropped: event(a: addr);
# Raised when an IP is restored.
global address_restored: event(a: addr);
# Raised when an address that was dropped in the past is no
# longer monitored specifically for new connections.
global address_cleared: event(a: addr);
const debugging = F &redef;
global debug_log: function(msg: string);
}
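# Minimal sketch of a site configuration enabling catch-and-release
# (commented out; script paths and values are illustrative only):
#
# redef Drop::can_drop_connectivity = T;
# redef Drop::use_catch_release = T;
# redef Drop::drop_time = 10 min;
# redef Drop::never_drop_nets += { 10.0.0.0/8 };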
type drop_rec: record {
tot_drop_count: count &default=0;
tot_restore_count: count &default=0;
actual_restore_count: count &default=0;
tot_drop_time: interval &default=0secs;
last_timeout: interval &default=0secs;
};
global clear_host: function(t: table[addr] of drop_rec, a: addr): interval;
global drop_info: table[addr] of drop_rec
&read_expire = 1 days &expire_func=clear_host &persistent;
global last_notice: notice_info;
function do_notice(n: notice_info)
{
last_notice = n;
NOTICE(n);
}
function dont_drop(a: addr) : bool
{
return ! can_drop_connectivity || a in never_shut_down ||
a in never_drop_nets || (dont_drop_locals && is_local_addr(a));
}
function is_dropped(a: addr) : bool
{
if ( a !in drop_info )
return F;
local di = drop_info[a];
if ( di$tot_drop_count < di$tot_restore_count )
{ # This shouldn't happen.
# FIXME: We need an assert().
print "run-time error: more restores than drops!";
return F;
}
return di$tot_drop_count > di$tot_restore_count;
}
global debug_log_file: file;
function debug_log(msg: string)
{
if ( ! debugging )
return;
print debug_log_file,
fmt("%.6f [%s] %s", network_time(), peer_description, msg);
}
event bro_init()
{
if ( debugging )
{
debug_log_file =
open_log_file(fmt("drop-debug.%s", peer_description));
set_buf(debug_log_file, F);
}
}
function do_direct_drop(a: addr, msg: string)
{
if ( msg != "" )
msg = fmt(" (%s)", msg);
if ( a !in drop_info )
{
local tmp: drop_rec;
drop_info[a] = tmp;
}
local di = drop_info[a];
if ( is_dropped(a) )
# Already dropped. Nothing to do.
do_notice([$note=Drop::AddressAlreadyDropped, $src=a,
$msg=fmt("%s%s", a, msg)]);
else
{
system(fmt("%s %s", Drop::drop_connectivity_script, a));
debug_log(fmt("sending drop for %s", a));
event Drop::address_dropped(a);
if ( di$tot_drop_count == 0 )
do_notice([$note=Drop::AddressDropped, $src=a,
$msg=fmt("%s%s", a, msg)]);
else
{
local s = fmt("(%d times)", di$tot_drop_count + 1);
do_notice([$note=Drop::RepeatAddressDropped,
$src=a, $n=di$tot_drop_count+1,
$msg=fmt("%s%s %s", a, msg, s), $sub=s]);
}
}
++di$tot_drop_count;
debug_log(fmt("dropped %s: tot_drop_count=%d tot_restore_count=%d",
a, di$tot_drop_count, di$tot_restore_count));
}
# Restore a previously dropped address.
global do_restore: function(a: addr, force: bool);
event restore_dropped_address(a: addr)
{
do_restore(a, F);
}
function do_catch_release_drop(a: addr, msg: string)
{
do_direct_drop(a, msg);
local di = drop_info[a];
local t = (persistent_offender_time != 0 sec &&
di$tot_drop_time >= persistent_offender_time) ?
long_drop_time : drop_time;
di$tot_drop_time += t;
di$last_timeout = t;
schedule t { restore_dropped_address(a) };
}
function do_restore(a: addr, force: bool)
{
if ( a !in drop_info )
return;
local di = drop_info[a];
++drop_info[a]$tot_restore_count;
debug_log(fmt("restored %s: tot_drop_count=%d tot_restore_count=%d force=%s", a, drop_info[a]$tot_drop_count, drop_info[a]$tot_restore_count, force));
if ( di$tot_drop_count == di$tot_restore_count || force )
{
++di$actual_restore_count;
system(fmt("%s %s", Drop::restore_connectivity_script, a));
debug_log(fmt("sending restored for %s", a));
event Drop::address_restored(a);
local t = di$last_timeout;
if ( di$actual_restore_count == 1 )
{
local s1 = fmt("(timeout %.1f)", t);
do_notice([$note=Drop::AddressRestored, $src=a,
$msg=fmt("%s %s", a, s1), $sub=s1]);
}
else
{
local s2 = fmt("(%d times, timeout %.1f)",
di$actual_restore_count, t);
do_notice([$note=Drop::RepeatAddressRestored, $src=a,
$n=di$tot_restore_count,
$msg=fmt("%s %s", a, s2), $sub=s2]);
}
}
}
function clear_host(t: table[addr] of drop_rec, a: addr): interval
{
if ( is_dropped(a) )
# Restore address.
do_restore(a, T);
debug_log(fmt("sending cleared for %s", a));
event Drop::address_cleared(a);
return 0 secs;
}
# Returns the notice generated for the drop; a note of NoticeNone
# indicates the drop was not performed.
function drop_address(a: addr, msg: string) : notice_info
{
debug_log(fmt("drop_address(%s, %s)", a, msg));
last_notice = [$note=NoticeNone];
if ( dont_drop(a) )
do_notice([$note=AddressDropIgnored, $src=a,
$msg=fmt("ignoring request to drop %s (%s)", a, msg)]);
else if ( use_catch_release )
do_catch_release_drop(a, msg);
else
do_direct_drop(a, msg);
if ( last_notice$note == NoticeNone )
print "run-time error: drop_address did not raise a NOTICE";
return last_notice;
}
event new_connection(c: connection)
{
if ( ! can_drop_connectivity )
return;
# With Catch & Release, 1 connection from a previously dropped system
# triggers an immediate redrop.
if ( ! use_catch_release )
return;
local a = c$id$orig_h;
if ( a !in drop_info )
# Never dropped.
return;
local di = drop_info[a];
if ( is_dropped(a) )
# Still dropped.
return;
NOTICE([$note=AddressSeenAgain, $src=a,
$msg=fmt("%s seen again after release", a)]);
}

View file

@ -1,53 +0,0 @@
# $Id: dyn-disable.bro,v 1.1.4.3 2006/05/31 01:52:02 sommer Exp $
#
# When this script is loaded, analyzers that raise protocol_violation events
# are disabled for the affected connection.
# Note that this is a first-shot solution. Eventually, we should make the
# disable-decision more fine-grained/sophisticated.
@load conn
@load notice
module DynDisable;
export {
redef enum Notice += {
ProtocolViolation
};
# Ignore violations which go this many bytes into the connection.
const max_volume = 10 * 1024 &redef;
}
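# Example (commented out; illustrative): set max_volume to 0 so violations
# trigger a disable regardless of how far into the connection they occur:
#
# redef DynDisable::max_volume = 0;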
global conns: table[conn_id] of set[count];
event protocol_violation(c: connection, atype: count, aid: count,
reason: string)
{
if ( c$id in conns && aid in conns[c$id] )
return;
local size = c$orig$size + c$resp$size;
if ( max_volume > 0 && size > max_volume )
return;
# Disable the analyzer that raised the last core-generated event.
disable_analyzer(c$id, aid);
NOTICE([$note=ProtocolViolation, $conn=c,
$msg=fmt("%s analyzer %s disabled due to protocol violation",
id_string(c$id), analyzer_name(atype)),
$sub=reason, $n=atype]);
if ( c$id !in conns )
conns[c$id] = set();
add conns[c$id][aid];
}
event connection_state_remove(c: connection)
{
delete conns[c$id];
}

View file

@ -1,18 +0,0 @@
# $Id: file-flush.bro 786 2004-11-24 08:25:16Z vern $
# Causes all files to be flushed every file_flush_interval seconds.
# Useful if you want to poke through the log files in real time,
# particularly if network traffic is light.
global file_flush_interval = 10 sec &redef;
event file_flush_event()
{
flush_all();
schedule file_flush_interval { file_flush_event() };
}
event bro_init()
{
schedule file_flush_interval { file_flush_event() };
}
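# Example (commented out; value illustrative): flush more aggressively when
# tailing the logs in real time:
#
# redef file_flush_interval = 2 sec;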

View file

@ -1,69 +0,0 @@
# $Id: finger.bro 4758 2007-08-10 06:49:23Z vern $
module Finger;
export {
const hot_names = {
"root", "lp", "uucp", "nuucp", "demos", "operator", "sync",
"r00t", "tutor", "tour", "admin", "system", "guest", "visitor",
"0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
} &redef;
const max_finger_request_len = 80 &redef;
}
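# Example (commented out; account names illustrative): watch additional
# accounts beyond the defaults:
#
# redef Finger::hot_names += { "test", "backup" };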
redef capture_filters += { ["finger"] = "port finger" };
# DPM configuration.
global finger_ports = { 79/tcp } &redef;
redef dpd_config += { [ANALYZER_FINGER] = [$ports = finger_ports] };
function public_user(user: string): bool
{
return T;
}
function authorized_client(host: addr): bool
{
return T;
}
event finger_request(c: connection, full: bool, username: string, hostname: string)
{
local id = c$id;
local request: string;
if ( hostname != "" )
request = cat(username, "@", hostname);
else
request = username;
if ( byte_len(request) > max_finger_request_len )
{
request = fmt("%s...", sub_bytes(request, 1, max_finger_request_len));
++c$hot;
}
if ( hostname != "" )
++c$hot;
if ( username in hot_names )
++c$hot;
local req = request == "" ? "ALL" : fmt("\"%s\"", request);
if ( full )
req = fmt("%s (/W)", req);
if ( c$addl != "" )
# This is an additional request.
req = fmt("(%s)", req);
append_addl_marker(c, req, " *");
}
function is_finger_conn(c: connection): bool
{
return c$id$resp_p == finger;
}

View file

@ -1,195 +0,0 @@
# $Id: firewall.bro 4758 2007-08-10 06:49:23Z vern $
#
# Firewall-like rules.
@load notice
@load conn
@load ftp
module Firewall;
export {
type action: enum { ALLOW, DENY };
type cmp: enum { EQ, NE };
type rule: record {
label: string &default = "<no-label>";
orig: subnet &default = 0.0.0.0/0;
orig_set: set[addr] &optional;
orig_cmp: cmp &default = EQ;
orig_p: port &default = 0/tcp;
orig_p_cmp: cmp &default = EQ;
resp: subnet &default = 0.0.0.0/0;
resp_set: set[addr] &optional;
resp_cmp: cmp &default = EQ;
resp_p: port &default = 0/tcp;
resp_p_cmp: cmp &default = EQ;
prot: transport_proto &default = unknown_transport;
prot_cmp: cmp &default = EQ;
state: string &default = "";
state_cmp: cmp &default = EQ;
is_ftp: bool &default = F;
action: action &default = ALLOW;
};
redef enum Notice += {
DenyRuleMatched
};
global begin: function(c: connection);
global match_rule: function(c: connection, r: rule);
}
const log_file = open_log_file("firewall") &redef;
global stop_matching = F;
function do_match(c: connection, r: rule): bool
{
if ( r$orig_cmp == EQ )
{
if ( r?$orig_set )
{
if ( c$id$orig_h !in r$orig_set && c$id$orig_h !in r$orig )
return F;
}
else
{
if ( c$id$orig_h !in r$orig )
return F;
}
}
else
{
if ( r?$orig_set )
{
if ( c$id$orig_h in r$orig_set || c$id$orig_h in r$orig )
return F;
}
else
{
if ( c$id$orig_h in r$orig )
return F;
}
}
if ( r$resp_cmp == EQ )
{
if ( r?$resp_set )
{
if ( c$id$resp_h !in r$resp_set && c$id$resp_h !in r$resp )
return F;
}
else
{
if ( c$id$resp_h !in r$resp )
return F;
}
}
else
{
if ( r?$resp_set )
{
if ( c$id$resp_h in r$resp_set || c$id$resp_h in r$resp )
return F;
}
else
{
if ( c$id$resp_h in r$resp )
return F;
}
}
if ( r$orig_p != 0/tcp )
{
if ( r$orig_p_cmp == EQ )
{
if ( c$id$orig_p != r$orig_p )
return F;
}
else
if ( c$id$orig_p == r$orig_p )
return F;
}
if ( r$resp_p != 0/tcp )
{
if ( r$resp_p_cmp == EQ )
{
if ( c$id$resp_p != r$resp_p )
return F;
}
else
if ( c$id$resp_p == r$resp_p )
return F;
}
if ( r$state != "" )
{
local state = conn_state(c, get_port_transport_proto(c$id$orig_p));
if ( r$state_cmp == EQ )
{
if ( state != r$state )
return F;
}
else
if ( state == r$state )
return F;
}
if ( r$prot != unknown_transport )
{
local proto = get_port_transport_proto(c$id$orig_p);
if ( r$prot_cmp == EQ )
{
if ( proto != r$prot )
return F;
}
else
if ( proto == r$prot )
return F;
}
if ( r$is_ftp && ! FTP::is_ftp_data_conn(c) )
return F;
return T;
}
function report_violation(c: connection, r:rule)
{
local trans = get_port_transport_proto(c$id$orig_p);
local state = conn_state(c, trans);
NOTICE([$note=DenyRuleMatched,
$msg=fmt("%s %s",
id_string(c$id), trans), $conn=c, $sub=r$label]);
append_addl(c, fmt("<%s>", r$label));
record_connection(log_file, c);
}
function begin(c: connection)
{
stop_matching = F;
}
function match_rule(c: connection, r: rule)
{
if ( stop_matching )
return;
if ( do_match(c, r) )
{
stop_matching = T;
if ( r$action == DENY )
report_violation(c, r);
}
}
event bro_init()
{
set_buf(log_file, F);
}
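# A minimal usage sketch (hypothetical; this file includes no driver, so a
# site policy must call begin() and then match_rule() once per rule;
# matching stops at the first rule that fires):
#
# event connection_established(c: connection)
# 	{
# 	Firewall::begin(c);
# 	Firewall::match_rule(c, [$label="no-telnet", $resp_p=23/tcp,
# 				$action=Firewall::DENY]);
# 	Firewall::match_rule(c, [$label="default-allow"]);
# 	}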

View file

@ -1,18 +0,0 @@
# $Id: flag-irc.bro 4758 2007-08-10 06:49:23Z vern $
#
# include this module to flag various forms of IRC access.
@load ftp
redef FTP::hot_files +=
/.*eggdrop.*/
| /.*eggsun.*/
;
redef Hot::flag_successful_inbound_service: table[port] of string += {
[[6666/tcp, 6667/tcp]] = "inbound IRC",
};
redef Hot::hot_dsts: table[addr] of string += {
[bitchx.com] = "IRC source sites",
};

View file

@ -1,11 +0,0 @@
# $Id: flag-warez.bro 416 2004-09-17 03:52:28Z vern $
#
# include this module to flag various forms of Warez access.
@load hot-ids
@load ftp
redef FTP::hot_files += /.*[wW][aA][rR][eE][zZ].*/ ;
redef always_hot_ids += { "warez", "hanzwarez", "zeraw", };
redef hot_ids += { "warez", "hanzwarez", "zeraw", };

View file

@ -1,6 +0,0 @@
# Capture TCP fragments, but not UDP (or ICMP), since those are a lot more
# common due to high-volume, fragmenting protocols such as NFS :-(.
redef capture_filters += { ["frag"] = "(ip[6:2] & 0x3fff != 0) and tcp" };
redef frag_timeout = 5 min;

File diff suppressed because it is too large

View file

@ -1,61 +0,0 @@
# $Id: gnutella.bro 4017 2007-02-28 07:11:54Z vern $
redef capture_filters += { ["gnutella"] = "port 6346 or port 8436" };
global gnutella_ports = { 6346/tcp, 8436/tcp } &redef;
redef dpd_config += { [ANALYZER_GNUTELLA] = [$ports = gnutella_ports] };
event gnutella_text_msg(c: connection, orig: bool, headers: string)
{
if ( orig )
print fmt("gnu txt %s -> %s %s", c$id$orig_h, c$id$resp_h, headers);
else
print fmt("gnu txt %s -> %s %s", c$id$resp_h, c$id$orig_h, headers);
}
event gnutella_binary_msg(c: connection, orig: bool, msg_type: count,
ttl: count, hops: count, msg_len: count,
payload: string, payload_len: count,
trunc: bool, complete: bool)
{
local s = "";
if ( orig )
s = fmt("gnu bin %s -> %s", c$id$orig_h, c$id$resp_h);
else
s = fmt("gnu bin %s -> %s", c$id$resp_h, c$id$orig_h);
print fmt("%s %d %d %d %d %d %d %d %s",
s, msg_type, ttl, hops, msg_len,
trunc, complete, payload_len, payload);
}
event gnutella_partial_binary_msg(c: connection, orig: bool,
msg: string, len: count)
{
if ( orig )
print fmt("gnu pbin %s -> %s", c$id$orig_h, c$id$resp_h);
else
print fmt("gnu pbin %s -> %s", c$id$resp_h, c$id$orig_h);
}
event gnutella_establish(c: connection)
{
print fmt("gnu est %s <-> %s", c$id$orig_h, c$id$resp_h);
}
event gnutella_not_establish(c: connection)
{
print fmt("gnu !est %s <-> %s", c$id$orig_h, c$id$resp_h);
}
event gnutella_http_notify(c: connection)
{
print fmt("gnu http %s/%s <-> %s/%s", c$id$orig_h, c$id$orig_p,
c$id$resp_h, c$id$resp_p);
}

View file

@ -1,144 +0,0 @@
# $Id: hand-over.bro 617 2004-11-02 00:54:31Z scottc $
#
# Hand-over between two instances of Bro.
@load remote
# The host from which we want to take over the state has to be
# added to remote_peers_{clear,ssl}, setting hand_over to T.
#
# The host which we want to allow to perform a hand-over with us
# has to be added to remote_peers with a port of 0/tcp and
# hand_over = T.
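# For example (commented out; peer name, address, and port are hypothetical),
# the instance taking over state might be configured with something like:
#
# redef Remote::destinations += {
# 	["old-bro"] = [$host = 127.0.0.1, $p = 47756/tcp, $hand_over = T]
# };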
function is_it_us(host: addr, p: port): bool
{
@ifdef ( listen_if_clear )
if ( is_local_interface(host) && p == listen_port_clear )
return T;
@endif
@ifdef ( listen_if_ssl )
if ( is_local_interface(host) && p == listen_port_ssl )
return T;
@endif
return F;
}
function is_handover_peer(p: event_peer): bool
{
local peer: Remote::Destination;
if ( p$id in Remote::pending_peers )
peer = Remote::pending_peers[p$id];
else
return F;
return peer$hand_over;
}
function handover_start_processing()
{
uninstall_src_net_filter(0.0.0.0/0);
}
event bro_init()
{
# Disable packet processing.
install_src_net_filter(0.0.0.0/0, 0, 100);
# Reporter::message("waiting for hand-over - packet processing disabled.");
}
event remote_connection_error(p: event_peer, reason: string)
{
if ( is_remote_event() || ! ( p$id in Remote::connected_peers) )
return;
# Seems that the other side is not running.
# Reporter::error("can't connect for hand-over - starting processing ...");
handover_start_processing();
}
event remote_connection_established(p: event_peer)
{
if ( is_remote_event() )
return;
# If [p$id] is in Remote::connected_peers and p$p != 0/tcp, then we
# are the side that connected to the host.
if ( p$p != 0/tcp &&
([p$id] in Remote::connected_peers ) )
{
if ( ! is_handover_peer(p) )
return;
# Reporter::message(fmt("requesting hand-over from %s:%d", p$host, p$p));
request_remote_events(p, /handover_.*|finished_send_state/);
# Give the remote side some time to register its handlers.
schedule 3 secs { handover_request(p$host, p$p) };
return;
}
# If the other side connected to us, we will allow the hand-over
# if the remote host is defined as a hand-over host in remote_peers.
if ( is_handover_peer(p) )
{
# Reporter::message(fmt("allowing hand-over from %s:%d", p$host, p$p));
request_remote_events(p, /handover_.*|finished_send_state/);
}
}
event handover_send_state(p: event_peer)
{
if ( is_remote_event() )
return;
# There may be a serialization in progress in which case
# we will have to try again.
if ( ! send_state(p) )
{
# Reporter::message("can't send state; serialization in progress");
schedule 5 secs { handover_send_state(p$host, p$p) };
}
}
event handover_request(p: event_peer)
{
# Make sure the event is for us.
if ( ! (is_remote_event() && is_it_us(p$host, p$p)) )
return;
# Send state to other side.
schedule 1 sec { handover_send_state(p) };
}
event finished_send_state(p: event_peer)
{
# We will get this event from the remote side.
# Make sure it's indeed for us.
if ( ! is_remote_event() )
return;
if ( ! is_handover_peer(p) )
return;
#Reporter::message(fmt("full state received from %s:%d - starting processing ...",
# p$host, p$p));
event handover_got_state(p);
# Start processing.
handover_start_processing();
}
event handover_got_state(p: event_peer)
{
# Make sure the event is for us.
if ( ! (is_remote_event() && is_it_us(p$host, p$p)) )
return;
# Reporter::message(fmt("%s:%d received our state - terminating", p$host, p$p));
terminate();
}

View file

@ -1,26 +0,0 @@
# $Id: heavy-analysis.bro 2771 2006-04-18 23:53:09Z vern $
#
# Loading this files enables somewhat more accurate, yet also significantly
# more expensive, analysis (in terms of memory as well as CPU time).
#
# This script only sets core-level options. Script-level timeouts are
# adjusted in heavy.*.bro, loaded via Bro's prefix mechanism. To make this
# work, the prefix has to be set *before* reading other scripts, either by
# loading this script first of all, or by manually putting a @prefix
# at the start of Bro's configuration.
@prefixes += heavy
redef tcp_SYN_timeout = 120 secs;
redef tcp_session_timer = 30 secs;
redef tcp_connection_linger = 30 secs;
redef tcp_attempt_delay = 300 secs;
redef tcp_close_delay = 15 secs;
redef tcp_reset_delay = 15 secs;
redef tcp_partial_close_delay = 10 secs;
redef max_timer_expires = 32;
redef tcp_inactivity_timeout = 2 hrs;
redef udp_inactivity_timeout = 1 hrs;
redef icmp_inactivity_timeout = 1 hrs;
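# Illustrative invocation (script names are examples): load this script
# first so the prefix is in effect before the other scripts are read:
#
#	bro heavy-analysis tcp udp conn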

View file

@ -1,4 +0,0 @@
# $Id: heavy.irc.bro 4723 2007-08-07 18:14:35Z vern $
redef active_users &persistent &read_expire = 1 days;
redef active_channels &persistent &read_expire = 1 days;

View file

@ -1,6 +0,0 @@
# $Id: heavy.scan.bro 4758 2007-08-10 06:49:23Z vern $
redef distinct_peers &create_expire = 10 hrs;
redef distinct_ports &create_expire = 10 hrs;
redef distinct_low_ports &create_expire = 10 hrs;
redef possible_scan_sources &create_expire = 10 hrs;

View file

@ -1,3 +0,0 @@
# $Id: heavy.software.bro 2771 2006-04-18 23:53:09Z vern $
redef only_report_local = F;

View file

@ -1,8 +0,0 @@
# $Id: heavy.trw.bro 4723 2007-08-07 18:14:35Z vern $
redef TRW::scan_sources &write_expire = 1 day;
redef TRW::benign_sources &write_expire = 1 day;
redef TRW::failed_locals &write_expire = 12 hrs;
redef TRW::successful_locals &write_expire = 12 hrs;
redef TRW::lambda &write_expire = 12 hrs;
redef TRW::num_scanned_locals &write_expire = 12 hrs;

View file

@ -1,29 +0,0 @@
# @(#) $Id: hot-ids.bro 785 2004-11-24 05:56:06Z rwinslow $ (LBL)
# If these ids are seen, the corresponding connection is terminated.
const forbidden_ids = {
"uucp", "daemon", "rewt", "nuucp",
"EZsetup", "OutOfBox", "4Dgifts",
"ezsetup", "outofbox", "4dgifts", "sgiweb",
"r00t", "ruut", "bomb", "backdoor",
"bionic", "warhead", "check_mate", "checkmate", "check_made",
"themage", "darkmage", "y0uar3ownd", "netfrack", "netphrack",
} &redef;
const forbidden_ids_if_no_password = { "lp" } &redef;
const forbidden_id_patterns = /(y[o0]u)(r|ar[e3])([o0]wn.*)/ &redef;
const always_hot_ids = {
"sync", "tutor", "tour",
"retro", "milk", "moof", "own", "gdm", "anacnd",
"lp", "demos", forbidden_ids,
} &redef;
# The ones here that aren't in always_hot_ids are only hot upon
# success.
const hot_ids = {
"root", "system", "smtp", "sysadm", "diag", "sysdiag", "sundiag",
"operator", "sys", "toor", "issadmin", "msql", "sysop", "sysoper",
"wank", always_hot_ids,
} &redef;

View file

@ -1,160 +0,0 @@
# $Id: hot.bro 7057 2010-07-19 23:22:19Z vern $
@load site
@load port-name
@load notice
@load terminate-connection
module Hot;
export {
# True if it should be considered a spoofing attack if a connection has
# the same local net for source and destination.
const same_local_net_is_spoof = F &redef;
const allow_spoof_services = {
110/tcp, # pop-3
139/tcp, # netbios-ssn
} &redef;
# Indexed by source address and destination address.
const allow_pairs: set[addr, addr] &redef;
const hot_srcs: table[addr] of string = {
# [ph33r.the.eleet.com] = "kidz",
} &redef;
const hot_dsts: table[addr] of string = {
[206.101.197.226] = "ILOVEYOU worm destination",
} &redef;
const allow_services = {
ssh, http, gopher, ident, smtp, 20/tcp,
53/udp, # DNS queries
123/udp, # NTP
} &redef;
const allow_services_to: set[addr, port] &redef;
const allow_services_from: set[addr, port] &redef;
const allow_service_pairs: set[addr, addr, port] &redef;
const flag_successful_service: table[port] of string = {
[[31337/tcp]] = "popular backdoors",
} &redef;
const flag_successful_inbound_service: table[port] of string = {
[1524/tcp] = "popular backdoor, but with false hits outbound",
} &redef;
const terminate_successful_inbound_service: table[port] of string &redef;
const flag_rejected_service: table[port] of string &redef;
# Different values to hand to check_hot() at different stages in
# a connection's lifetime.
const CONN_ATTEMPTED = 1;
const CONN_ESTABLISHED = 2;
const APPL_ESTABLISHED = 3;
const CONN_FINISHED = 4;
const CONN_REJECTED = 5;
const CONN_TIMEOUT = 6;
const CONN_REUSED = 7;
global check_hot: function(c: connection, state: count): bool;
global check_spoof: function(c: connection): bool;
}
# An internal function used by check_hot.
function do_hot_check(c: connection, a: addr, t: table[addr] of string)
{
if ( a in t )
{
++c$hot;
local hot_msg = fmt("<%s>", t[a]);
append_addl(c, hot_msg);
}
}
function check_spoof(c: connection): bool
{
local orig = c$id$orig_h;
local resp = c$id$resp_h;
local service = c$id$resp_p;
if ( is_local_addr(orig) && is_local_addr(resp) &&
service !in allow_spoof_services )
{
if ( c$id$orig_p == service && orig == resp )
event conn_weird("Land_attack", c, "");
if ( same_local_net_is_spoof )
++c$hot;
}
return c$hot != 0;
}
function check_hot(c: connection, state: count): bool
{
local id = c$id;
local service = id$resp_p;
if ( service in allow_services || "ftp-data" in c$service )
return F;
if ( state == CONN_ATTEMPTED )
check_spoof(c);
else if ( state == CONN_REJECTED )
{
check_spoof(c);
if ( service in flag_rejected_service )
++c$hot;
}
else if ( state == CONN_ESTABLISHED )
{
check_spoof(c);
local inbound = is_local_addr(id$resp_h);
if ( (service in flag_successful_service ||
(inbound &&
service in flag_successful_inbound_service)) &&
([id$resp_h, id$resp_p] !in allow_services_to ||
[id$orig_h, id$resp_p] !in allow_services_from) )
{
if ( inbound &&
service in terminate_successful_inbound_service )
TerminateConnection::terminate_connection(c);
++c$hot;
if ( service in flag_successful_service )
append_addl(c, flag_successful_service[service]);
else
append_addl(c, flag_successful_inbound_service[service]);
}
}
else if ( state == APPL_ESTABLISHED ||
((state == CONN_FINISHED || state == CONN_TIMEOUT ||
state == CONN_REUSED) &&
service != telnet && c$orig$size > 0 && c$resp$size > 0) )
{
# Connection established and has a non-trivial size.
local orig = c$id$orig_h;
local resp = c$id$resp_h;
if ( [resp, service] in allow_services_to ||
[orig, service] in allow_services_from ||
[orig, resp, service] in allow_service_pairs ||
[orig, resp] in allow_pairs )
return F;
do_hot_check(c, orig, hot_srcs);	# hot_srcs is indexed by source address
do_hot_check(c, resp, hot_dsts);
}
return c$hot != 0;
}
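check_hot() is intended to be driven from connection-lifecycle events using the state constants exported above; a minimal sketch of a caller (hypothetical handler, simpler than the stock conn.bro wiring):

    event connection_established(c: connection)
        {
        if ( Hot::check_hot(c, Hot::CONN_ESTABLISHED) )
            print fmt("hot connection: %s (%s)", id_string(c$id), c$addl);
        }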

View file

@ -1,54 +0,0 @@
# $Id: http-abstract.bro 47 2004-06-11 07:26:32Z vern $
@load http
@load http-entity
module HTTP;
export {
const abstract_max_length = 512 &redef;
}
redef http_entity_data_delivery_size = 4096;
redef include_HTTP_abstract = T;
function skip_abstract(c: connection, is_orig: bool, msg: http_message)
{
msg$skip_abstract = T;
if ( ! process_HTTP_data )
skip_http_entity_data(c, is_orig);
}
event http_content_type(c: connection, is_orig: bool, ty: string, subty: string)
{
local s = lookup_http_request_stream(c);
local msg = get_http_message(s, is_orig);
if ( msg$entity_level == 1 && ty == "TEXT" )
# Do not skip the body in this case.
return;
skip_abstract(c, is_orig, msg);
}
event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
{
local s = lookup_http_request_stream(c);
local msg = get_http_message(s, is_orig);
if ( msg$skip_abstract )
return;
local len = byte_len(data);
if ( len > abstract_max_length )
msg$abstract = sub_bytes(data, 1, abstract_max_length);
else
msg$abstract = data;
skip_abstract(c, is_orig, msg);
# print http_log, fmt("%.6f %s %s %d bytes: \"%s\"",
# network_time(), s$id,
# is_orig ? "=>" : "<=", byte_len(msg$abstract),
# msg$abstract);
}
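The abstract cap is &redef-able per site; for instance (hypothetical value):

    @load http-abstract
    redef HTTP::abstract_max_length = 128;   # keep only the first 128 bytes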

View file

@ -1,209 +0,0 @@
# $Id:$
# Anonymize values in Server: headers.
#
# TODO:
#
# - Zedo and IBM web servers can have Apache mods -- the parsing should
# be extended to support them
#
@load anon
@load http-anon-utils
# ---------------------------------------------------------------------
# Apache (and friends)
# - abandon all hope ye who enter here .....
# ---------------------------------------------------------------------
const apache_server =
/apache(-ish)?(\/([0-9]+\.)*[0-9]+)? *(\(?(red hat( linux)?|cobalt|suse\/linux|linux\/suse|darwin|gentoo\/linux|debian gnu\/linux|win32|fedora|freebsd|red-hat\/linux|unix)\)? *)*/;
const apache_mod_pat =
/mod_fastcgi\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /openssl\/([0-9]+\.)*[0-9a-z]{1,4}(-beta[0-9]{0,2})?/
| /dav\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /php-cgi\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /ben-ssl\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /embperl\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_ruby\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /nexadesic\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /postgresql\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_tsunami\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_auth_svn\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_auth_mda\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /rus\/pl(([0-9]+\.)*[0-9]{1,4})/
| /authmysql\/(([0-9]+\.)*[0-9]{1,4})/
| /mod_auth_pgsql\/(([0-9]+\.)*[0-9]{1,4})/
| /mod_ssl\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /php\/(([0-9]+\.)*[0-9a-z]{1,4})(-[0-9]+)?/
| /mod_perl\/(([0-9]+\.)*[0-9a-z]{1,4})(\_[0-9]+)?/
| /mod_macro\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_auth_pam\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_oas\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_cap\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /powweb\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_gzip\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /resin\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_jk\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /python\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /perl\/(v)?(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_python\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_log_bytes\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_auth_passthrough\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_bwlimited\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_throttle\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /mod_webapp\/(([0-9]+\.)*[0-9a-z]{1,4})(-dev)?/
| /frontpage\/(([0-9]+\.)*[0-9a-z]{1,5})/
| /mod_pubcookie\/[0-9a-z]{2}\/[0-9]+\.[0-9]+\-[0-9]+/
| /(-)?coyote\/(([0-9]+\.)*[0-9a-z]{1,4})/
| /svn\/(([0-9]+\.)*[0-9a-z]{1,4})/
;
# Various Apache variants (e.g., stronghold).
const apache_misc =
/stronghold\/(([0-9]+\.)*[0-9]+) apache(\/([0-9]+\.)*[0-9]+)? (c2neteu\/[0-9])? *(\(?(red hat( linux)?|cobalt|suse\/linux|linux\/suse|darwin|gentoo\/linux|debian gnu\/linux|win32|fedora|freebsd|red-hat\/linux|unix)\)? *)*/;
const apache_basic = /apache?(\/([0-9]+\.)*[0-9]+)?/;
const apache_platforms =
/(\(?(red hat( linux)?|cobalt|suse\/linux|linux\/suse|darwin|gentoo\/linux|debian gnu\/linux|win32|fedora|freebsd|red-hat\/linux|unix)\)? *)*/;
# ibm_http_server/1.3.26.2, apache/1.3.26 (unix).
const IBM_server =
/ibm_http_server(\/[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)?( *apache\/[0-9]+\.[0-9]+\.[0-9]+ \(unix\))?/;
# ---------------------------------------------------------------------
# Server values for which we don't retain everything.
# ---------------------------------------------------------------------
const zope_server =
/zope\/\(zope ([0-9]+\.)*[0-9]+-[a-z0-9]{1,2}\, python ([0-9]+\.)*[0-9]+\, linux[0-9]\)/;
const thttp_server = /thttpd\/[0-9]+\.[0-9]+(beta[0-9]+)?/;
const weblogic_server = /weblogic server [0-9]+\.[0-9]+/;
const zedo_server = /zedo 3g(\/([0-9]+\.)*[0-9]+)?/;
const jetty_server = /jetty\/[0-9]+\.[0-9]+/;
# ---------------------------------------------------------------------
# Misc Servers
# ---------------------------------------------------------------------
const misc_server =
/dclk creative/
| /gws\/[0-9]+\.[0-9]+/
| /nfe\/[0-9]+\.[0-9]+/
| /gfe\/[0-9]+\.[0-9]+/
| /dclk-adsvr/
| /rsi/
| /swcd\/([0-9]+\.)*[0-9]+/
| /microsoft-iis\/[0-9]{1,2}\.[0-9]{1,2}/
| /cafe\/[0-9]+\.[0-9]+/
| /artblast\/([0-9]+\.)*[0-9]+/
| /aolserver\/([0-9]+\.)*[0-9]+/
| /resin\/([0-9]+\.)*s?[0-9]+/
| /netscape-enterprise\/([0-9]+\.)*[0-9a-z]{1,2} *(aol)?/
| /mapquest listener/
| /miixpc\/[0-9]+\.[0-9]+/
| /sun-one-web-server\/[0-9]+\.[0-9]+/
| /appledotmacserver/
| /cj\/[0-9]+\.[0-9]+/
| /jigsaw\/([0-9]+\.)*[0-9]+/
| /boa\/[0-9]+\.[0-9]+(\.[0-9]+(rc[0-9]+)?)?/
| /tux\/[0-9]+\.[0-9]+ *\(linux\)/
| /igfe/
| /trafficmarketplace-jforce\/([0-9]+\.)*[0-9]+/
| /lighttpd/
| /hitbox gateway ([0-9]+\.)*[0-9]+ [a-z][0-9]/
| /jbird\/[0-9]+\.[0-9a-z]{1,2}/
| /perlbal/
| /big-ip/
| /konichiwa\/[0-9]+\.[0-9]+/
| /footprint [0-9]+\.[0-9]+\/fpmc/
| /iii [0-9]+/
| /clickability web server\/([0-9]+\.)*[0-9]+ *\(unix\)/
| /accipiter-directserver\/([0-9]+\.)*[0-9]+ \(nt; pentium\)/
| /ibm-proxy-wte\/([0-9]+\.)*[0-9]+/
| /netscape-commerce\/[0-9]+\.[0-9]+/
| /nde/
;
function do_apache_server(server: string): string
{
local apache_parts = split_all(server, apache_server);
if ( apache_parts[3] == "" )
return apache_parts[2];
local apache_return_string = apache_parts[2];
local mod_parts = split(apache_parts[3], / /);
for ( part in mod_parts )
{
if ( mod_parts[part] == apache_mod_pat )
{
apache_return_string = string_cat(apache_return_string, " ", mod_parts[part]);
}
else
print http_anon_log, fmt("** unknown Apache mod: %s:%s", mod_parts[part], server);
}
return apache_return_string;
}
function check_server(server: string, server_pat: pattern): bool
{
return server_pat in server;
}
function do_server(server: string, server_pat: pattern): string
{
return split_all(server, server_pat)[2];
}
function filter_in_http_server(server: string): string
{
# Vanilla Apache is a hard one and a special case. Let's get the
# nastiness out of the way first.
if ( apache_server in server )
return do_apache_server(server);
if ( check_server(server, apache_misc) )
return do_server(server, apache_misc);
if ( check_server(server, IBM_server) )
return do_server(server, IBM_server);
if ( check_server(server, zedo_server) )
return do_server(server, zedo_server);
if ( check_server(server, zope_server) )
return do_server(server, zope_server);
if ( check_server(server, jetty_server) )
return do_server(server, jetty_server);
if ( check_server(server, thttp_server) )
return do_server(server, thttp_server);
if ( check_server(server, weblogic_server) )
return do_server(server, weblogic_server);
# Grab bag.
if ( misc_server in server )
return server;
# Best guess - unknown Apache variant of some sort.
if ( apache_basic in server )
{
print http_anon_log,
fmt("** unknown Apache variant: %s", server);
return fmt("(bro: unknown) %s %s",
split_all(server, apache_basic)[2],
split_all(server, apache_platforms)[2]);
}
print http_anon_log, fmt("** unknown server: %s", server);
return fmt("(bro: unknown) %s", anonymize_arg("server", server));
}
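An illustrative call, assuming the Server value has already been lower-cased as elsewhere in these scripts (hypothetical header value):

    # Recognized products and mods pass through; anything unknown is
    # logged to http_anon_log and anonymized.
    print filter_in_http_server("apache/1.3.27 (unix) mod_ssl/2.8.12 php/4.3.2");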

View file

@ -1,111 +0,0 @@
# $Id:$
# Filter-in known "USER-AGENT:" values.
@load anon
@load http-anon-utils
# ---------------------------------------------------------------------
# Mozilla (and friends)
# ---------------------------------------------------------------------
const mozilla_full_pat =
/mozilla\/[0-9]\.[0-9] \(( *|;|iebar| freebsd i[0-9]{1,4}|fr|-|windows|windows 98|sunos sun4u|compatible|msie [0-9]\.[0-9]|windows nt [0-9]\.[0-9]|google-tr-1|sv1|\.net clr ([0-9]\.)*[0-9]+|x11|en|ppc mac os x|macintosh|u|linux i[0-9]{1,4}|en-us|rv\:([0-9]+\.)*[0-9]+|aol [0-9]\.[0-9]|gnotify ([0-9]+\.)*[0-9]+)*\) *(gecko\/[0-9]+)? *(firefox\/([0-9]+\.)*[0-9]+)?/;
const mozilla_head_pat = /mozilla\/[0-9]\.[0-9]/;
const misc_user_pat =
/spiderman/
| /w3m\/([0-9]+\.)*[0-9]+/
| /java([0-9]+\.)*[0-9]+(_[0-9]+)?/
| /java\/([0-9]+\.)*[0-9]+(_[0-9]+)?/
| /freecorder/
| /industry update control/
| /microsoft-cryptoapi\/([0-9]+\.)*[0-9]+/
| /ruriko\/([0-9]+\.)*[0-9]+/
| /crawler[0-9]\.[0-9]/
| /w3search/
| /symantec liveupdate/
| /davkit\/[0-9]\.[0-9]/
| /windows-media-player\/([0-9]+\.)*[0-9]+/
| /winamp\/([0-9]+\.)*[0-9]+/
| /headdump/
;
const misc_cmplx_user_pat =
/lynx\/([0-9]+\.)*[0-9]+.*/
| /wget\/([0-9]+\.)*[0-9]+.*/
| /yahooseeker\/([0-9]+\.)*[0-9]+.*/
| /rma\/([0-9]+\.)*[0-9]+.*/
| /aim\/[0-9]+.*/
| /ichiro\/([0-9]+\.)*[0-9]+.*/
| /unchaos.*/
| /irlbot\/[0-9]\.[0-9]+.*/
| /msnbot\/([0-9]+\.)*[0-9]+.*/
| /opera\/([0-9]+\.)*[0-9]+.*/
| /netnewswire\/([0-9]+\.)*[0-9]+.*/
| /nsplayer\/([0-9]+\.)*[0-9]+.*/
| /aipbot\/([0-9]+\.)*[0-9]+.*/
| /mac os x; webservicescore\.framework.*/
| /fast-webcrawler\/([0-9]+\.)*[0-9]+.*/
| /skype.*/
| /googlebot\/([0-9]+\.)*[0-9]+.*/
;
const misc_cmplx_user_start =
/lynx\/([0-9]+\.)*[0-9]+/
| /wget\/([0-9]+\.)*[0-9]+/
| /yahooseeker\/([0-9]+\.)*[0-9]+/
| /rma\/([0-9]+\.)*[0-9]+/
| /aim\/[0-9]+/
| /ichiro\/([0-9]+\.)*[0-9]+/
| /unchaos/
| /irlbot\/[0-9]\.[0-9]+/
| /opera\/([0-9]+\.)*[0-9]+/
| /msnbot\/([0-9]+\.)*[0-9]+/
| /netnewswire\/([0-9]+\.)*[0-9]+/
| /nsplayer\/([0-9]+\.)*[0-9]+/
| /aipbot\/([0-9]+\.)*[0-9]+/
| /mac os x; webservicescore\.framework/
| /fast-webcrawler\/([0-9]+\.)*[0-9]+/
| /skype/
| /googlebot\/([0-9]+\.)*[0-9]+/
;
function filter_in_http_useragent(user: string): string
{
# Check for an exact match for Mozilla.
if ( mozilla_full_pat in user )
return split_all(user, mozilla_full_pat)[2];
# Look for popular Mozilla-compatible crawlers.
if ( mozilla_head_pat in user )
{
local crawler = "(bro: unknown)";
if ( /.*yahoo\! slurp/ in user )
crawler = "(yahoo! slurp)";
else if ( /.*ask jeeves/ in user )
crawler = "(ask jeeves)";
else
print http_anon_log,
fmt("*** unknown Mozilla user-agent %s\n", user);
return fmt("%s %s", split_all(user, mozilla_head_pat)[2],
crawler);
}
# Some simple, common user names.
if ( misc_user_pat in user )
return user;
# Require some info removal.
if ( misc_cmplx_user_pat in user )
# Keep only the product/version prefix.
return split_all(user, misc_cmplx_user_start)[2];
print http_anon_log,fmt("*** unknown user agent %s\n", user);
return fmt("(bro: unknown) %s", anonymize_arg("user-agent", user));
}
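Illustrative calls (hypothetical header values):

    print filter_in_http_useragent("mozilla/4.0 (compatible; msie 6.0; windows nt 5.1)");
    print filter_in_http_useragent("wget/1.9.1 (linux-gnu)");   # trimmed to the product/version prefix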

View file

@ -1,164 +0,0 @@
# $Id:$
@load anon
global http_anon_log = open_log_file("http-anon") &redef;
const URI_proto_pat = /^ *([a-zA-Z]+)\:\/\// ;
const known_URI_proto_pat = /^ *(http|https|ftp|ssh)\:\/\// ;
const host_pat = / *^([\-0-9a-zA-Z]+\.)+([\_\-0-9a-zA-Z])*/ ;
const port_pat = /^ *(\:[0-9]+\.)/ ;
const query_pat = /\?/ ;
function anonymize_http_URI(URI: string): string
{
URI = to_lower(URI);
# Strip off protocol.
local proto = "";
if ( URI_proto_pat in URI )
{
local proto_part = split(URI, /\:\/\//);
# Check if we know the protocol. If not, flag it so we
# can update our protocol database.
if ( known_URI_proto_pat !in URI )
{
print http_anon_log,
fmt("*** protocol %s unknown ", proto_part[1]);
proto_part[1] =
string_cat(" (bro: unknown) ",
anonymize_arg("proto", proto_part[1]));
}
proto = string_cat(proto_part[1],"://");
URI = proto_part[2];
}
# Strip off domain.
local host = "";
if ( host_pat in URI )
{
local base_parts =
split_all(URI, / *^([\-\_0-9a-z]+\.)+[\-\_0-9a-z]*/);
if ( |base_parts| < 2 )
{
print http_anon_log,
fmt (" XXXXXXXXXXXXXXXXXXXXXX BASE %s", URI);
return " XXXX processing error XXXX";
}
if ( |base_parts| == 2 )
URI = "";
else if ( |base_parts| == 3)
URI = base_parts[3];
else if ( |base_parts| > 3)
{
local patch_me = "";
local i = 1;
# Bro 1.x lacks a "while" loop, so the iteration over base_parts
# merely supplies the trip count; base_parts is indexed explicitly
# to preserve order, skipping the host component at index 2.
for ( part in base_parts )
{
if ( i != 2 )
patch_me = string_cat(patch_me,
base_parts[i]);
i += 1;
}
URI = patch_me;
}
if ( host == simple_filename )
host = anonymize_path(host);
else
host = anonymize_host(base_parts[2]);
}
# Strip off port (if it exists).
local pport = "";
if ( port_pat in URI )
{
# Port anonymization is not implemented yet; make that hard to miss.
print "XXXXX anon.bro doing nothing with port XXXXXXXXXXX ";
}
# Handle query (if it exists).
local tail = "";
if ( URI == "/" )
{
# -- pass
}
else if ( query_pat in URI )
{
local query_part = split(URI, /\?/);
tail = fmt("%s?%s",
anonymize_path(query_part[1]),
anonymize_path(query_part[2]));
}
else
tail = anonymize_path(URI);
tail = string_cat("/", tail);
return fmt("%s%s%s%s", proto, host, pport, tail);
}
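An illustrative call (hypothetical URI): the scheme is preserved, while host and path components pass through anonymize_host() and anonymize_path():

    print anonymize_http_URI("http://www.example.com/docs/secret.html?id=42");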
const a_href_pat = /.*\< *a *href.*\>.*/ ;
#/.*\< *a *href *= *\"[[:print:]]+\" *\>.*/;
# Doesn't get everything ... but works for most.
const a_href_split =
/\< *a *href *= *(\\)?(\"|\')?([0-9a-z\/._!\[\]():*;~&|$\\=+\-?%@])+(\\)?(\"|\')?/ ;
# Elegant ... yeah ... really .. :-/
const file_split =
/(\"|\')([0-9a-z\/._!\[\]():*;~&|$\\=+\-?%@])+(\"|\')/ ;
const file_strip_split = /([0-9a-z\/._!\[\]():*;~&|$\\=+\-?%@])+/ ;
function http_doc_link_list(abstract: string): string
{
abstract = to_lower(abstract);
if ( abstract == "" )
return abstract;
local concat_key = "";
local href_parts = split_all(abstract, a_href_split);
for ( part in href_parts )
{
if ( href_parts[part] == a_href_split )
{
local file_parts =
split_all(href_parts[part], file_split);
for ( a_part in file_parts )
{
if ( file_parts[a_part] == file_split )
{
local file_strip_parts =
split_all(file_parts[a_part],
file_strip_split);
concat_key = fmt("%s %s", concat_key,
anonymize_http_URI(file_strip_parts[2]));
}
}
}
}
return concat_key;
}
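An illustrative call (hypothetical markup): each href target found in the abstract is anonymized via anonymize_http_URI() and the results are concatenated:

    print http_doc_link_list("<a href=\"http://www.example.com/a.html\">see</a>");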

View file

@ -1,60 +0,0 @@
# $Id: http-body.bro 5230 2008-01-14 01:38:18Z vern $
# Counts length of data.
#
# If log_HTTP_data = T, it also outputs an abstract of data.
@load http
module HTTP;
redef process_HTTP_data = T;
redef log_HTTP_data = T;
export {
# If the following is > 0, logged content is truncated to this many
# bytes.
global content_truncation_limit = 40 &redef;
}
event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
{
local s = lookup_http_request_stream(c);
local msg = get_http_message(s, is_orig);
local len = byte_len(data);
msg$data_length = msg$data_length + length;
if ( log_HTTP_data )
{
local abstract: string;
if ( content_truncation_limit > 0 &&
len > content_truncation_limit )
abstract = cat(sub_bytes(data, 1, content_truncation_limit), "...");
else
abstract = data;
print http_log, fmt("%.6f %s %s %d bytes: \"%s\"",
network_time(), s$id,
is_orig ? "=>" : "<=", length,
abstract);
}
}
event http_message_done(c: connection, is_orig: bool, stat: http_message_stat)
{
local s = lookup_http_request_stream(c);
local msg = get_http_message(s, is_orig);
# This check is for debugging purposes only.
if ( msg$data_length > 0 &&
stat$body_length != msg$data_length + stat$content_gap_length)
{
# This can happen for multipart messages with a
# 'content-length' header, which is not required for multipart
# messages.
# Log::warning(fmt("length mismatch: %s %d %d %d",
# id_string(c$id), stat$body_length, msg$data_length,
# stat$content_gap_length));
}
}
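The truncation limit is &redef-able; for example, to log entity data in full (hypothetical local setting):

    @load http-body
    redef HTTP::content_truncation_limit = 0;   # 0 disables truncation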

View file

@ -1,45 +0,0 @@
@load http
module HTTP;
export {
redef enum Notice += {
PasswordFullFetch, # they got back the whole thing
PasswordShadowFetch, # they got back a shadowed version
};
# Pattern to search for in replies indicating that a full password
# file was returned.
const full_fetch =
/[[:alnum:]]+\:[[:alnum:]]+\:[[:digit:]]+\:[[:digit:]]+\:/
&redef;
# Same, but indicating a shadow password file was returned.
const shadow_fetch =
/[[:alnum:]]+\:\*\:[[:digit:]]+\:[[:digit:]]+\:/
&redef;
}
event http_entity_data(c: connection, is_orig: bool, length: count, data: string)
{
local s = lookup_http_request_stream(c);
local n = s$first_pending_request;
if ( n !in s$requests )
return;
local req = s$requests[n];
local passwd_request = req$passwd_req;
if ( ! passwd_request )
return;
if ( full_fetch in data )
NOTICE([$note=PasswordFullFetch,
$conn=c, $method=req$method, $URL=req$URI,
$msg=fmt("%s %s: %s %s", id_string(c$id), c$addl,
req$method, req$URI)]);
else if ( shadow_fetch in data )
NOTICE([$note=PasswordShadowFetch,
$conn=c, $method=req$method, $URL=req$URI,
$msg=fmt("%s %s: %s %s", id_string(c$id), c$addl,
req$method, req$URI)]);
}
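For reference, the kind of reply lines these patterns are meant to catch (illustrative values, not real credentials):

    # "root:hGm0yafJ:0:0:..."   matches full_fetch   -> PasswordFullFetch
    # "root:*:0:0:..."          matches shadow_fetch -> PasswordShadowFetch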

Some files were not shown because too many files have changed in this diff.