Merge remote branch 'origin/master' into topic/seth/ssl-binpac

Conflicts:
	src/bro.bif
Seth Hall 2011-05-23 17:09:41 -04:00
commit 15bfa23ce1
222 changed files with 2711 additions and 1846 deletions

CHANGES

@ -1,3 +1,27 @@
1.6-dev.99 Fri Apr 22 22:10:03 PDT 2011
- Extending the connection record with a unique identifier. (Robin
Sommer)
type connection: record {
[...]
id: string;
};
These identifiers are very likely unique even across independent Bro
runs.
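
  For illustration only, a minimal sketch (not part of the change
  itself) of reading the new field from a script, using the standard
  connection_state_remove event::

      event connection_state_remove(c: connection)
          {
          print fmt("%s -> %s had uid %s",
                    c$id$orig_h, c$id$resp_h, c$uid);
          }
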
- Delete operator for record fields. (Robin Sommer)
"delete x$y" now resets record field "x" back to its original state
if it is either &optional or has a &default. "delete" may not be
used with non-optional/default fields.
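
  A minimal sketch of the new semantics (record and field names are
  illustrative only, assuming a record whose fields are all
  &optional/&default can be instantiated as a local)::

      type Foo: record {
          x: count &default=42;
          y: string &optional;
      };

      event bro_init()
          {
          local f: Foo;
          f$y = "abc";
          delete f$y;   # y is &optional: back to unset
          delete f$x;   # x has a &default: back to 42
          }
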
- Fixing bug with nested record coercions. (Robin Sommer)
- Fixing a do_split() bug. (Seth Hall)
1.6-dev.94 Thu Apr 21 19:51:38 PDT 2011
- Fixing generation of config.h. (Jon Siwek)


@ -23,13 +23,13 @@ install: configured
clean: configured
( cd $(BUILD) && make clean )
( cd $(BUILD) && make doc-clean )
( cd $(BUILD) && make docclean && make restclean )
doc: configured
( cd $(BUILD) && make doc )
doc-clean: configured
( cd $(BUILD) && make doc-clean )
docclean: configured
( cd $(BUILD) && make docclean && make restclean )
dist: cmake_version
# Minimum Bro source package


@ -1 +1 @@
1.6-dev.94
1.6-dev.99

@ -1 +1 @@
Subproject commit 43994f50b320ff9e1d129b87bbe3a26dbfb3e33d
Subproject commit 4fc13f7c6987b4163609e3df7a31f38501411cb7

@ -1 +1 @@
Subproject commit 3ec53f69fd45d8e2c0950dc0e7af117bb3e02c0d
Subproject commit 14a7cfe4ea2ff6c7f5301dcb81a869adcd6e9834

@ -1 +1 @@
Subproject commit 9c5d8c882411dc26de8ef92ff5d77b524b9d48ee
Subproject commit 8843da57dc8aee433550727dcbd1199824ca9da4

@ -1 +1 @@
Subproject commit 7f337459574d027559e419219e83c65d8a6e1c23
Subproject commit 1bf5407722ef5910bafd513bcec6a51b280eeb10

@ -1 +1 @@
Subproject commit aee56f4632c9a4224727da1b535b673a4bcb5d3b
Subproject commit f096c0e4088f2d92743e0c28077f086dff216cce


@ -63,10 +63,7 @@ endmacro(SetPackageVersion)
#
# Darwin - PackageMaker
# Linux - RPM if the platform has rpmbuild installed
# DEB is omitted because CPack does not give enough
# control over how the package is created and lacks support
# for automatic dependency detection.
#
# DEB if the platform has dpkg-shlibdeps installed
#
# CPACK_GENERATOR is set by this macro
# CPACK_SOURCE_GENERATOR is set by this macro
@ -77,9 +74,14 @@ macro(SetPackageGenerators)
list(APPEND CPACK_GENERATOR PackageMaker)
elseif (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
find_program(RPMBUILD_EXE rpmbuild)
find_program(DPKGSHLIB_EXE dpkg-shlibdeps)
if (RPMBUILD_EXE)
set(CPACK_GENERATOR ${CPACK_GENERATOR} RPM)
endif ()
if (DPKGSHLIB_EXE)
set(CPACK_GENERATOR ${CPACK_GENERATOR} DEB)
set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS true)
endif ()
endif ()
endmacro(SetPackageGenerators)
@ -159,9 +161,25 @@ macro(SetPackageInstallScripts VERSION)
endif ()
if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
# DEB packages can automatically handle configuration files
# if provided in a "conffiles" file in the packaging
set(conffiles_file ${CMAKE_CURRENT_BINARY_DIR}/conffiles)
if (INSTALLED_CONFIG_FILES)
string(REPLACE " " ";" conffiles ${INSTALLED_CONFIG_FILES})
endif ()
file(WRITE ${conffiles_file} "")
foreach (_file ${conffiles})
file(APPEND ${conffiles_file} "${_file}\n")
endforeach ()
list(APPEND CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA
${CMAKE_CURRENT_BINARY_DIR}/conffiles)
# RPMs don't need any explicit direction regarding config files.
# Leaving the set of installed config files empty will just
# bypass the logic in the pre/post install scripts and let
# the RPM do their own thing (regarding backups, etc.)
# bypass the logic in the default pre/post install scripts and let
# the RPMs/DEBs do their own thing (regarding backups, etc.)
# when upgrading packages.
set(INSTALLED_CONFIG_FILES "")
endif ()
@ -171,10 +189,16 @@ macro(SetPackageInstallScripts VERSION)
${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_preinstall.sh.in
${CMAKE_CURRENT_BINARY_DIR}/package_preinstall.sh
@ONLY)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_preinstall.sh.in
${CMAKE_CURRENT_BINARY_DIR}/preinst
@ONLY)
set(CPACK_PREFLIGHT_SCRIPT
${CMAKE_CURRENT_BINARY_DIR}/package_preinstall.sh)
set(CPACK_RPM_PRE_INSTALL_SCRIPT_FILE
${CMAKE_CURRENT_BINARY_DIR}/package_preinstall.sh)
list(APPEND CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA
${CMAKE_CURRENT_BINARY_DIR}/preinst)
endif ()
if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_postupgrade.sh.in)
@ -182,10 +206,16 @@ macro(SetPackageInstallScripts VERSION)
${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_postupgrade.sh.in
${CMAKE_CURRENT_BINARY_DIR}/package_postupgrade.sh
@ONLY)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/cmake/package_postupgrade.sh.in
${CMAKE_CURRENT_BINARY_DIR}/postinst
@ONLY)
set(CPACK_POSTUPGRADE_SCRIPT
${CMAKE_CURRENT_BINARY_DIR}/package_postupgrade.sh)
set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE
${CMAKE_CURRENT_BINARY_DIR}/package_postupgrade.sh)
list(APPEND CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA
${CMAKE_CURRENT_BINARY_DIR}/postinst)
endif ()
endmacro(SetPackageInstallScripts)


@ -48,21 +48,13 @@ if [ -n "${sampleFiles}" ]; then
EOF
fi
# make sure that world-writeable dirs have the sticky bit set
# so that unprivileged users can't rename/remove files within
if [ -d /var/opt/bro/spool ]; then
chmod +t /var/opt/bro/spool
fi
if [ -d /var/opt/bro/spool/tmp ]; then
chmod +t /var/opt/bro/spool/tmp
fi
if [ -d /var/opt/bro/spool/policy ]; then
chmod +t /var/opt/bro/spool/policy
fi
if [ -d /var/opt/bro/logs ]; then
chmod +t /var/opt/bro/logs
# Set up world-writeable spool and logs directories for broctl, making sure
# to set the sticky bit so that unprivileged users can't rename/remove files.
# (CMake/CPack is supposed to install them, but has problems with empty dirs)
if [ -n "@EMPTY_WORLD_DIRS@" ]; then
for dir in @EMPTY_WORLD_DIRS@; do
mkdir -p ${dir}
chmod 777 ${dir}
chmod +t ${dir}
done
fi


@ -1,42 +1 @@
set(POLICY_SRC_DIR ${PROJECT_SOURCE_DIR}/policy)
set(DOC_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/out)
set(DOC_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/source)
set(DOC_SOURCE_WORKDIR ${CMAKE_CURRENT_BINARY_DIR}/source)
file(GLOB_RECURSE DOC_SOURCES FOLLOW_SYMLINKS "*")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in
${CMAKE_CURRENT_BINARY_DIR}/conf.py
@ONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/scripts/generate_reST_docs.py.in
${CMAKE_CURRENT_BINARY_DIR}/generate_reST_docs.py
@ONLY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/scripts/BroToReST.py.in
${CMAKE_CURRENT_BINARY_DIR}/BroToReST.py
@ONLY)
add_custom_target(doc
COMMAND "${CMAKE_COMMAND}" -E copy_directory
${DOC_SOURCE_DIR}
${DOC_SOURCE_WORKDIR}
COMMAND python generate_reST_docs.py
COMMAND sphinx-build
-b html
-c ${CMAKE_CURRENT_BINARY_DIR}
-d ${DOC_OUTPUT_DIR}/doctrees
${DOC_SOURCE_WORKDIR}
${DOC_OUTPUT_DIR}/html
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "[Sphinx] Generating Script Documentation"
VERBATIM
# SOURCES just adds stuff to IDE projects as a convenience
SOURCES ${DOC_SOURCES})
add_dependencies(doc bro doc-clean)
add_custom_target(doc-clean
COMMAND "${CMAKE_COMMAND}" -E remove_directory
${CMAKE_CURRENT_BINARY_DIR}/source
COMMAND "${CMAKE_COMMAND}" -E remove_directory
${DOC_OUTPUT_DIR}
VERBATIM)
add_subdirectory(scripts)


@ -1,44 +1 @@
This directory contains scripts and templates that can be used to automate
the generation of Bro script documentation. Two build targets are defined
by CMake:
``make doc``
This target depends on a Python interpreter (>=2.5) and
`Sphinx <http://sphinx.pocoo.org/>`_ being installed. Sphinx can be
installed like::
> sudo easy_install sphinx
This target will also first build the bro binary if it is not already built,
since the generation of reStructuredText (reST) documentation from
Bro scripts is integrated within the parsing process.
After completion, HTML documentation can be located inside the CMake
``build/`` directory as ``build/doc/out/html``. The generated reST
documentation will be located in ``build/doc/source/policy``.
``make doc-clean``
This target removes Sphinx inputs and outputs from the CMake ``build/`` dir.
To schedule a script to be documented, edit ``scripts/generate_reST_docs.py.in``
and try adding the name of the script along with an optional script group to
the ``docs`` dictionary. That python script also shows other, more specialized
methods for generating documentation for certain corner cases.
When adding a new logical grouping for generated scripts, create a new
reST document in ``source/policy/<group_name>.rst`` and add some default
documentation. References to (and summaries of) documents associated with
the group get appended to this file during the ``make doc`` process.
The Sphinx source tree template in ``source/`` can be modified to add more
common/general documentation, style sheets, JavaScript, etc. The Sphinx
config file is produced from ``conf.py.in``, so that can be edited to change
various Sphinx options, like setting the default HTML rendering theme.
There is also a custom Sphinx domain implemented in ``source/ext/bro.py``
which adds some reST directives and roles that aid in generating useful
index entries and cross-references.
See ``example.bro`` for an example of how to document a Bro script such that
``make doc`` will be able to produce reST/HTML documentation for it.
TODO


@ -1,152 +0,0 @@
#! /usr/bin/env python
import os
import subprocess
import shutil
import glob
import string
import sys
BRO = "@CMAKE_BINARY_DIR@/src/bro"
BROPATHDEV = "`@CMAKE_BINARY_DIR@/bro-path-dev`"
BRO_ARGS = "--doc-scripts"
DOC_DST_DIR = "@DOC_SOURCE_WORKDIR@/policy"
BROPATH = subprocess.Popen("@CMAKE_BINARY_DIR@/bro-path-dev", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.readline()
class BroToReST:
"""A class to encapsulate the the generation of reST documentation from
a given Bro script.
"""
bro_src_file = None
doc_src_file = None
load_via_stdin = False
group = None
def __init__(self, src_file, load_method=False, search_dir=None, group=None):
"""
:param src_file: the file name of a Bro script (not a path)
:param load_method: T if script must be loaded by Bro via a stdin
redirection of "@load <script>", F if script can be loaded as
a command line argument to Bro
:param search_dir: a list of directories in which to search for
src_file. If None, the default BROPATH is used.
:param group: a string representing a logical group that the script's
documentation should belong to. A corresponding <group>.rst
document must be pre-existing in the policy/ dir of the source tree
used by Sphinx.
"""
self.bro_src_file = FindBroScript(src_file, search_dir)
self.load_via_stdin = load_method
self.group = group
# formulate doc_src_file from src_file
filename = os.path.basename(src_file)
basename, ext = os.path.splitext(filename)
if ext == ".bro":
self.doc_src_file = basename + ".rst"
else:
self.doc_src_file = filename + ".rst"
def __str__(self):
return "bro_src_file: " + self.bro_src_file \
+ "\ndoc_src_file: " + self.doc_src_file \
+ "\ndoc_dst_file: " + os.path.join(DOC_DST_DIR, self.doc_src_file) \
+ "\nstdin_load: %s" % self.load_via_stdin \
+ "\ngroup: %s" % self.group
def GenDoc(self):
"""Generates the reST documentation for a Bro script and copies
both the documentation and original script into Sphinx source tree.
If the documentation belongs to a group, the necessary modifications
to add it to the group's documentation are done. Afterwards, any
files with a ".rst" suffix are removed from the working directory.
"""
bro_src_basename = os.path.basename(self.bro_src_file)
if self.load_via_stdin:
cmd = "echo '@load %s' | %s %s" % (bro_src_basename, BRO, BRO_ARGS)
else:
cmd = "%s %s %s" % (BRO, BRO_ARGS, self.bro_src_file)
p = subprocess.Popen(cmd, shell=True, env={"BROPATH": BROPATH})
if p.wait() == 0:
shutil.copy(self.doc_src_file, DOC_DST_DIR)
shutil.copy(self.bro_src_file, DOC_DST_DIR)
AppendToDocGroup(self.group, self.bro_src_file, self.doc_src_file)
for leftover in glob.glob("*.rst"):
os.remove(leftover)
def GenDocs(doc_dict, load_method=False):
"""Generates reST documentation for all scripts in the given dictionary.
:param doc_dict: a dictionary whose keys are file names of Bro scripts
(not paths), and whose value is the logical documentation group
it belongs to
:param load_method: T if script must be loaded by Bro via a stdin
redirection of "@load <script>", F if script can be loaded as
a command line argument to Bro
"""
for k, v in doc_dict.iteritems():
doc = BroToReST(k, load_method, group=v)
print "Generating reST document for " + k
doc.GenDoc()
def FindBroScript(src_file, search_dir=None):
"""Search a set of paths for a given Bro script and return the absolute
path to it.
:param src_file: the file name of a Bro script (not a path)
:param search_dir: a list of directories in which to search for
src_file. If None, the default BROPATH is used.
"""
if search_dir is None:
search_dir = string.split(BROPATH, ":")
for path in search_dir:
abs_path = os.path.join(path, src_file)
if os.path.exists(abs_path):
return abs_path
print >> sys.stderr, "Couldn't find '%s'" % src_file
return None
def AppendToDocGroup(group, src_file, doc_file):
"""Adds a reference to the given documentation for a Bro script
to the documentation file for its associated group. Also, associated
summary text (comments marked up like "##!" in the original Bro script
source) is added.
:param group: a string representing a logical group that the script's
documentation should belong to. A corresponding <group>.rst
document must be pre-existing in the policy/ dir of the source tree
used by Sphinx.
:param src_file: a path to the original Bro script source file
:param doc_file: the file name of a script's generated reST document
"""
if group is None:
return
group_file = os.path.join(DOC_DST_DIR, group + ".rst")
if not os.path.exists(group_file):
print >> sys.stderr, "Group file doesn't exist: " + group_file
return
summary_comments = []
with open(src_file, 'r') as f:
for line in f:
sum_pos = string.find(line, "##!")
if sum_pos != -1:
summary_comments.append(line[(sum_pos+3):])
doc_name, ext = os.path.splitext(doc_file)
with open(group_file, 'a') as f:
f.write("\n:doc:`%s`\n" % doc_name)
for line in summary_comments:
f.write(line)

doc/scripts/CMakeLists.txt (new file)

@ -0,0 +1,264 @@
set(POLICY_SRC_DIR ${PROJECT_SOURCE_DIR}/policy)
set(BIF_SRC_DIR ${PROJECT_SOURCE_DIR}/src)
set(RST_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/rest_output)
set(DOC_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/out)
set(DOC_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/source)
set(DOC_SOURCE_WORKDIR ${CMAKE_CURRENT_BINARY_DIR}/source)
file(GLOB_RECURSE DOC_SOURCES FOLLOW_SYMLINKS "*")
# configure the Sphinx config file (expand variables CMake might know about)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in
${CMAKE_CURRENT_BINARY_DIR}/conf.py
@ONLY)
# find out what BROPATH to use when executing bro
execute_process(COMMAND ${CMAKE_BINARY_DIR}/bro-path-dev
OUTPUT_VARIABLE BROPATH
RESULT_VARIABLE retval
OUTPUT_STRIP_TRAILING_WHITESPACE)
if (NOT ${retval} EQUAL 0)
message(FATAL_ERROR "Problem setting BROPATH")
endif ()
# This macro is used to add a new makefile target for reST policy script
# documentation that can be generated using Bro itself to parse policy scripts.
# It's called like:
#
# rest_target(srcDir broInput [group])
#
# srcDir: the directory which contains broInput
# broInput: the file name of a bro policy script
# group: optional name of group that the script documentation will belong to
#
# In addition to adding the makefile target, several CMake variables are set:
#
# MASTER_POLICY_INDEX_TEXT: a running list of policy scripts docs that have
# been generated so far, formatted such that it can be appended to a file
# that ends in a Sphinx toctree directive
# ALL_REST_OUTPUTS: a running list (the CMake list type) of all reST docs
# that are to be generated
# MASTER_GROUP_LIST: a running list (the CMake list type) of all script groups
# ${group}_files: a running list of files belonging to a given group, from
# which summary text can be extracted at build time
# ${group}_doc_names: a running list of reST style document names that can be
# given to a :doc: role; it shares indices with ${group}_files
#
macro(REST_TARGET srcDir broInput)
get_filename_component(basename ${broInput} NAME_WE)
get_filename_component(extension ${broInput} EXT)
get_filename_component(relDstDir ${broInput} PATH)
set(sumTextSrc ${srcDir}/${broInput})
if (${extension} STREQUAL ".bif.bro")
set(basename "${basename}.bif")
# the summary text is taken at configure time, but .bif.bro files
# may not have been generated yet, so read .bif file instead
set(sumTextSrc ${BIF_SRC_DIR}/${basename})
elseif (${extension} STREQUAL ".init")
set(basename "${basename}.init")
endif ()
set (restFile "${basename}.rst")
if (NOT relDstDir)
set(docName "${basename}")
set(dstDir "${RST_OUTPUT_DIR}")
else ()
set(docName "${relDstDir}/${basename}")
set(dstDir "${RST_OUTPUT_DIR}/${relDstDir}")
endif ()
set(restOutput "${dstDir}/${restFile}")
set(indexEntry " ${docName} <${docName}>")
set(MASTER_POLICY_INDEX_TEXT "${MASTER_POLICY_INDEX_TEXT}\n${indexEntry}")
list(APPEND ALL_REST_OUTPUTS ${restOutput})
if (NOT "${ARGN}" STREQUAL "")
set(group ${ARGN})
# add group to master group list if not already in it
list(FIND MASTER_GROUP_LIST ${group} _found)
if (_found EQUAL -1)
list(APPEND MASTER_GROUP_LIST ${group})
if (MASTER_GROUP_LIST_TEXT)
set(MASTER_GROUP_LIST_TEXT "${MASTER_GROUP_LIST_TEXT}\n${group}")
else ()
set(MASTER_GROUP_LIST_TEXT "${group}")
endif ()
endif ()
list(APPEND ${group}_files ${sumTextSrc})
list(APPEND ${group}_doc_names ${docName})
else ()
set(group "")
endif ()
if (${group} STREQUAL "default" OR ${group} STREQUAL "bifs")
set(BRO_ARGS --doc-scripts --exec '')
else ()
set(BRO_ARGS --doc-scripts ${srcDir}/${broInput})
endif ()
add_custom_command(OUTPUT ${restOutput}
# delete any leftover state from previous bro runs
COMMAND "${CMAKE_COMMAND}"
ARGS -E remove_directory .state
# generate the reST documentation using bro
COMMAND BROPATH=${BROPATH} ${CMAKE_BINARY_DIR}/src/bro
ARGS ${BRO_ARGS} || (rm -rf .state *.log *.rst && exit 1)
# move generated doc into a new directory tree that
# defines the final structure of documents
COMMAND "${CMAKE_COMMAND}"
ARGS -E make_directory ${dstDir}
COMMAND "${CMAKE_COMMAND}"
ARGS -E copy ${restFile} ${restOutput}
# copy the bro policy script, too
COMMAND "${CMAKE_COMMAND}"
ARGS -E copy ${srcDir}/${broInput} ${dstDir}
# clean up the build directory
COMMAND rm
ARGS -rf .state *.log *.rst
DEPENDS bro
DEPENDS ${srcDir}/${broInput}
COMMENT "[Bro] Generating reST docs for ${broInput}"
)
endmacro(REST_TARGET)
# Schedule Bro scripts for which to generate documentation.
# Note: the script may be located in a subdirectory off of one of the main
# directories in BROPATH. In that case, just list the script as 'foo/bar.bro'
rest_target(${POLICY_SRC_DIR} alarm.bro user)
rest_target(${POLICY_SRC_DIR} arp.bro user)
rest_target(${POLICY_SRC_DIR} conn.bro user)
rest_target(${POLICY_SRC_DIR} dhcp.bro user)
rest_target(${POLICY_SRC_DIR} dns.bro user)
rest_target(${POLICY_SRC_DIR} ftp.bro user)
rest_target(${POLICY_SRC_DIR} http.bro user)
rest_target(${POLICY_SRC_DIR} http-reply.bro user)
rest_target(${POLICY_SRC_DIR} http-request.bro user)
rest_target(${POLICY_SRC_DIR} irc.bro user)
rest_target(${POLICY_SRC_DIR} smtp.bro user)
rest_target(${POLICY_SRC_DIR} ssl.bro user)
rest_target(${POLICY_SRC_DIR} ssl-ciphers.bro user)
rest_target(${POLICY_SRC_DIR} ssl-errors.bro user)
rest_target(${POLICY_SRC_DIR} synflood.bro user)
rest_target(${POLICY_SRC_DIR} tcp.bro user)
rest_target(${POLICY_SRC_DIR} udp.bro user)
rest_target(${POLICY_SRC_DIR} weird.bro user)
rest_target(${CMAKE_CURRENT_SOURCE_DIR} example.bro internal)
# Finding out what scripts bro will generate documentation for by default
# can be done like: `bro --doc-scripts --exec ""`
rest_target(${POLICY_SRC_DIR} bro.init default)
rest_target(${POLICY_SRC_DIR} logging-ascii.bro default)
rest_target(${POLICY_SRC_DIR} logging.bro default)
rest_target(${POLICY_SRC_DIR} pcap.bro default)
rest_target(${POLICY_SRC_DIR} server-ports.bro default)
rest_target(${CMAKE_BINARY_DIR}/src bro.bif.bro bifs)
rest_target(${CMAKE_BINARY_DIR}/src const.bif.bro bifs)
rest_target(${CMAKE_BINARY_DIR}/src event.bif.bro bifs)
rest_target(${CMAKE_BINARY_DIR}/src logging.bif.bro bifs)
rest_target(${CMAKE_BINARY_DIR}/src strings.bif.bro bifs)
rest_target(${CMAKE_BINARY_DIR}/src types.bif.bro bifs)
# create temporary list of all docs to include in the master policy/index file
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/tmp_policy_index
"${MASTER_POLICY_INDEX_TEXT}")
# create temporary file containing list of all groups
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/group_list
"${MASTER_GROUP_LIST_TEXT}")
# create temporary files containing list of each source file in a given group
foreach (group ${MASTER_GROUP_LIST})
if (EXISTS ${CMAKE_CURRENT_BINARY_DIR}/${group}_files)
file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/${group}_files)
endif ()
if (EXISTS ${CMAKE_CURRENT_BINARY_DIR}/${group}_doc_names)
file(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/${group}_doc_names)
endif ()
foreach (src ${${group}_files})
file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/${group}_files "${src}\n")
endforeach ()
foreach (dname ${${group}_doc_names})
file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/${group}_doc_names "${dname}\n")
endforeach ()
endforeach ()
# remove previously generated docs no longer scheduled for generation
if (EXISTS ${RST_OUTPUT_DIR})
file(GLOB_RECURSE EXISTING_REST_DOCS "${RST_OUTPUT_DIR}/*.rst")
foreach (_doc ${EXISTING_REST_DOCS})
list(FIND ALL_REST_OUTPUTS ${_doc} _found)
if (_found EQUAL -1)
file(REMOVE ${_doc})
message(STATUS "Removing stale reST doc: ${_doc}")
endif ()
endforeach ()
endif ()
# The "restdoc" target uses Bro to parse policy scripts in order to
# generate reST documentation from them.
add_custom_target(restdoc
# create symlink to the reST output directory for convenience
COMMAND "${CMAKE_COMMAND}" -E create_symlink
${RST_OUTPUT_DIR}
${CMAKE_BINARY_DIR}/reST
DEPENDS ${ALL_REST_OUTPUTS})
# The "restclean" target removes all generated reST documentation from the
# build directory.
add_custom_target(restclean
COMMAND "${CMAKE_COMMAND}" -E remove_directory
${RST_OUTPUT_DIR}
VERBATIM)
# The "doc" target generates reST documentation for any outdated bro scripts
# and then uses Sphinx to generate HTML documentation from the reST
add_custom_target(doc
# copy the template documentation to the build directory
# to give as input for sphinx
COMMAND "${CMAKE_COMMAND}" -E copy_directory
${DOC_SOURCE_DIR}
${DOC_SOURCE_WORKDIR}
# copy generated policy script documentation into the
# working copy of the template documentation
COMMAND "${CMAKE_COMMAND}" -E copy_directory
${RST_OUTPUT_DIR}
${DOC_SOURCE_WORKDIR}/policy
# append to the master index of all policy scripts
COMMAND cat ${CMAKE_CURRENT_BINARY_DIR}/tmp_policy_index >>
${DOC_SOURCE_WORKDIR}/policy/index.rst
# construct a reST file for each group
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/group_index_generator.py
${CMAKE_CURRENT_BINARY_DIR}/group_list
${CMAKE_CURRENT_BINARY_DIR}
${DOC_SOURCE_WORKDIR}
# tell sphinx to generate html
COMMAND sphinx-build
-b html
-c ${CMAKE_CURRENT_BINARY_DIR}
-d ${DOC_OUTPUT_DIR}/doctrees
${DOC_SOURCE_WORKDIR}
${DOC_OUTPUT_DIR}/html
# create symlink to the html output directory for convenience
COMMAND "${CMAKE_COMMAND}" -E create_symlink
${DOC_OUTPUT_DIR}/html
${CMAKE_BINARY_DIR}/html
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "[Sphinx] Generating HTML policy script docs"
# SOURCES just adds stuff to IDE projects as a convenience
SOURCES ${DOC_SOURCES})
# The "docclean" target removes just the Sphinx input/output directories
# from the build directory.
add_custom_target(docclean
COMMAND "${CMAKE_COMMAND}" -E remove_directory
${DOC_SOURCE_WORKDIR}
COMMAND "${CMAKE_COMMAND}" -E remove_directory
${DOC_OUTPUT_DIR}
VERBATIM)
add_dependencies(doc docclean restdoc)

doc/scripts/README (new file)

@ -0,0 +1,61 @@
This directory contains scripts and templates that can be used to automate
the generation of Bro script documentation. Several build targets are defined
by CMake:
``restdoc``
This target uses Bro to parse policy scripts in order to generate
reStructuredText (reST) documentation from them. The list of scripts
for which to generate reST documentation is defined in the
``CMakeLists.txt`` file in this directory. Script documentation is
rebuilt automatically if the policy script from which it is derived
or the Bro binary becomes out of date.
The resulting output from this target can be found in the CMake
``build/`` directory inside ``reST`` (a symlink to
``doc/scripts/rest_output``).
``doc``
This target depends on a Python interpreter (>=2.5) and
`Sphinx <http://sphinx.pocoo.org/>`_ being installed. Sphinx can be
installed like::
> sudo easy_install sphinx
This target will first build the ``restdoc`` target and then copy the
resulting reST files as an input directory to Sphinx.
After completion, HTML documentation can be located in the CMake
``build/`` directory inside ``html`` (a symlink to
``doc/scripts/out/html``).
``restclean``
This target removes any reST documentation that has been generated so far.
``docclean``
This target removes Sphinx inputs and outputs from the CMake ``build/`` dir.
To schedule a script to be documented, edit ``CMakeLists.txt`` inside this
directory and add a call to the ``rest_target()`` macro. Calling that macro
with a group name for the script is optional, but if not given, the only
link to the script will be in the master TOC tree for all policy scripts.
When adding a new logical grouping for generated scripts, create a new
reST document in ``source/<group_name>.rst`` and add some default
documentation for the group. References to (and summaries of) documents
associated with the group get appended to this file during the
``make doc`` process.
The Sphinx source tree template in ``source/`` can be modified to add more
common/general documentation, style sheets, JavaScript, etc. The Sphinx
config file is produced from ``conf.py.in``, so that can be edited to change
various Sphinx options, like setting the default HTML rendering theme.
There is also a custom Sphinx domain implemented in ``source/ext/bro.py``
which adds some reST directives and roles that aid in generating useful
index entries and cross-references.
See ``example.bro`` for an example of how to document a Bro script such that
``make doc`` will be able to produce reST/HTML documentation for it.


@ -6,6 +6,18 @@
##!
##! .. tip:: You can embed directives and roles within ``##``-stylized comments.
##!
##! A script's logging information has to be documented manually as minimally
##! shown below. Note that references may not always be possible (e.g.
##! anonymous filter functions) and a script may not need to document
##! each of "columns", "event", "filter" depending on exactly what it's doing.
##!
##! **Logging Stream ID:** :bro:enum:`Example::EXAMPLE`
##! :Columns: :bro:type:`Example::Info`
##! :Event: :bro:id:`Example::log_example`
##! :Filter: ``example-filter``
##! uses :bro:id:`Example::filter_func` to determine whether to
##! exclude the ``ts`` field
##!
##! :Author: Jon Siwek <jsiwek@ncsa.illinois.edu>
# Comments that use a single pound sign (#) are not significant to
@ -64,6 +76,12 @@ redef enum Notice += {
Notice_Four,
};
# Redef'ing the ID enumeration for logging streams is automatically tracked.
# Comments of the "##" form can be used to further document it, but it's
# better to do all documentation related to logging in the summary section
# as is shown above.
redef enum Log::ID += { EXAMPLE };
# Anything declared in the export section will show up in the rendered
# documentation's "public interface" section
@ -107,6 +125,11 @@ export {
field2: bool; ##< toggles something
};
## document the record extension redef here
redef record SimpleRecord += {
## document the extending field here
field_ext: string &optional; ##< (or here)
};
## general documentation for a type "ComplexRecord" goes here
type ComplexRecord: record {
@ -116,6 +139,13 @@ export {
msg: string &default="blah"; ##< attributes are self-documenting
} &redef;
## An example record to be used with a logging stream.
type Info: record {
ts: time &log;
uid: string &log;
status: count &log &optional;
};
############## options ################
# right now, I'm just defining an option as
# any const with &redef (something that can
@ -159,6 +189,15 @@ export {
## Give more details about "an_event" here.
## name: describe the argument here
global an_event: event(name: string);
## This is a declaration of an example event that can be used in
## logging streams and is raised once for each log entry.
global log_example: event(rec: Info);
}
function filter_func(rec: Info): bool
{
return T;
}
# this function is documented in the "private interface" section
@ -176,3 +215,14 @@ type PrivateRecord: record {
field1: bool;
field2: count;
};
event bro_init()
{
Log::create_stream(EXAMPLE, [$columns=Info, $ev=log_example]);
Log::add_filter(EXAMPLE, [
$name="example-filter",
$path="example-filter",
$pred=filter_func,
$exclude=set("ts")
]);
}


@ -1,71 +0,0 @@
#! /usr/bin/env python
import os
import subprocess
import shutil
import glob
import string
import sys
from BroToReST import *
# TODO: generate docs for more scripts
# TODO: the groups are just made up to test the functionality, fix them
# Scripts that can be loaded by bro via command line argument:
docs = {
"alarm.bro": "internal",
"arp.bro": "user",
"conn.bro": "internal",
"dhcp.bro": "user",
"dns.bro": "user",
"ftp.bro": "user",
"http.bro": "user",
"http-reply.bro": None,
"http-request.bro": None,
"irc.bro": "user",
"smtp.bro": "user",
"ssl.bro": "user",
"ssl-ciphers.bro": None,
"ssl-errors.bro": None,
"synflood.bro": "user",
"tcp.bro": "user",
"udp.bro": "user",
"weird.bro": "internal",
}
# Scripts that can't be loaded by bro via command line argument (possible
# due to dependency issues), but can be loaded via an @load on stdin:
stdin_docs = {
"notice.bro": "internal",
}
GenDocs(docs)
GenDocs(stdin_docs, True)
# The example documentation script doesn't live on the BROPATH, so
# explicitly generate the docs for it like this:
BroToReST("example.bro", False, ["@PROJECT_SOURCE_DIR@/doc"], group="internal").GenDoc()
# Generate documentation for stuff that's always loaded into bro by default:
cmd = "echo '' | %s %s" % (BRO, BRO_ARGS)
p = subprocess.Popen(cmd, shell=True, env={"BROPATH": BROPATH})
if p.wait() == 0:
for doc in glob.glob("*.rst"):
if doc == "<stdin>.rst":
os.remove(doc)
continue
basename, ext = os.path.splitext(doc)
basename2, ext = os.path.splitext(basename)
if ext == ".init":
src_file = basename
else:
src_file = basename + ".bro"
src_file = FindBroScript(src_file)
shutil.copy(src_file, DOC_DST_DIR)
shutil.copy(doc, DOC_DST_DIR)
if ext == ".bif":
AppendToDocGroup("bifs", src_file, doc)
else:
AppendToDocGroup("default", src_file, doc)
os.remove(doc)


@ -0,0 +1,52 @@
#! /usr/bin/env python
# This script automatically generates reST documents, each listing
# a collection of Bro policy scripts that are "grouped" together.
# The summary text (##! comments) of the policy script is embedded
# in the list.
#
# 1st argument is the file containing list of groups
# 2nd argument is the directory containing ${group}_files lists of
# scripts that belong to the group and ${group}_doc_names lists of
# document names that can be supplied to a reST :doc: role
# 3rd argument is a directory in which to write a ${group}.rst file (will
# append to existing file) that contains reST style references to
# script docs along with summary text contained in original script
import sys
import os
import string
group_list = sys.argv[1]
file_manifest_dir = sys.argv[2]
output_dir = sys.argv[3]
with open(group_list, 'r') as f_group_list:
for group in f_group_list.read().splitlines():
#print group
file_manifest = os.path.join(file_manifest_dir, group + "_files")
doc_manifest = os.path.join(file_manifest_dir, group + "_doc_names")
src_files = []
doc_names = []
with open(file_manifest, 'r') as f_file_manifest:
src_files = f_file_manifest.read().splitlines()
with open(doc_manifest, 'r') as f_doc_manifest:
doc_names = f_doc_manifest.read().splitlines()
for i in range(len(src_files)):
src_file = src_files[i]
#print "\t" + src_file
summary_comments = []
with open(src_file, 'r') as f_src_file:
for line in f_src_file:
sum_pos = string.find(line, "##!")
if sum_pos != -1:
summary_comments.append(line[(sum_pos+3):])
#print summary_comments
group_file = os.path.join(output_dir, group + ".rst")
with open(group_file, 'a') as f_group_file:
f_group_file.write("\n:doc:`/policy/%s`\n" % doc_names[i])
for line in summary_comments:
f_group_file.write(" " + line)


@ -118,4 +118,6 @@ The Bro scripting language supports the following built-in attributes.
.. bro:attr:: &group
.. bro:attr:: &log
.. bro:attr:: (&tracked)
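
A hypothetical sketch of the new ``&log`` attribute in use, mirroring the
``Info`` record from ``example.bro`` (names here are illustrative only)::

    type Info: record {
        ts: time &log;      # included in the logging stream
        uid: string &log;
        scratch: string;    # no &log: kept out of the log
    };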


@ -11,11 +11,11 @@ Contents:
common
builtins
policy/default
policy/user
policy/bifs
policy/internal
policy/index
default
bifs
user
internal
Indices and tables
==================


@ -1,10 +1,6 @@
Index of All Policy Script Documentation
========================================
Contents:
.. toctree::
:maxdepth: 1
:glob:
*

make-deb-packages (new executable file)

@ -0,0 +1,52 @@
#!/bin/sh
# This script generates binary DEB packages.
# They can be found in build/ after running.
prefix=/opt/bro
# CMake/CPack versions before 2.8.2 have bugs that can create bad packages
CMAKE_PACK_REQ=2.8.2
CMAKE_VER=`cmake -version | awk '{ print $3 }'`
if [ "${CMAKE_VER}" \< "${CMAKE_PACK_REQ}" ]; then
echo "Package creation requires CMake > 2.8.2" >&2
exit 1
fi
# The DEB CPack generator depends on `dpkg-shlibdeps` to automatically
# determine what dependencies to set for the packages
type dpkg-shlibdeps > /dev/null 2>&1 || {
echo "\
Creating DEB packages requires the 'dpkg-shlibdeps' command, usually provided by
the 'dpkg-dev' package, please install it first.
" >&2;
exit 1;
}
# During the packaging process, `dpkg-shlibdeps` will fail if used on a library
# that links to other internal/project libraries unless an RPATH is used or
# we set LD_LIBRARY_PATH such that it can find the internal/project library
# in the temporary packaging tree.
export LD_LIBRARY_PATH=./${prefix}/lib
# Minimum Bro
./configure --prefix=${prefix} --disable-broccoli --disable-broctl \
--pkg-name-prefix=Bro --binary-package
( cd build && make package )
# Full Bro package
./configure --prefix=${prefix} --pkg-name-prefix=Bro-all --binary-package
( cd build && make package )
# Broccoli
cd aux/broccoli
./configure --prefix=${prefix} --binary-package
( cd build && make package && mv Broccoli*.deb ../../../build/ )
cd ../..
# Broctl
cd aux/broctl
./configure --prefix=${prefix} --binary-package
( cd build && make package && mv Broctl*.deb ../../../build/ )
cd ../..


@ -69,8 +69,12 @@ type ftp_port: record {
};
type endpoint: record {
size: count;
size: count; # logical size (for TCP: from seq numbers)
state: count;
# The following are set if use_conn_size_analyzer is T.
num_pkts: count &optional; # number of packets on the wire
num_bytes_ip: count &optional; # actual number of IP-level bytes on the wire
};
type endpoint_stats: record {
@ -95,6 +99,7 @@ type connection: record {
addl: string;
hot: count; # how hot; 0 = don't know or not hot
history: string;
uid: string;
};
type SYN_packet: record {
@ -499,6 +504,12 @@ const encap_hdr_size = 0 &redef;
# ... or just for the following UDP port.
const tunnel_port = 0/udp &redef;
# Whether to use the ConnSize analyzer to count the number of
# packets and IP-level bytes transferred by each endpoint. If
# true, these values are returned in the connection's endpoint
# record val.
const use_conn_size_analyzer = F &redef;
const UDP_INACTIVE = 0;
const UDP_ACTIVE = 1; # means we've seen something from this endpoint
@ -1176,6 +1187,10 @@ function string_escape(s: string, chars: string): string
return s;
}
# The filter the user has set via the -f command line options, or
# empty if none.
const cmd_line_bpf_filter = "" &redef;
@load pcap.bro
# Rotate logs every x seconds.
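
The new endpoint fields are &optional and thus only present when the
analyzer actually ran; a minimal sketch of reading them from a script
(the event choice here is illustrative)::

    redef use_conn_size_analyzer = T;

    event connection_state_remove(c: connection)
        {
        if ( c$orig?$num_pkts && c$orig?$num_bytes_ip )
            print fmt("orig sent %d packets / %d IP-level bytes",
                      c$orig$num_pkts, c$orig$num_bytes_ip);
        }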


@ -22,6 +22,15 @@ export { global FTP::is_ftp_data_conn: function(c: connection): bool; }
# by record_connection.
const record_state_history = F &redef;
# Whether to add 4 more columns to conn.log with
# orig_packets orig_ip_bytes resp_packets resp_ip_bytes
# Requires use_conn_size_analyzer=T
# Columns are added after history but before addl
const report_conn_size_analyzer = F &redef;
# Activate conn-size analyzer if necessary.
redef use_conn_size_analyzer = (report_conn_size_analyzer);
# Whether to translate the local address in SensitiveConnection notices
# to a hostname. Meant as a demonstration of the "when" construct.
const xlate_hot_local_addr = F &redef;
@ -96,6 +105,12 @@ function conn_size(e: endpoint, trans: transport_proto): string
return "?";
}
function conn_size_from_analyzer(e: endpoint): string
{
return fmt("%d %d", (e?$num_pkts) ? e$num_pkts : 0,
(e?$num_bytes_ip) ? e$num_bytes_ip : 0);
}
function service_name(c: connection): string
{
local p = c$id$resp_p;
@ -304,6 +319,10 @@ function record_connection(f: file, c: connection)
log_msg = fmt("%s %s", log_msg,
c$history == "" ? "X" : c$history);
if ( use_conn_size_analyzer && report_conn_size_analyzer )
log_msg = fmt("%s %s %s", log_msg,
conn_size_from_analyzer(c$orig), conn_size_from_analyzer(c$resp));
if ( addl != "" )
log_msg = fmt("%s %s", log_msg, addl);
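
Assuming the defaults above, getting the four extra columns into conn.log
should only require flipping the reporting flag, which in turn activates
the analyzer; a sketch::

    @load conn
    redef report_conn_size_analyzer = T;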


@ -500,7 +500,7 @@ event irc_channel_topic(c: connection, channel: string, topic: string)
local conn = get_conn(c);
local ch = get_channel(conn, channel);
ch$topic_history[|ch$topic_history| + 1] = ch$topic;
ch$topic_history[|ch$topic_history|] = ch$topic;
ch$topic = topic;
if ( c$id in bot_conns )
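
The off-by-one fix reflects that vector indices start at 0, so the next
free slot of a vector ``v`` is ``|v|``; a tiny illustration (assuming
locals of vector type start out empty)::

    local v: vector of string;
    v[|v|] = "first";    # lands at index 0
    v[|v|] = "second";   # lands at index 1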


@ -4,8 +4,16 @@
global capture_filters: table[string] of string &redef;
global restrict_filters: table[string] of string &redef;
# Filter string which is unconditionally or'ed to every pcap filter.
global unrestricted_filter = "" &redef;
# By default, Bro will examine all packets. If this is set to false,
# it will dynamically build a BPF filter that selects only those protocols
# for which the user has loaded a corresponding analysis script.
# The latter used to be the default for Bro versions < 1.6. That has now
# changed, however, to enable port-independent protocol analysis.
const all_packets = T &redef;
# Filter string which is unconditionally or'ed to every dynamically
# built pcap filter.
const unrestricted_filter = "" &redef;
redef enum PcapFilterID += {
DefaultPcapFilter,
@ -27,6 +35,7 @@ function join_filters(capture_filter: string, restrict_filter: string): string
if ( capture_filter != "" && restrict_filter != "" )
filter = fmt( "(%s) and (%s)", restrict_filter, capture_filter );
else if ( capture_filter != "" )
filter = capture_filter;
@ -34,7 +43,7 @@ function join_filters(capture_filter: string, restrict_filter: string): string
filter = restrict_filter;
else
filter = "tcp or udp or icmp";
filter = "ip or not ip";
if ( unrestricted_filter != "" )
filter = fmt( "(%s) or (%s)", unrestricted_filter, filter );
@ -44,28 +53,39 @@ function join_filters(capture_filter: string, restrict_filter: string): string
function build_default_pcap_filter(): string
{
# Build capture_filter.
local cfilter = "";
if ( cmd_line_bpf_filter != "" )
# Return what the user specified on the command line;
return cmd_line_bpf_filter;
if ( all_packets )
{
# Return an "always true" filter.
if ( bro_has_ipv6() )
return "ip or not ip";
else
return "not ip6";
}
## Build filter dynamically.
# First the capture_filter.
local cfilter = "";
for ( id in capture_filters )
cfilter = add_to_pcap_filter(cfilter, capture_filters[id], "or");
# Build restrict_filter.
# Then the restrict_filter.
local rfilter = "";
local saw_VLAN = F;
for ( id in restrict_filters )
{
if ( restrict_filters[id] == "vlan" )
# These are special - they need to come first.
saw_VLAN = T;
else
rfilter = add_to_pcap_filter(rfilter, restrict_filters[id], "and");
}
if ( saw_VLAN )
rfilter = add_to_pcap_filter("vlan", rfilter, "and");
# Finally, join them.
local filter = join_filters(cfilter, rfilter);
return join_filters(cfilter, rfilter);
# Exclude IPv6 if we don't support it.
if ( ! bro_has_ipv6() )
filter = fmt("(not ip6) and (%s)", filter);
return filter;
}
function install_default_pcap_filter()
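
Assuming the defaults above, a site that wants the dynamically built
filter back could redef the new constant and contribute entries to the
filter tables; a hypothetical sketch (entry names are illustrative)::

    redef all_packets = F;    # build the BPF filter dynamically again

    redef capture_filters += { ["http"] = "tcp port 80" };
    redef restrict_filters += { ["local-net"] = "net 10.0.0.0/8" };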


@ -295,8 +295,8 @@ function pm_mapping_to_text(server: addr, m: pm_mappings): string
if ( [prog, p] !in mapping_seen )
{
add mapping_seen[prog, p];
addls[++num_addls] = fmt("%s -> %s", rpc_prog(prog), p);
addls[num_addls] = fmt("%s -> %s", rpc_prog(prog), p);
++num_addls;
update_RPC_server_map(server, p, rpc_prog(prog));
}
}


@ -186,17 +186,17 @@ export {
# More precisely, the counter is the next index of the threshold vector.
global shut_down_thresh_reached: table[addr] of bool &default=F;
global rb_idx: table[addr] of count
&default=1 &read_expire = 1 days &redef;
&default=0 &read_expire = 1 days &redef;
global rps_idx: table[addr] of count
&default=1 &read_expire = 1 days &redef;
&default=0 &read_expire = 1 days &redef;
global rops_idx: table[addr] of count
&default=1 &read_expire = 1 days &redef;
&default=0 &read_expire = 1 days &redef;
global rpts_idx: table[addr,addr] of count
&default=1 &read_expire = 1 days &redef;
&default=0 &read_expire = 1 days &redef;
global rat_idx: table[addr] of count
&default=1 &read_expire = 1 days &redef;
&default=0 &read_expire = 1 days &redef;
global rrat_idx: table[addr] of count
&default=1 &read_expire = 1 days &redef;
&default=0 &read_expire = 1 days &redef;
}
global thresh_check: function(v: vector of count, idx: table[addr] of count,
@ -609,7 +609,7 @@ function thresh_check(v: vector of count, idx: table[addr] of count,
return F;
}
if ( idx[orig] <= |v| && n >= v[idx[orig]] )
if ( idx[orig] < |v| && n >= v[idx[orig]] )
{
++idx[orig];
return T;
@ -628,7 +628,7 @@ function thresh_check_2(v: vector of count, idx: table[addr, addr] of count,
return F;
}
if ( idx[orig,resp] <= |v| && n >= v[idx[orig, resp]] )
if ( idx[orig,resp] < |v| && n >= v[idx[orig, resp]] )
{
++idx[orig,resp];
return T;


@ -1,5 +0,0 @@
# $Id: vlan.bro 416 2004-09-17 03:52:28Z vern $
redef restrict_filters += { ["vlan"] = "vlan" };
redef encap_hdr_size = 4;


@ -35,6 +35,7 @@
#include "POP3.h"
#include "SSH.h"
#include "SSL-binpac.h"
#include "ConnSizeAnalyzer.h"
// Keep same order here as in AnalyzerTag definition!
const Analyzer::Config Analyzer::analyzer_configs[] = {
@ -56,6 +57,9 @@ const Analyzer::Config Analyzer::analyzer_configs[] = {
{ AnalyzerTag::ICMP_Echo, "ICMP_ECHO",
ICMP_Echo_Analyzer::InstantiateAnalyzer,
ICMP_Echo_Analyzer::Available, 0, false },
{ AnalyzerTag::ICMP_Redir, "ICMP_REDIR",
ICMP_Redir_Analyzer::InstantiateAnalyzer,
ICMP_Redir_Analyzer::Available, 0, false },
{ AnalyzerTag::TCP, "TCP", TCP_Analyzer::InstantiateAnalyzer,
TCP_Analyzer::Available, 0, false },
@ -148,6 +152,9 @@ const Analyzer::Config Analyzer::analyzer_configs[] = {
{ AnalyzerTag::TCPStats, "TCPSTATS",
TCPStats_Analyzer::InstantiateAnalyzer,
TCPStats_Analyzer::Available, 0, false },
{ AnalyzerTag::ConnSize, "CONNSIZE",
ConnSize_Analyzer::InstantiateAnalyzer,
ConnSize_Analyzer::Available, 0, false },
{ AnalyzerTag::Contents, "CONTENTS", 0, 0, 0, false },
{ AnalyzerTag::ContentLine, "CONTENTLINE", 0, 0, 0, false },
@ -848,6 +855,12 @@ unsigned int Analyzer::MemoryAllocation() const
return mem;
}
void Analyzer::UpdateConnVal(RecordVal *conn_val)
{
LOOP_OVER_CHILDREN(i)
(*i)->UpdateConnVal(conn_val);
}
void SupportAnalyzer::ForwardPacket(int len, const u_char* data, bool is_orig,
int seq, const IP_Hdr* ip, int caplen)
{


@ -227,6 +227,13 @@ public:
virtual unsigned int MemoryAllocation() const;
// Called whenever the connection value needs to be updated. By
// default, this method will be called for each analyzer in the tree.
// Analyzers can use this method to attach additional data to the
// connections. A call to BuildConnVal will in turn trigger a call to
// UpdateConnVal.
virtual void UpdateConnVal(RecordVal *conn_val);
// The following methods are proxies: calls are directly forwarded
// to the connection instance. These are for convenience only,
// allowing us to reuse more of the old analyzer code unchanged.
@ -366,7 +373,6 @@ public:
: Analyzer(tag, conn) { pia = 0; }
virtual void Done();
virtual void UpdateEndpointVal(RecordVal* endp, int is_orig) = 0;
virtual bool IsReuse(double t, const u_char* pkt) = 0;
virtual void SetContentsFile(unsigned int direction, BroFile* f);


@ -22,7 +22,9 @@ namespace AnalyzerTag {
PIA_TCP, PIA_UDP,
// Transport-layer analyzers.
ICMP, ICMP_TimeExceeded, ICMP_Unreachable, ICMP_Echo, TCP, UDP,
ICMP,
ICMP_TimeExceeded, ICMP_Unreachable, ICMP_Echo, ICMP_Redir,
TCP, UDP,
// Application-layer analyzers (hand-written).
BitTorrent, BitTorrentTracker,
@ -37,6 +39,8 @@ namespace AnalyzerTag {
// Other
File, Backdoor, InterConn, SteppingStone, TCPStats,
ConnSize,
// Support-analyzers
Contents, ContentLine, NVT, Zip, Contents_DNS, Contents_NCP,


@ -19,7 +19,7 @@ const char* attr_name(attr_tag t)
"&persistent", "&synchronized", "&postprocessor",
"&encrypt", "&match", "&disable_print_hook",
"&raw_output", "&mergeable", "&priority",
"&group", "(&tracked)",
"&group", "&log", "(&tracked)",
};
return attr_names[int(t)];


@ -8,8 +8,8 @@
#include <sys/types.h>
#include <regex.h>
# define FMT_INT "%lld"
# define FMT_UINT "%llu"
# define FMT_INT "%" PRId64
# define FMT_UINT "%" PRIu64
static TableType* bt_tracker_headers = 0;
static RecordType* bittorrent_peer;


@ -1,16 +0,0 @@
#include <cstdio>
#include <string>
#include <list>
#include "BroDoc.h"
#include "BroBifDoc.h"
BroBifDoc::BroBifDoc(const std::string& sourcename) : BroDoc(sourcename)
{
}
// TODO: This needs to do something different than parent class's version.
void BroBifDoc::WriteDocFile() const
{
BroDoc::WriteDocFile();
}


@ -1,18 +0,0 @@
#ifndef brobifdoc_h
#define brobifdoc_h
#include <cstdio>
#include <string>
#include <list>
#include "BroDoc.h"
class BroBifDoc : public BroDoc {
public:
BroBifDoc(const std::string& sourcename);
virtual ~BroBifDoc() { }
void WriteDocFile() const;
};
#endif


@ -58,15 +58,8 @@ void BroDoc::AddImport(const std::string& s)
if ( ext_pos == std::string::npos )
imports.push_back(s);
else
{
if ( s.substr(ext_pos + 1) == "bro" )
imports.push_back(s.substr(0, ext_pos));
else
fprintf(stderr, "Warning: skipped documenting @load of file "
"without .bro extension: %s\n", s.c_str());
}
}
void BroDoc::SetPacketFilter(const std::string& s)
@ -116,7 +109,15 @@ void BroDoc::WriteDocFile() const
if ( ! imports.empty() )
{
WriteToDoc(":Imports: ");
WriteStringList(":doc:`%s`, ", ":doc:`%s`\n", imports);
std::list<std::string>::const_iterator it;
for ( it = imports.begin(); it != imports.end(); ++it )
{
if ( it != imports.begin() )
WriteToDoc(", ");
WriteToDoc(":doc:`%s </policy/%s>`", it->c_str(), it->c_str());
}
WriteToDoc("\n");
}
WriteToDoc("\n");


@ -285,7 +285,6 @@ set(bro_SRCS
BitTorrent.cc
BitTorrentTracker.cc
BPF_Program.cc
BroBifDoc.cc
BroDoc.cc
BroDocObj.cc
BroString.cc
@ -294,6 +293,7 @@ set(bro_SRCS
CompHash.cc
Conn.cc
ConnCompressor.cc
ConnSizeAnalyzer.cc
ContentLine.cc
DCE_RPC.cc
DFA.cc


@ -9,6 +9,8 @@
#include <assert.h>
#include <openssl/ssl.h>
#include <algorithm>
#include "config.h"
#include "ChunkedIO.h"
#include "NetVar.h"
@ -140,7 +142,7 @@ bool ChunkedIOFd::Write(Chunk* chunk)
{
#ifdef DEBUG
DBG_LOG(DBG_CHUNKEDIO, "write of size %d [%s]",
chunk->len, fmt_bytes(chunk->data, min(20, chunk->len)));
chunk->len, fmt_bytes(chunk->data, min((uint32)20, chunk->len)));
#endif
// Reject if our queue of pending chunks is way too large. Otherwise,
@ -166,13 +168,13 @@ bool ChunkedIOFd::Write(Chunk* chunk)
// We have to split it up.
char* p = chunk->data;
unsigned long left = chunk->len;
uint32 left = chunk->len;
while ( left )
{
Chunk* part = new Chunk;
part->len = min(BUFFER_SIZE - sizeof(uint32), left);
part->len = min<uint32>(BUFFER_SIZE - sizeof(uint32), left);
part->data = new char[part->len];
memcpy(part->data, p, part->len);
left -= part->len;
@ -427,7 +429,7 @@ bool ChunkedIOFd::Read(Chunk** chunk, bool may_block)
(*chunk)->len & ~FLAG_PARTIAL,
(*chunk)->len & FLAG_PARTIAL ? "(P) " : "",
fmt_bytes((*chunk)->data,
min(20, (*chunk)->len)));
min((uint32)20, (*chunk)->len)));
#endif
if ( ! ((*chunk)->len & FLAG_PARTIAL) )


@ -152,7 +152,6 @@ Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id)
proto = TRANSPORT_UNKNOWN;
conn_val = 0;
orig_endp = resp_endp = 0;
login_conn = 0;
is_active = 1;
@ -182,6 +181,8 @@ Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id)
TimerMgr::Tag* tag = current_iosrc->GetCurrentTag();
conn_timer_mgr = tag ? new TimerMgr::Tag(*tag) : 0;
uid = 0; // Will set later.
if ( conn_timer_mgr )
{
++external_connections;
@ -215,6 +216,56 @@ Connection::~Connection()
--external_connections;
}
uint64 Connection::uid_counter = 0;
uint64 Connection::uid_instance = 0;
uint64 Connection::CalculateNextUID()
{
if ( uid_instance == 0 )
{
// This is the first time we need a UID.
if ( ! have_random_seed() )
{
// If we don't need deterministic output (as
// indicated by a set seed), we calculate the
// instance ID by hashing something likely to be
// globally unique.
struct {
char hostname[128];
struct timeval time;
pid_t pid;
int rnd;
} unique;
gethostname(unique.hostname, 128);
unique.hostname[sizeof(unique.hostname)-1] = '\0';
gettimeofday(&unique.time, 0);
unique.pid = getpid();
unique.rnd = bro_random();
uid_instance = HashKey::HashBytes(&unique, sizeof(unique));
++uid_instance; // Now it's larger than zero.
}
else
// Generate deterministic UIDs.
uid_instance = 1;
}
// Now calculate the unique ID for this connection.
struct {
uint64 counter;
hash_t instance;
} key;
key.counter = ++uid_counter;
key.instance = uid_instance;
uint64_t h = HashKey::HashBytes(&key, sizeof(key));
return h;
}
void Connection::Done()
{
finished = 1;
@ -346,14 +397,15 @@ RecordVal* Connection::BuildConnVal()
id_val->Assign(1, new PortVal(ntohs(orig_port), prot_type));
id_val->Assign(2, new AddrVal(resp_addr));
id_val->Assign(3, new PortVal(ntohs(resp_port), prot_type));
conn_val->Assign(0, id_val);
orig_endp = new RecordVal(endpoint);
RecordVal *orig_endp = new RecordVal(endpoint);
orig_endp->Assign(0, new Val(0, TYPE_COUNT));
orig_endp->Assign(1, new Val(0, TYPE_COUNT));
conn_val->Assign(1, orig_endp);
resp_endp = new RecordVal(endpoint);
RecordVal *resp_endp = new RecordVal(endpoint);
resp_endp->Assign(0, new Val(0, TYPE_COUNT));
resp_endp->Assign(1, new Val(0, TYPE_COUNT));
conn_val->Assign(2, resp_endp);
@ -363,13 +415,16 @@ RecordVal* Connection::BuildConnVal()
conn_val->Assign(6, new StringVal("")); // addl
conn_val->Assign(7, new Val(0, TYPE_COUNT)); // hot
conn_val->Assign(8, new StringVal("")); // history
if ( ! uid )
uid = CalculateNextUID();
char tmp[20];
conn_val->Assign(9, new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62)));
}
if ( root_analyzer )
{
root_analyzer->UpdateEndpointVal(orig_endp, 1);
root_analyzer->UpdateEndpointVal(resp_endp, 0);
}
root_analyzer->UpdateConnVal(conn_val);
conn_val->Assign(3, new Val(start_time, TYPE_TIME)); // ###
conn_val->Assign(4, new Val(last_time - start_time, TYPE_INTERVAL));
@ -744,10 +799,6 @@ void Connection::FlipRoles()
resp_port = orig_port;
orig_port = tmp_port;
RecordVal* tmp_rc = resp_endp;
resp_endp = orig_endp;
orig_endp = tmp_rc;
Unref(conn_val);
conn_val = 0;
@ -843,8 +894,6 @@ bool Connection::DoSerialize(SerialInfo* info) const
return false;
SERIALIZE_OPTIONAL(conn_val);
SERIALIZE_OPTIONAL(orig_endp);
SERIALIZE_OPTIONAL(resp_endp);
// FIXME: RuleEndpointState not yet serializable.
// FIXME: Analyzers not yet serializable.
@ -908,10 +957,6 @@ bool Connection::DoUnserialize(UnserialInfo* info)
UNSERIALIZE_OPTIONAL(conn_val,
(RecordVal*) Val::Unserialize(info, connection_type));
UNSERIALIZE_OPTIONAL(orig_endp,
(RecordVal*) Val::Unserialize(info, endpoint));
UNSERIALIZE_OPTIONAL(resp_endp,
(RecordVal*) Val::Unserialize(info, endpoint));
int iproto;


@ -301,7 +301,12 @@ public:
::operator delete(((char*) ptr) - 4);
}
void SetUID(uint64 arg_uid) { uid = arg_uid; }
static uint64 CalculateNextUID();
protected:
Connection() { persistent = 0; }
// Add the given timer to expire at time t. If do_expire
@ -333,8 +338,6 @@ protected:
double start_time, last_time;
double inactivity_timeout;
RecordVal* conn_val;
RecordVal* orig_endp;
RecordVal* resp_endp;
LoginConn* login_conn; // either nil, or this
int suppress_event; // suppress certain events to once per conn.
@ -358,6 +361,11 @@ protected:
TransportLayerAnalyzer* root_analyzer;
PIA* primary_PIA;
uint64 uid; // Globally unique connection ID.
static uint64 uid_counter; // Counter for uids.
static uint64 uid_instance; // Instance ID, computed once.
};
class ConnectionTimer : public Timer {


@ -4,6 +4,7 @@
#include "ConnCompressor.h"
#include "Event.h"
#include "ConnSizeAnalyzer.h"
#include "net_util.h"
// The basic model of the compressor is to wait for an answer before
@ -45,6 +46,11 @@
// - We don't match signatures on connections which are completely handled
// by the compressor. Matching would require significant additional state
// w/o being very helpful.
//
// - If use_conn_size_analyzer is True, the reported counts for bytes and
//   packets may not account for some of the packets/data that the
//   connection compressor handles. The error, if any, will however be small.
#ifdef DEBUG
@ -234,7 +240,7 @@ Connection* ConnCompressor::NextPacket(double t, HashKey* key, const IP_Hdr* ip,
else if ( addr_eq(ip->SrcAddr(), SrcAddr(pending)) &&
tp->th_sport == SrcPort(pending) )
// Another packet from originator.
tc = NextFromOrig(pending, t, key, tp);
tc = NextFromOrig(pending, t, key, ip, tp);
else
// A reply.
@ -329,11 +335,15 @@ Connection* ConnCompressor::FirstFromOrig(double t, HashKey* key,
}
Connection* ConnCompressor::NextFromOrig(PendingConn* pending, double t,
HashKey* key, const tcphdr* tp)
HashKey* key, const IP_Hdr* ip,
const tcphdr* tp)
{
// Another packet from the same host without seeing an answer so far.
DBG_LOG(DBG_COMPRESSOR, "%s same again", fmt_conn_id(pending));
++pending->num_pkts;
pending->num_bytes_ip += ip->PayloadLen();
// New window scale overrides old - not great, this is a (subtle)
// evasion opportunity.
if ( TCP_Analyzer::ParseTCPOptions(tp, parse_tcp_options, 0, 0,
@ -521,6 +531,8 @@ Connection* ConnCompressor::Instantiate(HashKey* key, PendingConn* pending)
return 0;
}
new_conn->SetUID(pending->uid);
DBG_LOG(DBG_COMPRESSOR, "%s instantiated", fmt_conn_id(pending));
++sizes.connections;
@ -608,6 +620,9 @@ void ConnCompressor::PktHdrToPendingConn(double time, const HashKey* key,
c->FIN = (tp->th_flags & TH_FIN) != 0;
c->RST = (tp->th_flags & TH_RST) != 0;
c->ACK = (tp->th_flags & TH_ACK) != 0;
c->uid = Connection::CalculateNextUID();
c->num_bytes_ip = ip->TotalLen();
c->num_pkts = 1;
c->invalid = 0;
if ( TCP_Analyzer::ParseTCPOptions(tp, parse_tcp_options, 0, 0, c) < 0 )
@ -848,8 +863,23 @@ void ConnCompressor::Event(const PendingConn* pending, double t,
TRANSPORT_TCP));
orig_endp->Assign(0, new Val(orig_size, TYPE_COUNT));
orig_endp->Assign(1, new Val(orig_state, TYPE_COUNT));
if ( ConnSize_Analyzer::Available() )
{
orig_endp->Assign(2, new Val(pending->num_pkts, TYPE_COUNT));
orig_endp->Assign(3, new Val(pending->num_bytes_ip, TYPE_COUNT));
}
else
{
orig_endp->Assign(2, new Val(0, TYPE_COUNT));
orig_endp->Assign(3, new Val(0, TYPE_COUNT));
}
resp_endp->Assign(0, new Val(0, TYPE_COUNT));
resp_endp->Assign(1, new Val(resp_state, TYPE_COUNT));
resp_endp->Assign(2, new Val(0, TYPE_COUNT));
resp_endp->Assign(3, new Val(0, TYPE_COUNT));
}
else
{
@ -859,10 +889,26 @@ void ConnCompressor::Event(const PendingConn* pending, double t,
id_val->Assign(2, new AddrVal(SrcAddr(pending)));
id_val->Assign(3, new PortVal(ntohs(SrcPort(pending)),
TRANSPORT_TCP));
orig_endp->Assign(0, new Val(0, TYPE_COUNT));
orig_endp->Assign(1, new Val(resp_state, TYPE_COUNT));
orig_endp->Assign(2, new Val(0, TYPE_COUNT));
orig_endp->Assign(3, new Val(0, TYPE_COUNT));
resp_endp->Assign(0, new Val(orig_size, TYPE_COUNT));
resp_endp->Assign(1, new Val(orig_state, TYPE_COUNT));
if ( ConnSize_Analyzer::Available() )
{
resp_endp->Assign(2, new Val(pending->num_pkts, TYPE_COUNT));
resp_endp->Assign(3, new Val(pending->num_bytes_ip, TYPE_COUNT));
}
else
{
resp_endp->Assign(2, new Val(0, TYPE_COUNT));
resp_endp->Assign(3, new Val(0, TYPE_COUNT));
}
DBG_LOG(DBG_COMPRESSOR, "%s swapped direction", fmt_conn_id(pending));
}
@ -877,6 +923,9 @@ void ConnCompressor::Event(const PendingConn* pending, double t,
conn_val->Assign(7, new Val(0, TYPE_COUNT)); // hot
conn_val->Assign(8, new StringVal("")); // history
char tmp[20]; // uid.
conn_val->Assign(9, new StringVal(uitoa_n(pending->uid, tmp, sizeof(tmp), 62)));
conn_val->SetOrigin(0);
}


@ -97,6 +97,11 @@ public:
uint32 ack;
hash_t hash;
uint16 window;
uint64 uid;
// The following are set if use_conn_size_analyzer is T.
uint16 num_pkts;
uint16 num_bytes_ip;
};
private:
@ -118,8 +123,8 @@ private:
const IP_Hdr* ip, const tcphdr* tp);
// Called for more packets from the originator w/o seeing a response.
Connection* NextFromOrig(PendingConn* pending,
double t, HashKey* key, const tcphdr* tp);
Connection* NextFromOrig(PendingConn* pending, double t, HashKey* key,
const IP_Hdr* ip, const tcphdr* tp);
// Called for the first response packet. Instantiates a Connection.
Connection* Response(PendingConn* pending, double t, HashKey* key,

src/ConnSizeAnalyzer.cc (new file)

@ -0,0 +1,90 @@
// $Id$
//
// See the file "COPYING" in the main distribution directory for copyright.
//
// See ConnSize.h for more extensive comments.
#include "ConnSizeAnalyzer.h"
#include "TCP.h"
ConnSize_Analyzer::ConnSize_Analyzer(Connection* c)
: Analyzer(AnalyzerTag::ConnSize, c)
{
}
ConnSize_Analyzer::~ConnSize_Analyzer()
{
}
void ConnSize_Analyzer::Init()
{
Analyzer::Init();
orig_bytes = 0;
orig_pkts = 0;
resp_bytes = 0;
resp_pkts = 0;
}
void ConnSize_Analyzer::Done()
{
Analyzer::Done();
}
void ConnSize_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig, int seq, const IP_Hdr* ip, int caplen)
{
Analyzer::DeliverPacket(len, data, is_orig, seq, ip, caplen);
if ( is_orig )
{
orig_bytes += ip->TotalLen();
++orig_pkts;
}
else
{
resp_bytes += ip->TotalLen();
++resp_pkts;
}
}
void ConnSize_Analyzer::UpdateConnVal(RecordVal *conn_val)
{
// RecordType *connection_type is declared in NetVar.h
int orig_endp_idx = connection_type->FieldOffset("orig");
int resp_endp_idx = connection_type->FieldOffset("resp");
RecordVal *orig_endp = conn_val->Lookup(orig_endp_idx)->AsRecordVal();
RecordVal *resp_endp = conn_val->Lookup(resp_endp_idx)->AsRecordVal();
// "endpoint" is the RecordType from NetVar.h.
// TODO: or orig_endp->Type()->AsRecordType()->FieldOffset()
int pktidx = endpoint->FieldOffset("num_pkts");
int bytesidx = endpoint->FieldOffset("num_bytes_ip");
// TODO: error handling?
orig_endp->Assign(pktidx, new Val(orig_pkts, TYPE_COUNT));
orig_endp->Assign(bytesidx, new Val(orig_bytes, TYPE_COUNT));
resp_endp->Assign(pktidx, new Val(resp_pkts, TYPE_COUNT));
resp_endp->Assign(bytesidx, new Val(resp_bytes, TYPE_COUNT));
Analyzer::UpdateConnVal(conn_val);
}
void ConnSize_Analyzer::FlipRoles()
{
Analyzer::FlipRoles();
uint64_t tmp;
tmp = orig_bytes;
orig_bytes = resp_bytes;
resp_bytes = tmp;
tmp = orig_pkts;
orig_pkts = resp_pkts;
resp_pkts = tmp;
}

src/ConnSizeAnalyzer.h (new file)

@ -0,0 +1,41 @@
// $Id$
//
// See the file "COPYING" in the main distribution directory for copyright.
//
#ifndef CONNSTATS_H
#define CONNSTATS_H
#include "Analyzer.h"
#include "NetVar.h"
class ConnSize_Analyzer : public Analyzer {
public:
ConnSize_Analyzer(Connection* c);
virtual ~ConnSize_Analyzer();
virtual void Init();
virtual void Done();
// from Analyzer.h
virtual void UpdateConnVal(RecordVal *conn_val);
virtual void FlipRoles();
static Analyzer* InstantiateAnalyzer(Connection* conn)
{ return new ConnSize_Analyzer(conn); }
static bool Available() { return BifConst::use_conn_size_analyzer ; }
protected:
virtual void DeliverPacket(int len, const u_char* data, bool is_orig,
int seq, const IP_Hdr* ip, int caplen);
uint64_t orig_bytes;
uint64_t resp_bytes;
uint64_t orig_pkts;
uint64_t resp_pkts;
};
#endif


@ -120,7 +120,7 @@ void ContentLine_Analyzer::EndpointEOF(bool is_orig)
DeliverStream(1, (const u_char*) "\n", is_orig);
}
void ContentLine_Analyzer::SetPlainDelivery(int length)
void ContentLine_Analyzer::SetPlainDelivery(int64_t length)
{
if ( length < 0 )
internal_error("negative length for plain delivery");
@ -154,7 +154,7 @@ void ContentLine_Analyzer::DoDeliver(int len, const u_char* data)
if ( plain_delivery_length > 0 )
{
int deliver_plain = min(plain_delivery_length, len);
int deliver_plain = min(plain_delivery_length, (int64_t)len);
last_char = 0; // clear last_char
plain_delivery_length -= deliver_plain;
@ -179,7 +179,7 @@ void ContentLine_Analyzer::DoDeliver(int len, const u_char* data)
if ( seq < seq_to_skip )
{
// Skip rest of the data and return
int skip_len = seq_to_skip - seq;
int64_t skip_len = seq_to_skip - seq;
if ( skip_len > len )
skip_len = len;
@ -310,7 +310,7 @@ void ContentLine_Analyzer::CheckNUL()
}
}
void ContentLine_Analyzer::SkipBytesAfterThisLine(int length)
void ContentLine_Analyzer::SkipBytesAfterThisLine(int64_t length)
{
// This is a little complicated because Bro has to handle
// both CR and CRLF as a line break. When a line is delivered,
@ -326,7 +326,7 @@ void ContentLine_Analyzer::SkipBytesAfterThisLine(int length)
SkipBytes(length);
}
void ContentLine_Analyzer::SkipBytes(int length)
void ContentLine_Analyzer::SkipBytes(int64_t length)
{
skip_pending = 0;
seq_to_skip = SeqDelivered() + length;


@ -44,16 +44,16 @@ public:
// mode for next <length> bytes. Plain-delivery data is also passed
// via DeliverStream() and can differentiated by calling
// IsPlainDelivery().
void SetPlainDelivery(int length);
int GetPlainDeliveryLength() const { return plain_delivery_length; }
void SetPlainDelivery(int64_t length);
int64_t GetPlainDeliveryLength() const { return plain_delivery_length; }
bool IsPlainDelivery() { return is_plain; }
// Skip <length> bytes after this line.
// Can be used to skip HTTP data for performance considerations.
void SkipBytesAfterThisLine(int length);
void SkipBytes(int length);
void SkipBytesAfterThisLine(int64_t length);
void SkipBytes(int64_t length);
bool IsSkippedContents(int seq, int length)
bool IsSkippedContents(int64_t seq, int64_t length)
{ return seq + length <= seq_to_skip; }
protected:
@ -71,26 +71,26 @@ protected:
void CheckNUL();
// Returns the sequence number delivered so far.
int SeqDelivered() const { return seq_delivered_in_lines; }
int64_t SeqDelivered() const { return seq_delivered_in_lines; }
u_char* buf; // where we build up the body of the request
int offset; // where we are in buf
int buf_len; // how big buf is, total
unsigned int last_char; // last (non-option) character scanned
int seq; // last seq number
int seq_to_skip;
int64_t seq; // last seq number
int64_t seq_to_skip;
// Seq delivered up through NewLine() -- it is adjusted
// *before* NewLine() is called.
int seq_delivered_in_lines;
int64_t seq_delivered_in_lines;
// Number of bytes to be skipped after this line. See
// comments in SkipBytesAfterThisLine().
int skip_pending;
int64_t skip_pending;
// Remaining bytes to deliver plain.
int plain_delivery_length;
int64_t plain_delivery_length;
int is_plain;
// Don't deliver further data.


@ -10,6 +10,7 @@
#include "BackDoor.h"
#include "InterConn.h"
#include "SteppingStone.h"
#include "ConnSizeAnalyzer.h"
ExpectedConn::ExpectedConn(const uint32* _orig, const uint32* _resp,
@ -189,6 +190,8 @@ bool DPM::BuildInitialAnalyzerTree(TransportProto proto, Connection* conn,
const u_char* data)
{
TCP_Analyzer* tcp = 0;
UDP_Analyzer* udp = 0;
ICMP_Analyzer* icmp = 0;
TransportLayerAnalyzer* root = 0;
AnalyzerTag::Tag expected = AnalyzerTag::Error;
analyzer_map* ports = 0;
@ -206,7 +209,7 @@ bool DPM::BuildInitialAnalyzerTree(TransportProto proto, Connection* conn,
break;
case TRANSPORT_UDP:
root = new UDP_Analyzer(conn);
root = udp = new UDP_Analyzer(conn);
pia = new PIA_UDP(conn);
expected = GetExpected(proto, conn);
ports = &udp_ports;
@ -221,15 +224,23 @@ bool DPM::BuildInitialAnalyzerTree(TransportProto proto, Connection* conn,
case ICMP_ECHOREPLY:
if ( ICMP_Echo_Analyzer::Available() )
{
root = new ICMP_Echo_Analyzer(conn);
root = icmp = new ICMP_Echo_Analyzer(conn);
DBG_DPD(conn, "activated ICMP Echo analyzer");
}
break;
case ICMP_REDIRECT:
if ( ICMP_Redir_Analyzer::Available() )
{
root = new ICMP_Redir_Analyzer(conn);
DBG_DPD(conn, "activated ICMP Redir analyzer");
}
break;
case ICMP_UNREACH:
if ( ICMP_Unreachable_Analyzer::Available() )
{
root = new ICMP_Unreachable_Analyzer(conn);
root = icmp = new ICMP_Unreachable_Analyzer(conn);
DBG_DPD(conn, "activated ICMP Unreachable analyzer");
}
break;
@ -237,14 +248,14 @@ bool DPM::BuildInitialAnalyzerTree(TransportProto proto, Connection* conn,
case ICMP_TIMXCEED:
if ( ICMP_TimeExceeded_Analyzer::Available() )
{
root = new ICMP_TimeExceeded_Analyzer(conn);
root = icmp = new ICMP_TimeExceeded_Analyzer(conn);
DBG_DPD(conn, "activated ICMP Time Exceeded analyzer");
}
break;
}
if ( ! root )
root = new ICMP_Analyzer(conn);
root = icmp = new ICMP_Analyzer(conn);
analyzed = true;
break;
@ -363,6 +374,16 @@ bool DPM::BuildInitialAnalyzerTree(TransportProto proto, Connection* conn,
// we cannot add it as a normal child.
if ( TCPStats_Analyzer::Available() )
tcp->AddChildPacketAnalyzer(new TCPStats_Analyzer(conn));
// Add ConnSize analyzer. Needs to see packets, not stream.
if ( ConnSize_Analyzer::Available() )
tcp->AddChildPacketAnalyzer(new ConnSize_Analyzer(conn));
}
else
{
if ( ConnSize_Analyzer::Available() )
root->AddChildAnalyzer(new ConnSize_Analyzer(conn), false);
}
if ( pia )


@ -127,7 +127,7 @@ void ODesc::Add(int64 i)
else
{
char tmp[256];
sprintf(tmp, "%lld", i);
sprintf(tmp, "%" PRId64, i);
Add(tmp);
}
}
@ -139,7 +139,7 @@ void ODesc::Add(uint64 u)
else
{
char tmp[256];
sprintf(tmp, "%llu", u);
sprintf(tmp, "%" PRIu64, u);
Add(tmp);
}
}


@ -494,8 +494,7 @@ Val* UnaryExpr::Eval(Frame* f) const
VectorVal* v_op = v->AsVectorVal();
VectorVal* result = new VectorVal(Type()->AsVectorType());
for ( unsigned int i = VECTOR_MIN;
i < v_op->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < v_op->Size(); ++i )
{
Val* v_i = v_op->Lookup(i);
result->Assign(i, v_i ? Fold(v_i) : 0, this);
@ -633,8 +632,7 @@ Val* BinaryExpr::Eval(Frame* f) const
VectorVal* v_result = new VectorVal(Type()->AsVectorType());
for ( unsigned int i = VECTOR_MIN;
i < v_op1->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < v_op1->Size(); ++i )
{
if ( v_op1->Lookup(i) && v_op2->Lookup(i) )
v_result->Assign(i,
@ -656,8 +654,7 @@ Val* BinaryExpr::Eval(Frame* f) const
VectorVal* vv = (is_vec1 ? v1 : v2)->AsVectorVal();
VectorVal* v_result = new VectorVal(Type()->AsVectorType());
for ( unsigned int i = VECTOR_MIN;
i < vv->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < vv->Size(); ++i )
{
Val* vv_i = vv->Lookup(i);
if ( vv_i )
@ -1063,8 +1060,7 @@ Val* IncrExpr::Eval(Frame* f) const
if ( is_vector(v) )
{
VectorVal* v_vec = v->AsVectorVal();
for ( unsigned int i = VECTOR_MIN;
i < v_vec->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < v_vec->Size(); ++i )
{
Val* elt = v_vec->Lookup(i);
if ( elt )
@ -1941,7 +1937,7 @@ Val* BoolExpr::Eval(Frame* f) const
{
result = new VectorVal(Type()->AsVectorType());
result->Resize(vector_v->Size());
result->AssignRepeat(VECTOR_MIN, result->Size(),
result->AssignRepeat(0, result->Size(),
scalar_v, this);
}
else
@ -1970,8 +1966,7 @@ Val* BoolExpr::Eval(Frame* f) const
VectorVal* result = new VectorVal(Type()->AsVectorType());
result->Resize(vec_v1->Size());
for ( unsigned int i = VECTOR_MIN;
i < vec_v1->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < vec_v1->Size(); ++i )
{
Val* op1 = vec_v1->Lookup(i);
Val* op2 = vec_v2->Lookup(i);
@ -2353,7 +2348,7 @@ Val* CondExpr::Eval(Frame* f) const
VectorVal* result = new VectorVal(Type()->AsVectorType());
result->Resize(cond->Size());
for ( unsigned int i = VECTOR_MIN; i < cond->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < cond->Size(); ++i )
{
Val* local_cond = cond->Lookup(i);
if ( local_cond )
@ -2951,8 +2946,7 @@ Val* IndexExpr::Eval(Frame* f) const
return 0;
}
for ( unsigned int i = VECTOR_MIN;
i < v_v2->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < v_v2->Size(); ++i )
{
if ( v_v2->Lookup(i)->AsBool() )
v_result->Assign(v_result->Size() + 1, v_v1->Lookup(i), this);
@ -2964,8 +2958,7 @@ Val* IndexExpr::Eval(Frame* f) const
// S does, i.e., by excluding those elements.
// Probably only do this if *all* are negative.
v_result->Resize(v_v2->Size());
for ( unsigned int i = VECTOR_MIN;
i < v_v2->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < v_v2->Size(); ++i )
v_result->Assign(i, v_v1->Lookup(v_v2->Lookup(i)->CoerceToInt()), this);
}
}
@ -3113,9 +3106,14 @@ Expr* FieldExpr::Simplify(SimplifyType simp_type)
return this;
}
int FieldExpr::CanDel() const
{
return td->FindAttr(ATTR_DEFAULT) || td->FindAttr(ATTR_OPTIONAL);
}
void FieldExpr::Assign(Frame* f, Val* v, Opcode opcode)
{
if ( IsError() || ! v )
if ( IsError() )
return;
if ( field < 0 )
@ -3130,6 +3128,11 @@ void FieldExpr::Assign(Frame* f, Val* v, Opcode opcode)
}
}
void FieldExpr::Delete(Frame* f)
{
Assign(f, 0, OP_ASSIGN_IDX);
}
Val* FieldExpr::Fold(Val* v) const
{
Val* result = v->AsRecordVal()->Lookup(field);
@ -3541,9 +3544,9 @@ Val* VectorConstructorExpr::Eval(Frame* f) const
{
Expr* e = exprs[i];
Val* v = e->Eval(f);
if ( ! vec->Assign(i + VECTOR_MIN, v, e) )
if ( ! vec->Assign(i, v, e) )
{
Error(fmt("type mismatch at index %d", i + VECTOR_MIN), e);
Error(fmt("type mismatch at index %d", i), e);
return 0;
}
}
@ -3565,9 +3568,9 @@ Val* VectorConstructorExpr::InitVal(const BroType* t, Val* aggr) const
Expr* e = exprs[i];
Val* v = check_and_promote(e->Eval(0), vt, 1);
if ( ! v || ! vec->Assign(i + VECTOR_MIN, v, e) )
if ( ! v || ! vec->Assign(i, v, e) )
{
Error(fmt("initialization type mismatch at index %d", i + VECTOR_MIN), e);
Error(fmt("initialization type mismatch at index %d", i), e);
return 0;
}
}
@ -3926,7 +3929,7 @@ Val* ArithCoerceExpr::Fold(Val* v) const
VectorVal* vv = v->AsVectorVal();
VectorVal* result = new VectorVal(Type()->AsVectorType());
for ( unsigned int i = VECTOR_MIN; i < vv->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < vv->Size(); ++i )
{
Val* elt = vv->Lookup(i);
if ( elt )
@ -5043,9 +5046,9 @@ Val* ListExpr::InitVal(const BroType* t, Val* aggr) const
{
Expr* e = exprs[i];
Val* v = e->Eval(0);
if ( ! vec->Assign(i + VECTOR_MIN, v, e) )
if ( ! vec->Assign(i, v, e) )
{
e->Error(fmt("type mismatch at index %d", i + VECTOR_MIN));
e->Error(fmt("type mismatch at index %d", i));
return 0;
}


@ -688,8 +688,11 @@ public:
int Field() const { return field; }
int CanDel() const;
Expr* Simplify(SimplifyType simp_type);
void Assign(Frame* f, Val* v, Opcode op = OP_ASSIGN);
void Delete(Frame* f);
Expr* MakeLvalue();


@ -735,7 +735,7 @@ int BroFile::Write(const char* data, int len)
while ( len )
{
int outl;
int inl = min(MIN_BUFFER_SIZE, len);
int inl = min(+MIN_BUFFER_SIZE, len);
if ( ! EVP_SealUpdate(cipher_ctx, cipher_buffer, &outl,
(unsigned char*)data, inl) )

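Note: the unary '+' in min(+MIN_BUFFER_SIZE, len) is deliberate. With the int-only min()/max() helpers removed from util.h further below, std::min takes its arguments by const reference, so naming a static const class member there odr-uses it and can fail to link unless the member has an out-of-class definition. A minimal sketch, with F standing in for the real class (names are illustrative only):

#include <algorithm>

struct F { static const int MIN_BUFFER_SIZE = 1024; }; // stand-in only

int pick_chunk(int len)
	{
	// '+' forms a prvalue copy of the constant, so no definition of the
	// member is needed and both operands are plain ints.
	return std::min(+F::MIN_BUFFER_SIZE, len);
	}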

@ -238,7 +238,7 @@ void Gnutella_Analyzer::SendEvents(GnutellaMsgState* p, bool is_orig)
vl->append(new StringVal(p->payload));
vl->append(new Val(p->payload_len, TYPE_COUNT));
vl->append(new Val((p->payload_len <
min(p->msg_len, GNUTELLA_MAX_PAYLOAD)),
min(p->msg_len, (unsigned int)GNUTELLA_MAX_PAYLOAD)),
TYPE_BOOL));
vl->append(new Val((p->payload_left == 0), TYPE_BOOL));


@ -29,7 +29,7 @@ public:
u_char msg_type;
u_char msg_ttl;
char payload[GNUTELLA_MAX_PAYLOAD];
int payload_len;
unsigned int payload_len;
unsigned int payload_left;
};


@ -226,11 +226,11 @@ void HTTP_Entity::DeliverBodyClear(int len, const char* data, int trailing_CRLF)
// Returns 1 if the undelivered bytes are completely within the body,
// otherwise returns 0.
int HTTP_Entity::Undelivered(int len)
int HTTP_Entity::Undelivered(int64_t len)
{
if ( DEBUG_http )
{
DEBUG_MSG("Content gap %d, expect_data_length %d\n",
DEBUG_MSG("Content gap %" PRId64 ", expect_data_length %" PRId64 "\n",
len, expect_data_length);
}
@ -283,7 +283,7 @@ void HTTP_Entity::SubmitData(int len, const char* buf)
MIME_Entity::SubmitData(len, buf);
}
void HTTP_Entity::SetPlainDelivery(int length)
void HTTP_Entity::SetPlainDelivery(int64_t length)
{
ASSERT(length >= 0);
ASSERT(length == 0 || ! in_header);
@ -302,7 +302,7 @@ void HTTP_Entity::SubmitHeader(MIME_Header* h)
data_chunk_t vt = h->get_value_token();
if ( ! is_null_data_chunk(vt) )
{
int n;
int64_t n;
if ( atoi_n(vt.length, vt.data, 0, 10, n) )
content_length = n;
else
@ -409,7 +409,7 @@ void HTTP_Entity::SubmitAllHeaders()
HTTP_Message::HTTP_Message(HTTP_Analyzer* arg_analyzer,
ContentLine_Analyzer* arg_cl, bool arg_is_orig,
int expect_body, int init_header_length)
int expect_body, int64_t init_header_length)
: MIME_Message (arg_analyzer)
{
analyzer = arg_analyzer;
@ -477,7 +477,7 @@ void HTTP_Message::Done(const int interrupted, const char* detail)
}
}
int HTTP_Message::Undelivered(int len)
int HTTP_Message::Undelivered(int64_t len)
{
if ( ! top_level )
return 0;
@ -628,7 +628,7 @@ void HTTP_Message::SubmitEvent(int event_type, const char* detail)
MyHTTP_Analyzer()->HTTP_Event(category, detail);
}
void HTTP_Message::SetPlainDelivery(int length)
void HTTP_Message::SetPlainDelivery(int64_t length)
{
content_line->SetPlainDelivery(length);
@ -701,7 +701,7 @@ void HTTP_Message::DeliverEntityData()
total_buffer_size = 0;
}
int HTTP_Message::InitBuffer(int length)
int HTTP_Message::InitBuffer(int64_t length)
{
if ( length <= 0 )
return 0;
@ -1633,7 +1633,7 @@ void HTTP_Analyzer::HTTP_MessageDone(int is_orig, HTTP_Message* /* message */)
}
void HTTP_Analyzer::InitHTTPMessage(ContentLine_Analyzer* cl, HTTP_Message*& message,
bool is_orig, int expect_body, int init_header_length)
bool is_orig, int expect_body, int64_t init_header_length)
{
if ( message )
{


@ -39,9 +39,9 @@ public:
void EndOfData();
void Deliver(int len, const char* data, int trailing_CRLF);
int Undelivered(int len);
int BodyLength() const { return body_length; }
int HeaderLength() const { return header_length; }
int Undelivered(int64_t len);
int64_t BodyLength() const { return body_length; }
int64_t HeaderLength() const { return header_length; }
void SkipBody() { deliver_body = 0; }
protected:
@ -50,11 +50,11 @@ protected:
HTTP_Message* http_message;
int chunked_transfer_state;
int content_length;
int expect_data_length;
int64_t content_length;
int64_t expect_data_length;
int expect_body;
int body_length;
int header_length;
int64_t body_length;
int64_t header_length;
int deliver_body;
enum { IDENTITY, GZIP, COMPRESS, DEFLATE } encoding;
#ifdef HAVE_LIBZ
@ -68,7 +68,7 @@ protected:
void SubmitData(int len, const char* buf);
void SetPlainDelivery(int length);
void SetPlainDelivery(int64_t length);
void SubmitHeader(MIME_Header* h);
void SubmitAllHeaders();
@ -94,12 +94,12 @@ enum {
class HTTP_Message : public MIME_Message {
public:
HTTP_Message(HTTP_Analyzer* analyzer, ContentLine_Analyzer* cl,
bool is_orig, int expect_body, int init_header_length);
bool is_orig, int expect_body, int64_t init_header_length);
~HTTP_Message();
void Done(const int interrupted, const char* msg);
void Done() { Done(0, "message ends normally"); }
int Undelivered(int len);
int Undelivered(int64_t len);
void BeginEntity(MIME_Entity* /* entity */);
void EndEntity(MIME_Entity* entity);
@ -111,7 +111,7 @@ public:
void SubmitEvent(int event_type, const char* detail);
void SubmitTrailingHeaders(MIME_HeaderList& /* hlist */);
void SetPlainDelivery(int length);
void SetPlainDelivery(int64_t length);
void SkipEntityData();
HTTP_Analyzer* MyHTTP_Analyzer() const
@ -135,16 +135,16 @@ protected:
double start_time;
int body_length; // total length of entity bodies
int header_length; // total length of headers, including the request/reply line
int64_t body_length; // total length of entity bodies
int64_t header_length; // total length of headers, including the request/reply line
// Total length of content gaps that are "successfully" skipped.
// Note: this might NOT include all content gaps!
int content_gap_length;
int64_t content_gap_length;
HTTP_Entity* current_entity;
int InitBuffer(int length);
int InitBuffer(int64_t length);
void DeliverEntityData();
Val* BuildMessageStat(const int interrupted, const char* msg);
@ -191,7 +191,7 @@ protected:
int HTTP_ReplyLine(const char* line, const char* end_of_line);
void InitHTTPMessage(ContentLine_Analyzer* cl, HTTP_Message*& message, bool is_orig,
int expect_body, int init_header_length);
int expect_body, int64_t init_header_length);
const char* PrefixMatch(const char* line, const char* end_of_line,
const char* prefix);


@ -11,7 +11,7 @@
#define UHASH_KEY_SIZE 32
typedef unsigned int hash_t;
typedef uint64 hash_t;
typedef enum {
HASH_KEY_INT,


@ -79,6 +79,9 @@ void ICMP_Analyzer::DeliverPacket(int arg_len, const u_char* data,
NextICMP(current_timestamp, icmpp, len, caplen, data);
if ( caplen >= len )
ForwardPacket(len, data, is_orig, seq, ip, caplen);
if ( rule_matcher )
matcher_state.Match(Rule::PAYLOAD, data, len, is_orig,
false, false, true);
@ -252,6 +255,20 @@ void ICMP_Analyzer::Describe(ODesc* d) const
d->Add(dotted_addr(Conn()->RespAddr()));
}
void ICMP_Analyzer::UpdateConnVal(RecordVal *conn_val)
{
int orig_endp_idx = connection_type->FieldOffset("orig");
int resp_endp_idx = connection_type->FieldOffset("resp");
RecordVal *orig_endp = conn_val->Lookup(orig_endp_idx)->AsRecordVal();
RecordVal *resp_endp = conn_val->Lookup(resp_endp_idx)->AsRecordVal();
UpdateEndpointVal(orig_endp, 1);
UpdateEndpointVal(resp_endp, 0);
// Call children's UpdateConnVal
Analyzer::UpdateConnVal(conn_val);
}
void ICMP_Analyzer::UpdateEndpointVal(RecordVal* endp, int is_orig)
{
Conn()->EnableStatusUpdateTimer();
@ -304,6 +321,24 @@ void ICMP_Echo_Analyzer::NextICMP(double t, const struct icmp* icmpp, int len,
ConnectionEvent(f, vl);
}
ICMP_Redir_Analyzer::ICMP_Redir_Analyzer(Connection* c)
: ICMP_Analyzer(AnalyzerTag::ICMP_Redir, c)
{
}
void ICMP_Redir_Analyzer::NextICMP(double t, const struct icmp* icmpp, int len,
int caplen, const u_char*& data)
{
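// For redirects, the ICMP header's union carries the gateway address
// the sender wants traffic redirected to (RFC 792).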
uint32 addr = ntohl(icmpp->icmp_hun.ih_void);
val_list* vl = new val_list;
vl->append(BuildConnVal());
vl->append(BuildICMPVal());
vl->append(new AddrVal(htonl(addr)));
ConnectionEvent(icmp_redirect, vl);
}
void ICMP_Context_Analyzer::NextICMP(double t, const struct icmp* icmpp,
int len, int caplen, const u_char*& data)


@ -18,6 +18,8 @@ class ICMP_Analyzer : public TransportLayerAnalyzer {
public:
ICMP_Analyzer(Connection* conn);
virtual void UpdateConnVal(RecordVal *conn_val);
static Analyzer* InstantiateAnalyzer(Connection* conn)
{ return new ICMP_Analyzer(conn); }
@ -30,7 +32,6 @@ protected:
virtual void Done();
virtual void DeliverPacket(int len, const u_char* data, bool orig,
int seq, const IP_Hdr* ip, int caplen);
virtual void UpdateEndpointVal(RecordVal* endp, int is_orig);
virtual bool IsReuse(double t, const u_char* pkt);
virtual unsigned int MemoryAllocation() const;
@ -52,6 +53,9 @@ protected:
int request_len, reply_len;
RuleMatcherState matcher_state;
private:
void UpdateEndpointVal(RecordVal* endp, int is_orig);
};
class ICMP_Echo_Analyzer : public ICMP_Analyzer {
@ -70,6 +74,22 @@ protected:
int len, int caplen, const u_char*& data);
};
class ICMP_Redir_Analyzer : public ICMP_Analyzer {
public:
ICMP_Redir_Analyzer(Connection* conn);
static Analyzer* InstantiateAnalyzer(Connection* conn)
{ return new ICMP_Redir_Analyzer(conn); }
static bool Available() { return icmp_redirect; }
protected:
ICMP_Redir_Analyzer() { }
virtual void NextICMP(double t, const struct icmp* icmpp,
int len, int caplen, const u_char*& data);
};
class ICMP_Context_Analyzer : public ICMP_Analyzer {
public:
ICMP_Context_Analyzer(AnalyzerTag::Tag tag, Connection* conn)


@ -1,5 +1,7 @@
// See the file "COPYING" in the main distribution directory for copyright.
#include <algorithm>
#include "LogMgr.h"
#include "Event.h"
#include "EventHandler.h"
@ -1051,7 +1053,7 @@ LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty)
for ( int i = 0; i < lval->val.vector_val.size; i++ )
{
lval->val.vector_val.vals[i] =
ValToLogVal(vec->Lookup(VECTOR_MIN + i),
ValToLogVal(vec->Lookup(i),
vec->Type()->YieldType());
}


@ -51,7 +51,6 @@ int reading_live = 0;
int reading_traces = 0;
int have_pending_timers = 0;
double pseudo_realtime = 0.0;
char* user_pcap_filter = 0;
bool using_communication = false;
double network_time = 0.0; // time according to last packet timestamp


@ -58,9 +58,6 @@ extern int have_pending_timers;
// is the speedup (1 = real-time, 0.5 = half real-time, etc.).
extern double pseudo_realtime;
// Pcap filter supplied by the user on the command line (if any).
extern char* user_pcap_filter;
// When we started processing the current packet and corresponding event
// queue.
extern double processing_start_time;


@ -259,6 +259,8 @@ int record_all_packets;
RecordType* script_id;
TableType* id_table;
StringVal* cmd_line_bpf_filter;
#include "const.bif.netvar_def"
#include "types.bif.netvar_def"
#include "event.bif.netvar_def"
@ -312,6 +314,9 @@ void init_general_global_var()
trace_output_file = internal_val("trace_output_file")->AsStringVal();
record_all_packets = opt_internal_int("record_all_packets");
cmd_line_bpf_filter =
internal_val("cmd_line_bpf_filter")->AsStringVal();
}
void init_net_var()


@ -262,6 +262,8 @@ extern int record_all_packets;
extern RecordType* script_id;
extern TableType* id_table;
extern StringVal* cmd_line_bpf_filter;
// Initializes globals that don't pertain to network/event analysis.
extern void init_general_global_var();


@ -576,9 +576,11 @@ void POP3_Analyzer::ProcessReply(int length, const char* line)
if ( multiLine == true )
{
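// A multi-line reply is terminated by a line holding only a dot:
// accept ".", ".\n" and ".\r\n".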
bool terminator =
length > 1 && line[0] == '.' &&
line[0] == '.' &&
(length == 1 ||
(length > 1 &&
(line[1] == '\n' ||
(length > 2 && line[1] == '\r' && line[2] == '\n'));
(length > 2 && line[1] == '\r' && line[2] == '\n'))));
if ( terminator )
{


@ -181,16 +181,98 @@ void PktSrc::Process()
current_timestamp = next_timestamp;
int pkt_hdr_size = hdr_size;
// Unfortunately some packets on the link might have MPLS labels
// while others don't. That means we need to ask the link-layer if
// labels are in place.
bool have_mpls = false;
int protocol = 0;
switch ( datalink ) {
case DLT_NULL:
{
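// DLT_NULL prepends a 32-bit address-family value in the byte order
// of the capturing host; it is assembled here assuming little-endian.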
protocol = (data[3] << 24) + (data[2] << 16) + (data[1] << 8) + data[0];
if ( protocol != AF_INET && protocol != AF_INET6 )
{
sessions->Weird("non_ip_packet_in_null_transport", &hdr, data);
data = 0;
return;
}
break;
}
case DLT_EN10MB:
{
// Get protocol being carried from the ethernet frame.
protocol = (data[12] << 8) + data[13];
// MPLS carried over the ethernet frame.
if ( protocol == 0x8847 )
have_mpls = true;
// VLAN carried over ethernet frame.
else if ( protocol == 0x8100 )
{
data += get_link_header_size(datalink);
data += 4; // Skip the vlan header
pkt_hdr_size = 0;
}
break;
}
case DLT_PPP_SERIAL:
{
// Get PPP protocol.
protocol = (data[2] << 8) + data[3];
if ( protocol == 0x0281 )
// MPLS Unicast
have_mpls = true;
else if ( protocol != 0x0021 && protocol != 0x0057 )
{
// Neither IPv4 nor IPv6.
sessions->Weird("non_ip_packet_in_ppp_encapsulation", &hdr, data);
data = 0;
return;
}
break;
}
}
if ( have_mpls )
{
// Remove the data link layer
data += get_link_header_size(datalink);
// Denote a header size of zero before the IP header
pkt_hdr_size = 0;
// Skip the MPLS label stack.
bool end_of_stack = false;
while ( ! end_of_stack )
{
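// Each MPLS label stack entry is 4 bytes; the low bit of its third
// byte is the bottom-of-stack flag.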
end_of_stack = *(data + 2) & 0x01;
data += 4;
}
}
if ( pseudo_realtime )
{
current_pseudo = CheckPseudoTime();
net_packet_arrival(current_pseudo, &hdr, data, hdr_size, this);
net_packet_arrival(current_pseudo, &hdr, data, pkt_hdr_size, this);
if ( ! first_wallclock )
first_wallclock = current_time(true);
}
else
net_packet_arrival(current_timestamp, &hdr, data, hdr_size, this);
net_packet_arrival(current_timestamp, &hdr, data, pkt_hdr_size, this);
data = 0;
}
@ -399,6 +481,11 @@ PktInterfaceSrc::PktInterfaceSrc(const char* arg_interface, const char* filter,
if ( PrecompileFilter(0, filter) && SetFilter(0) )
{
SetHdrSize();
if ( closed )
// Couldn't get header size.
return;
fprintf(stderr, "listening on %s\n", interface);
}
else
@ -647,6 +734,9 @@ int get_link_header_size(int dl)
return 16;
#endif
case DLT_PPP_SERIAL: // PPP_SERIAL
return 4;
case DLT_RAW:
return 0;
}


@ -173,6 +173,8 @@
#endif
#include <sys/resource.h>
#include <algorithm>
#include "RemoteSerializer.h"
#include "Func.h"
#include "EventRegistry.h"
@ -2394,12 +2396,12 @@ bool RemoteSerializer::SendPrintHookEvent(BroFile* f, const char* txt)
if ( ! fname )
continue; // not a managed file.
int len = strlen(txt);
size_t len = strlen(txt);
// We cut off everything after the max buffer size. That
// makes the code a bit easier, and we shouldn't have such
// long lines anyway.
len = min(len, PRINT_BUFFER_SIZE - strlen(fname) - 2);
len = min<size_t>(len, PRINT_BUFFER_SIZE - strlen(fname) - 2);
// If there's not enough space in the buffer, flush it.

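Note: with the fixed-type min()/max() helpers removed from util.h further below, this call resolves to std::min, which cannot deduce T when the operand types differ; the explicit min<size_t> pins both arguments to size_t. A minimal sketch of the deduction issue (values are illustrative):

#include <algorithm>
#include <cstddef>

int main()
	{
	size_t a = 10;
	int b = 7;
	// std::min(a, b);                 // ill-formed: T deduced as both size_t and int
	size_t m = std::min<size_t>(a, b); // OK: b is converted to size_t
	return (int) m;
	}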

@ -21,6 +21,7 @@ void SerializationFormat::StartRead(char* data, uint32 arg_len)
input = data;
input_len = arg_len;
input_pos = 0;
bytes_read = 0;
}
void SerializationFormat::EndRead()
@ -44,7 +45,6 @@ void SerializationFormat::StartWrite()
output_pos = 0;
bytes_written = 0;
bytes_read = 0;
}
uint32 SerializationFormat::EndWrite(char** data)


@ -122,7 +122,7 @@ protected:
// This will be increased whenever there is an incompatible change
// in the data format.
static const uint32 DATA_FORMAT_VERSION = 18;
static const uint32 DATA_FORMAT_VERSION = 19;
ChunkedIO* io;


@ -220,7 +220,7 @@ void NetSessions::DispatchPacket(double t, const struct pcap_pkthdr* hdr,
}
else
// Blanket encapsulation (e.g., for VLAN).
// Blanket encapsulation
hdr_size += encap_hdr_size;
}


@ -2,7 +2,7 @@
//
// See the file "COPYING" in the main distribution directory for copyright.
#include "NetVar.h"
#include "PIA.h"
#include "File.h"
#include "TCP.h"
@ -922,9 +922,6 @@ int TCP_Analyzer::DeliverData(double t, const u_char* data, int len, int caplen,
int need_contents = endpoint->DataSent(t, data_seq,
len, caplen, data, ip, tp);
LOOP_OVER_GIVEN_CHILDREN(i, packet_children)
(*i)->NextPacket(len, data, is_orig, data_seq, ip, caplen);
return need_contents;
}
@ -1053,6 +1050,12 @@ void TCP_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig,
CheckRecording(need_contents, flags);
// Handle child_packet analyzers. Note: This happens *after* the
// packet has been processed and the TCP state updated.
LOOP_OVER_GIVEN_CHILDREN(i, packet_children)
(*i)->NextPacket(len, data, is_orig,
base_seq - endpoint->StartSeq(), ip, caplen);
if ( ! reassembling )
ForwardPacket(len, data, is_orig,
base_seq - endpoint->StartSeq(), ip, caplen);
@ -1082,11 +1085,25 @@ void TCP_Analyzer::FlipRoles()
resp->is_orig = !resp->is_orig;
}
void TCP_Analyzer::UpdateEndpointVal(RecordVal* endp, int is_orig)
void TCP_Analyzer::UpdateConnVal(RecordVal *conn_val)
{
TCP_Endpoint* s = is_orig ? orig : resp;
endp->Assign(0, new Val(s->Size(), TYPE_COUNT));
endp->Assign(1, new Val(int(s->state), TYPE_COUNT));
int orig_endp_idx = connection_type->FieldOffset("orig");
int resp_endp_idx = connection_type->FieldOffset("resp");
RecordVal *orig_endp_val = conn_val->Lookup(orig_endp_idx)->AsRecordVal();
RecordVal *resp_endp_val = conn_val->Lookup(resp_endp_idx)->AsRecordVal();
orig_endp_val->Assign(0, new Val(orig->Size(), TYPE_COUNT));
orig_endp_val->Assign(1, new Val(int(orig->state), TYPE_COUNT));
resp_endp_val->Assign(0, new Val(resp->Size(), TYPE_COUNT));
resp_endp_val->Assign(1, new Val(int(resp->state), TYPE_COUNT));
// Call children's UpdateConnVal
Analyzer::UpdateConnVal(conn_val);
// Have to do packet_children ourselves.
LOOP_OVER_GIVEN_CHILDREN(i, packet_children)
(*i)->UpdateConnVal(conn_val);
}
Val* TCP_Analyzer::BuildSYNPacketVal(int is_orig, const IP_Hdr* ip,


@ -77,6 +77,9 @@ public:
const u_char* option, TCP_Analyzer* analyzer,
bool is_orig, void* cookie);
// From Analyzer.h
virtual void UpdateConnVal(RecordVal *conn_val);
// Needs to be static because it's passed as a pointer-to-function
// rather than pointer-to-member-function.
static int ParseTCPOptions(const struct tcphdr* tcp,
@ -100,7 +103,6 @@ protected:
virtual void DeliverStream(int len, const u_char* data, bool orig);
virtual void Undelivered(int seq, int len, bool orig);
virtual void FlipRoles();
virtual void UpdateEndpointVal(RecordVal* endp, int is_orig);
virtual bool IsReuse(double t, const u_char* pkt);
// Returns the TCP header pointed to by data (which we assume is


@ -848,8 +848,8 @@ void TypeDecl::DescribeReST(ODesc* d) const
}
CommentedTypeDecl::CommentedTypeDecl(BroType* t, const char* i,
attr_list* attrs, std::list<std::string>* cmnt_list)
: TypeDecl(t, i, attrs)
attr_list* attrs, bool in_record, std::list<std::string>* cmnt_list)
: TypeDecl(t, i, attrs, in_record)
{
comments = cmnt_list;
}
@ -1157,6 +1157,7 @@ void RecordType::DescribeFieldsReST(ODesc* d, bool func_args) const
for ( int i = 0; i < num_fields; ++i )
{
if ( i > 0 )
{
if ( func_args )
d->Add(", ");
else
@ -1164,6 +1165,7 @@ void RecordType::DescribeFieldsReST(ODesc* d, bool func_args) const
d->NL();
d->NL();
}
}
FieldDecl(i)->DescribeReST(d);
}


@ -420,7 +420,7 @@ public:
class CommentedTypeDecl : public TypeDecl {
public:
CommentedTypeDecl(BroType* t, const char* i, attr_list* attrs = 0,
std::list<std::string>* cmnt_list = 0);
bool in_record = false, std::list<std::string>* cmnt_list = 0);
virtual ~CommentedTypeDecl();
void DescribeReST(ODesc* d) const;
@ -456,6 +456,8 @@ public:
// Given an offset, returns the field's name.
const char* FieldName(int field) const;
type_decl_list* Types() { return types; }
// Given an offset, returns the field's TypeDecl.
const TypeDecl* FieldDecl(int field) const;
TypeDecl* FieldDecl(int field);


@ -162,6 +162,22 @@ void UDP_Analyzer::DeliverPacket(int len, const u_char* data, bool is_orig,
ForwardPacket(len, data, is_orig, seq, ip, caplen);
}
void UDP_Analyzer::UpdateConnVal(RecordVal *conn_val)
{
int orig_endp_idx = connection_type->FieldOffset("orig");
int resp_endp_idx = connection_type->FieldOffset("resp");
RecordVal *orig_endp = conn_val->Lookup(orig_endp_idx)->AsRecordVal();
RecordVal *resp_endp = conn_val->Lookup(resp_endp_idx)->AsRecordVal();
UpdateEndpointVal(orig_endp, 1);
UpdateEndpointVal(resp_endp, 0);
// Call children's UpdateConnVal
Analyzer::UpdateConnVal(conn_val);
}
void UDP_Analyzer::UpdateEndpointVal(RecordVal* endp, int is_orig)
{
bro_int_t size = is_orig ? request_len : reply_len;


@ -19,6 +19,8 @@ public:
virtual void Init();
virtual void UpdateConnVal(RecordVal *conn_val);
static Analyzer* InstantiateAnalyzer(Connection* conn)
{ return new UDP_Analyzer(conn); }
@ -28,12 +30,14 @@ protected:
virtual void Done();
virtual void DeliverPacket(int len, const u_char* data, bool orig,
int seq, const IP_Hdr* ip, int caplen);
virtual void UpdateEndpointVal(RecordVal* endp, int is_orig);
virtual bool IsReuse(double t, const u_char* pkt);
virtual unsigned int MemoryAllocation() const;
bro_int_t request_len, reply_len;
private:
void UpdateEndpointVal(RecordVal* endp, int is_orig);
#define HIST_ORIG_DATA_PKT 0x1
#define HIST_RESP_DATA_PKT 0x2
#define HIST_ORIG_CORRUPT_PKT 0x4


@ -2866,7 +2866,7 @@ RecordVal::RecordVal(RecordType* t) : MutableVal(t)
else if ( tag == TYPE_TABLE )
def = new TableVal(type->AsTableType(), a);
else if ( t->Tag() == TYPE_VECTOR )
else if ( tag == TYPE_VECTOR )
def = new VectorVal(type->AsVectorType());
}
@ -2883,7 +2883,7 @@ RecordVal::~RecordVal()
void RecordVal::Assign(int field, Val* new_val, Opcode op)
{
if ( Lookup(field) &&
if ( new_val && Lookup(field) &&
record_type->FieldType(field)->Tag() == TYPE_TABLE &&
new_val->AsTableVal()->FindAttr(ATTR_MERGEABLE) )
{
@ -2976,6 +2976,7 @@ RecordVal* RecordVal::CoerceTo(const RecordType* t, Val* aggr) const
Expr* rhs = new ConstExpr(Lookup(i)->Ref());
Expr* e = new RecordCoerceExpr(rhs, ar_t->FieldType(t_i)->AsRecordType());
ar->Assign(t_i, e->Eval(0));
continue;
}
ar->Assign(t_i, Lookup(i)->Ref());
@ -3230,15 +3231,6 @@ bool VectorVal::Assign(unsigned int index, Val* element, const Expr* assigner,
return false;
}
if ( index == 0 || index > (1 << 30) )
{
if ( assigner )
assigner->Error(fmt("index (%d) must be positive",
index));
Unref(element);
return true; // true = "no fatal error"
}
BroType* yt = Type()->AsVectorType()->YieldType();
if ( yt && yt->Tag() == TYPE_TABLE &&
@ -3253,7 +3245,7 @@ bool VectorVal::Assign(unsigned int index, Val* element, const Expr* assigner,
Val* ival = new Val(index, TYPE_COUNT);
StateAccess::Log(new StateAccess(OP_ASSIGN_IDX,
this, ival, element,
(*val.vector_val)[index - 1]));
(*val.vector_val)[index]));
Unref(ival);
}
@ -3263,10 +3255,10 @@ bool VectorVal::Assign(unsigned int index, Val* element, const Expr* assigner,
}
}
if ( index <= val.vector_val->size() )
Unref((*val.vector_val)[index - 1]);
if ( index < val.vector_val->size() )
Unref((*val.vector_val)[index]);
else
val.vector_val->resize(index);
val.vector_val->resize(index + 1);
if ( LoggingAccess() && op != OP_NONE )
{
@ -3277,14 +3269,14 @@ bool VectorVal::Assign(unsigned int index, Val* element, const Expr* assigner,
StateAccess::Log(new StateAccess(op == OP_INCR ?
OP_INCR_IDX : OP_ASSIGN_IDX,
this, ival, element, (*val.vector_val)[index - 1]));
this, ival, element, (*val.vector_val)[index]));
Unref(ival);
}
// Note: we do *not* Ref() the element, if any, at this point.
// AssignExpr::Eval() already does this; other callers must remember
// to do it similarly.
(*val.vector_val)[index - 1] = element;
(*val.vector_val)[index] = element;
Modified();
return true;
@ -3293,7 +3285,7 @@ bool VectorVal::Assign(unsigned int index, Val* element, const Expr* assigner,
bool VectorVal::AssignRepeat(unsigned int index, unsigned int how_many,
Val* element, const Expr* assigner)
{
ResizeAtLeast(index + how_many - 1);
ResizeAtLeast(index + how_many);
for ( unsigned int i = index; i < index + how_many; ++i )
if ( ! Assign(i, element, assigner) )
@ -3305,10 +3297,10 @@ bool VectorVal::AssignRepeat(unsigned int index, unsigned int how_many,
Val* VectorVal::Lookup(unsigned int index) const
{
if ( index == 0 || index > val.vector_val->size() )
if ( index >= val.vector_val->size() )
return 0;
return (*val.vector_val)[index - 1];
return (*val.vector_val)[index];
}
unsigned int VectorVal::Resize(unsigned int new_num_elements)
@ -3397,7 +3389,7 @@ bool VectorVal::DoUnserialize(UnserialInfo* info)
{
Val* v;
UNSERIALIZE_OPTIONAL(v, Val::Unserialize(info, TYPE_ANY));
Assign(i + VECTOR_MIN, v, 0);
Assign(i, v, 0);
}
return true;

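Note: with VECTOR_MIN gone (see Val.h below), the core VectorVal API is uniformly zero-based. A minimal sketch of the new indexing contract, assuming a VectorType* vt is in scope:

VectorVal* vv = new VectorVal(vt);
vv->Assign(0, new Val(42, TYPE_COUNT), 0); // first element now lives at index 0
Val* first = vv->Lookup(0);                // previously Lookup(1)
Val* oob = vv->Lookup(vv->Size());         // out of range: returns 0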

@ -969,9 +969,6 @@ protected:
};
// The minimum index for vectors (0 or 1).
const int VECTOR_MIN = 1;
class VectorVal : public MutableVal {
public:
VectorVal(VectorType* t);


@ -242,6 +242,26 @@ void add_type(ID* id, BroType* t, attr_list* attr, int /* is_event */)
// t->GetTypeID() is true.
if ( generate_documentation )
{
switch ( t->Tag() ) {
// Only "shallow"-copy types that may contain records, so that
// additions to the original record type's list of fields remain
// visible through the copy.
case TYPE_RECORD:
tnew = new RecordType(t->AsRecordType()->Types());
break;
case TYPE_TABLE:
tnew = new TableType(t->AsTableType()->Indices(),
t->AsTableType()->YieldType());
break;
case TYPE_VECTOR:
tnew = new VectorType(t->AsVectorType()->YieldType());
break;
case TYPE_FUNC:
tnew = new FuncType(t->AsFuncType()->Args(),
t->AsFuncType()->YieldType(),
t->AsFuncType()->IsEvent());
break;
default:
SerializationFormat* form = new BinarySerializationFormat();
form->StartWrite();
CloneSerializer ss(form);
@ -258,6 +278,7 @@ void add_type(ID* id, BroType* t, attr_list* attr, int /* is_event */)
tnew = t->Unserialize(&uinfo);
delete [] data;
}
tnew->SetTypeID(copy_string(id->Name()));
}


@ -1979,9 +1979,14 @@ function precompile_pcap_filter%(id: PcapFilterID, s: string%): bool
# Install precompiled pcap filter.
function install_pcap_filter%(id: PcapFilterID%): bool
%{
ID* user_filter = global_scope()->Lookup("cmd_line_bpf_filter");
if ( ! user_filter )
internal_error("global cmd_line_bpf_filter not defined");
if ( user_filter->ID_Val()->AsStringVal()->Len() )
// Don't allow the script-level to change the filter when
// the user has specified one on the command line.
if ( user_pcap_filter )
return new Val(0, TYPE_BOOL);
bool success = true;
@ -2390,7 +2395,7 @@ function any_set%(v: any%) : bool
}
VectorVal* vv = v->AsVectorVal();
for ( unsigned int i = VECTOR_MIN; i < vv->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < vv->Size(); ++i )
if ( vv->Lookup(i) && vv->Lookup(i)->AsBool() )
return new Val(true, TYPE_BOOL);
@ -2408,7 +2413,7 @@ function all_set%(v: any%) : bool
}
VectorVal* vv = v->AsVectorVal();
for ( unsigned int i = VECTOR_MIN; i < vv->Size() + VECTOR_MIN; ++i )
for ( unsigned int i = 0; i < vv->Size(); ++i )
if ( ! vv->Lookup(i) || ! vv->Lookup(i)->AsBool() )
return new Val(false, TYPE_BOOL);
@ -2591,8 +2596,8 @@ function order%(v: any, ...%) : index_vec
// adjusting indices as we do so.
for ( i = 0; i < n; ++i )
{
int ind = ind_vv[i] + VECTOR_MIN;
result_v->Assign(i + VECTOR_MIN, new Val(ind, TYPE_COUNT), 0);
int ind = ind_vv[i];
result_v->Assign(i, new Val(ind, TYPE_COUNT), 0);
}
return result_v;
@ -3101,7 +3106,13 @@ function lookup_location%(a: addr%) : geo_location
}
#else
static int missing_geoip_reported = 0;
if ( ! missing_geoip_reported )
{
builtin_run_time("Bro was not configured for GeoIP support");
missing_geoip_reported = 1;
}
#endif
// We can get here even if we have GeoIP support if we weren't
@ -3159,7 +3170,13 @@ function lookup_asn%(a: addr%) : count
return new Val(atoi(gir+2), TYPE_COUNT);
}
#else
static int missing_geoip_reported = 0;
if ( ! missing_geoip_reported )
{
builtin_run_time("Bro was not configured for GeoIP ASN support");
missing_geoip_reported = 1;
}
#endif
// We can get here even if we have GeoIP support, if we weren't
@ -3323,6 +3340,15 @@ function entropy_test_finish%(index: any%): entropy_test_result
return ent_result;
%}
function bro_has_ipv6%(%) : bool
%{
#ifdef BROv6
return new Val(1, TYPE_BOOL);
#else
return new Val(0, TYPE_BOOL);
#endif
%}
%%{
#include <openssl/x509.h>


@ -319,8 +319,8 @@ definitions: definitions definition opt_ws
fprintf(fp_netvar_h, "// %s\n\n", auto_gen_comment);
fprintf(fp_netvar_init, "// %s\n\n", auto_gen_comment);
fprintf(fp_bro_init, "%s", $1);
fprintf(fp_bro_init, "export {\n");
fprintf(fp_func_def, "%s", $1);
}
;


@ -5,3 +5,5 @@
const ignore_keep_alive_rexmit: bool;
const skip_http_data: bool;
const parse_udp_tunnels: bool;
const use_conn_size_analyzer: bool;


@ -52,6 +52,7 @@ event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count,
event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%);
event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%);
event icmp_time_exceeded%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%);
event icmp_redirect%(c: connection, icmp: icmp_conn, a: addr%);
event net_stats_update%(t: time, ns: net_stats%);
event conn_stats%(c: connection, os: endpoint_stats, rs: endpoint_stats%);
event conn_weird%(name: string, c: connection%);


@ -9,6 +9,7 @@
#include <unistd.h>
#include <signal.h>
#include <string.h>
#include <list>
#ifdef HAVE_GETOPT_H
#include <getopt.h>
#endif
@ -47,6 +48,7 @@ extern "C" void OPENSSL_add_all_algorithms_conf(void);
#include "Stats.h"
#include "ConnCompressor.h"
#include "DPM.h"
#include "BroDoc.h"
#include "binpac_bro.h"
@ -103,6 +105,8 @@ char* proc_status_file = 0;
int FLAGS_use_binpac = false;
extern std::list<BroDoc*> docs_generated;
// Keep copy of command line
int bro_argc;
char** bro_argv;
@ -349,6 +353,7 @@ int main(int argc, char** argv)
char* events_file = 0;
char* seed_load_file = getenv("BRO_SEED_FILE");
char* seed_save_file = 0;
char* user_pcap_filter = 0;
int seed = 0;
int dump_cfg = false;
int to_xml = 0;
@ -743,6 +748,16 @@ int main(int argc, char** argv)
init_general_global_var();
if ( user_pcap_filter )
{
ID* id = global_scope()->Lookup("cmd_line_bpf_filter");
if ( ! id )
internal_error("global cmd_line_bpf_filter not defined");
id->SetVal(new StringVal(user_pcap_filter));
}
// Parse rule files defined on the script level.
char* script_rule_files =
copy_string(internal_val("signature_files")->AsString()->CheckString());
@ -800,8 +815,7 @@ int main(int argc, char** argv)
if ( dns_type != DNS_PRIME )
net_init(interfaces, read_files, netflows, flow_files,
writefile,
user_pcap_filter ? user_pcap_filter : "tcp or udp",
writefile, "tcp or udp or icmp",
secondary_path->Filter(), do_watchdog);
if ( ! reading_traces )
@ -961,6 +975,20 @@ int main(int argc, char** argv)
mgr.Drain();
if ( generate_documentation )
{
std::list<BroDoc*>::iterator it;
for ( it = docs_generated.begin(); it != docs_generated.end(); ++it )
(*it)->WriteDocFile();
for ( it = docs_generated.begin(); it != docs_generated.end(); ++it )
delete *it;
terminate_bro();
return 0;
}
have_pending_timers = ! reading_traces && timer_mgr->Size() > 0;
if ( io_sources.Size() > 0 || have_pending_timers )


@ -936,6 +936,7 @@ type_decl:
if ( generate_documentation )
{
// TypeDecl ctor deletes the attr list, so make a copy
attr_list* a = $5;
attr_list* a_copy = 0;
@ -947,7 +948,7 @@ type_decl:
}
last_fake_type_decl = new CommentedTypeDecl(
$4, $2, a_copy, concat_opt_docs($1, $7));
$4, $2, a_copy, (in_record > 0), concat_opt_docs($1, $7));
}
$$ = new TypeDecl($4, $2, $5, (in_record > 0));
@ -1067,8 +1068,10 @@ decl:
}
| TOK_REDEF TOK_RECORD global_id TOK_ADD_TO
'{' type_decl_list '}' opt_attr ';'
'{' { do_doc_token_start(); } type_decl_list '}' opt_attr ';'
{
do_doc_token_stop();
if ( ! $3->Type() )
$3->Error("unknown identifier");
else
@ -1078,9 +1081,29 @@ decl:
$3->Error("not a record type");
else
{
const char* error = add_to->AddFields($6, $8);
const char* error = add_to->AddFields($7, $9);
if ( error )
$3->Error(error);
else if ( generate_documentation )
{
if ( fake_type_decl_list )
{
BroType* fake_record =
new RecordType(fake_type_decl_list);
ID* fake = create_dummy_id($3, fake_record);
fake_type_decl_list = 0;
BroDocObj* o =
new BroDocObj(fake, reST_doc_comments, true);
o->SetRole(true);
current_reST_doc->AddRedef(o);
}
else
{
fprintf(stderr, "Warning: doc mode did not process "
"record extension for '%s', CommentedTypeDecl "
"list unavailable.\n", $3->Name());
}
}
}
}
}
@ -1622,7 +1645,7 @@ opt_doc_list:
int yyerror(const char msg[])
{
char* msgbuf = new char[strlen(msg) + strlen(last_tok) + 64];
char* msgbuf = new char[strlen(msg) + strlen(last_tok) + 128];
if ( last_tok[0] == '\n' )
sprintf(msgbuf, "%s, on previous line", msg);
@ -1631,6 +1654,10 @@ int yyerror(const char msg[])
else
sprintf(msgbuf, "%s, at or near \"%s\"", msg, last_tok);
if ( generate_documentation )
strcat(msgbuf, "\nDocumentation mode is enabled: "
"remember to check syntax of ## style comments\n");
error(msgbuf);
return 0;


@ -16,7 +16,6 @@
#include "PolicyFile.h"
#include "broparse.h"
#include "BroDoc.h"
#include "BroBifDoc.h"
#include "Analyzer.h"
#include "AnalyzerTags.h"
@ -58,7 +57,7 @@ char last_tok[128];
static PList(char) files_scanned;
// reST documents that we've created (or have at least opened so far).
static std::list<BroDoc*> docs_generated;
std::list<BroDoc*> docs_generated;
// reST comments (those starting with ##) seen so far.
std::list<std::string>* reST_doc_comments = 0;
@ -611,17 +610,8 @@ static int load_files_with_prefix(const char* orig_file)
if ( generate_documentation )
{
const char* bifExtStart = strstr(full_filename, ".bif.bro");
BroDoc* reST_doc;
if ( bifExtStart )
reST_doc = new BroBifDoc(full_filename);
else
reST_doc = new BroDoc(full_filename);
docs_generated.push_back(reST_doc);
current_reST_doc = reST_doc;
current_reST_doc = new BroDoc(full_filename);
docs_generated.push_back(current_reST_doc);
}
}
@ -877,18 +867,7 @@ int yywrap()
}
if ( generate_documentation )
{
std::list<BroDoc*>::iterator it;
for ( it = docs_generated.begin(); it != docs_generated.end(); ++it )
{
(*it)->WriteDocFile();
delete *it;
}
docs_generated.clear();
clear_reST_doc_comments();
}
// Otherwise, we are done.
return 1;


@ -244,8 +244,11 @@ Val* do_split(StringVal* str_val, RE_Matcher* re, TableVal* other_sep,
--n;
}
if ( num_sep >= max_num_sep )
if ( max_num_sep && num_sep >= max_num_sep )
{
offset = end_of_s - s;
n = 0;
}
Val* ind = new Val(++num, TYPE_COUNT);
a->Assign(ind, new StringVal(offset, (const char*) s));
@ -627,7 +630,7 @@ function strip%(str: string%): string
function string_fill%(len: int, source: string%): string
%{
const u_char* src = source->Bytes();
int n = source->Len();
int64_t n = source->Len();
char* dst = new char[len];
for ( int i = 0; i < len; i += n )


@ -295,9 +295,9 @@ char* strcasestr(const char* s, const char* find)
}
#endif
int atoi_n(int len, const char* s, const char** end, int base, int& result)
template<class T> int atoi_n(int len, const char* s, const char** end, int base, T& result)
{
int n = 0;
T n = 0;
int neg = 0;
if ( len > 0 && *s == '-' )
@ -340,6 +340,32 @@ int atoi_n(int len, const char* s, const char** end, int base, int& result)
return 1;
}
// Instantiate the ones we need.
template int atoi_n<int>(int len, const char* s, const char** end, int base, int& result);
template int atoi_n<int64_t>(int len, const char* s, const char** end, int base, int64_t& result);
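Note: the int64_t instantiation is what lets callers such as HTTP_Entity::SubmitHeader() above accept Content-Length values past 32 bits. A minimal usage sketch (the value is illustrative):

#include <cinttypes>
#include <cstdio>
#include <cstring>

const char* s = "8589934592"; // 8 GB: would overflow a 32-bit int
int64_t n = 0;
if ( atoi_n(strlen(s), s, 0, 10, n) )
	printf("%" PRId64 "\n", n); // prints 8589934592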
char* uitoa_n(uint64 value, char* str, int n, int base)
{
static char dig[] = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
int i = 0;
uint64 v;
v = value;
do {
str[i++] = dig[v % base];
v /= base;
} while ( v && i < n - 1 );
str[i] = '\0';
return str;
}
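Note: as written, uitoa_n() emits digits least-significant first and never reverses them, which is harmless for rendering unique IDs. The connection compressor uses it with base 62 (see ConnCompressor.cc above); a usage sketch mirroring that call:

char tmp[20];
uint64 uid = Connection::CalculateNextUID();
StringVal* s = new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62));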
int strstr_n(const int big_len, const u_char* big,
const int little_len, const u_char* little)
{
@ -661,6 +687,11 @@ void init_random_seed(uint32 seed, const char* read_file, const char* write_file
write_file);
}
bool have_random_seed()
{
return bro_rand_determistic;
}
long int bro_random()
{
if ( ! bro_rand_determistic )


@ -110,8 +110,8 @@ extern int strcasecmp_n(int s_len, const char* s, const char* t);
extern char* strcasestr(const char* s, const char* find);
#endif
extern const char* strpbrk_n(size_t len, const char* s, const char* charset);
extern int atoi_n(int len, const char* s, const char** end,
int base, int& result);
template<class T> int atoi_n(int len, const char* s, const char** end, int base, T& result);
extern char* uitoa_n(uint64 value, char* str, int n, int base);
int strstr_n(const int big_len, const unsigned char* big,
const int little_len, const unsigned char* little);
extern int fputs(int len, const char* s, FILE* fp);
@ -149,6 +149,9 @@ extern const char* md5_digest_print(const unsigned char digest[16]);
extern void init_random_seed(uint32 seed, const char* load_file,
const char* write_file);
// Returns true if the user explicitly set a seed via init_random_seed().
extern bool have_random_seed();
// Replacement for the system random(), to which it normally falls back
// except when a seed has been given. In that case, we use our own
// predictable PRNG.
@ -156,9 +159,6 @@ long int bro_random();
extern uint64 rand64bit();
#define UHASH_KEY_SIZE 32
extern uint8 uhash_key[UHASH_KEY_SIZE];
// Each event source that may generate events gets an internally unique ID.
// This is always LOCAL for a local Bro. For remote event sources, it gets
// assigned by the RemoteSerializer.
@ -233,16 +233,6 @@ extern struct timeval double_to_timeval(double t);
// Return > 0 if tv_a > tv_b, 0 if equal, < 0 if tv_a < tv_b.
extern int time_compare(struct timeval* tv_a, struct timeval* tv_b);
inline int min(int a, int b)
{
return a < b ? a : b;
}
inline int max(int a, int b)
{
return a > b ? a : b;
}
// For now, don't use hash_maps - they're not fully portable.
#if 0
// Use for hash_map's string keys.


@ -0,0 +1,5 @@
1128727430.350788 ? 141.42.64.125 125.190.109.199 other 56729 12345 tcp ? ? S0 X 1 60 0 0 cc=1
1144876538.705610 5.921003 169.229.147.203 239.255.255.253 other 49370 427 udp 147 ? S0 X 3 231 0 0
1144876599.397603 0.815763 192.150.186.169 194.64.249.244 http 53063 80 tcp 377 445 SF X 6 677 5 713
1144876709.032670 9.000191 169.229.147.43 239.255.255.253 other 49370 427 udp 196 ? S0 X 4 308 0 0
1144876697.068273 0.000650 192.150.186.169 192.150.186.15 icmp-unreach 3 3 icmp 56 ? OTH X 2 112 0 0


@ -0,0 +1,5 @@
1128727430.350788 ? 141.42.64.125 125.190.109.199 other 56729 12345 tcp ? ? S0 X 1 60 0 0
1144876538.705610 5.921003 169.229.147.203 239.255.255.253 other 49370 427 udp 147 ? S0 X 3 231 0 0
1144876599.397603 0.815763 192.150.186.169 194.64.249.244 http 53063 80 tcp 377 445 SF X 6 697 5 713
1144876709.032670 9.000191 169.229.147.43 239.255.255.253 other 49370 427 udp 196 ? S0 X 4 308 0 0
1144876697.068273 0.000650 192.150.186.169 192.150.186.15 icmp-unreach 3 3 icmp 56 ? OTH X 2 112 0 0

Some files were not shown because too many files have changed in this diff.