diff --git a/doc/file-analysis.rst b/doc/file-analysis.rst index f312e06471..0a96a8efb7 100644 --- a/doc/file-analysis.rst +++ b/doc/file-analysis.rst @@ -82,9 +82,9 @@ attached, they start receiving the contents of the file as Bro extracts it from an ongoing network connection. What they do with the file contents is up to the particular file analyzer implementation, but they'll typically either report further information about the file via -events (e.g. :bro:see:`FileAnalysis::ANALYZER_MD5` will report the +events (e.g. :bro:see:`Files::ANALYZER_MD5` will report the file's MD5 checksum via :bro:see:`file_hash` once calculated) or they'll -have some side effect (e.g. :bro:see:`FileAnalysis::ANALYZER_EXTRACT` +have some side effect (e.g. :bro:see:`Files::ANALYZER_EXTRACT` will write the contents of the file out to the local file system). In the future there may be file analyzers that automatically attach to @@ -98,7 +98,7 @@ explicit attachment decision: { print "new file", f$id; if ( f?$mime_type && f$mime_type == "text/plain" ) - FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_MD5]); + Files::add_analyzer(f, Files::ANALYZER_MD5); } event file_hash(f: fa_file, kind: string, hash: string) @@ -113,26 +113,27 @@ output:: file_hash, Cx92a0ym5R8, md5, 397168fd09991a0e712254df7bc639ac Some file analyzers might have tunable parameters that need to be -specified in the call to :bro:see:`FileAnalysis::add_analyzer`: +specified in the call to :bro:see:`Files::add_analyzer`: .. code:: bro event file_new(f: fa_file) { - FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_EXTRACT, - $extract_filename="./myfile"]); + Files::add_analyzer(f, Files::ANALYZER_EXTRACT, + [$extract_filename="myfile"]); } In this case, the file extraction analyzer doesn't generate any further -events, but does have the side effect of writing out the file contents -to the local file system at the specified location of ``./myfile``. Of -course, for a network with more than a single file being transferred, -it's probably preferable to specify a different extraction path for each -file, unlike this example. +events, but does have the effect of writing out the file contents to the +local file system at the location resulting from the concatenation of +the path specified by :bro:see:`FileExtract::prefix` and the string, +``myfile``. Of course, for a network with more than a single file being +transferred, it's probably preferable to specify a different extraction +path for each file, unlike this example. Regardless of which file analyzers end up acting on a file, general information about the file (e.g. size, time of last data transferred, -MIME type, etc.) are logged in ``file_analysis.log``. +MIME type, etc.) are logged in ``files.log``. Input Framework Integration =========================== @@ -150,7 +151,7 @@ a network interface it's monitoring. 
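One way to avoid the shared ``myfile`` path noted above is to fold each file's
unique identifier into ``extract_filename``. The sketch below assumes only the
``Files`` API introduced in this patch; the ``extract-`` naming prefix is
illustrative, not part of the change:

.. code:: bro

    event file_new(f: fa_file)
        {
        # Report the file's MD5 checksum via the file_hash event.
        Files::add_analyzer(f, Files::ANALYZER_MD5);

        # Write each file under FileExtract::prefix using a distinct name
        # derived from the file id ("extract-" is an arbitrary choice).
        Files::add_analyzer(f, Files::ANALYZER_EXTRACT,
                            [$extract_filename="extract-" + f$id]);
        }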
It only requires a call to event file_new(f: fa_file) { print "new file", f$id; - FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_MD5]); + Files::add_analyzer(f, Files::ANALYZER_MD5); } event file_state_remove(f: fa_file) diff --git a/doc/index.rst b/doc/index.rst index ad05f7bf82..aa33d8797d 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -47,6 +47,7 @@ Script Reference scripts/index scripts/builtins scripts/proto-analyzers + scripts/file-analyzers Other Bro Components -------------------- diff --git a/doc/scripts/CMakeLists.txt b/doc/scripts/CMakeLists.txt index e7e39d0b3f..fa234e74f2 100644 --- a/doc/scripts/CMakeLists.txt +++ b/doc/scripts/CMakeLists.txt @@ -124,28 +124,34 @@ endmacro(REST_TARGET) # Schedule Bro scripts for which to generate documentation. include(DocSourcesList.cmake) -# This reST target is independent of a particular Bro script... -add_custom_command(OUTPUT proto-analyzers.rst - # delete any leftover state from previous bro runs - COMMAND "${CMAKE_COMMAND}" - ARGS -E remove_directory .state - # generate the reST documentation using bro - COMMAND BROPATH=${BROPATH}:${srcDir} BROMAGIC=${CMAKE_SOURCE_DIR}/magic/database ${CMAKE_BINARY_DIR}/src/bro - ARGS -b -Z base/init-bare.bro || (rm -rf .state *.log *.rst && exit 1) - # move generated doc into a new directory tree that - # defines the final structure of documents - COMMAND "${CMAKE_COMMAND}" - ARGS -E make_directory ${dstDir} - COMMAND "${CMAKE_COMMAND}" - ARGS -E copy proto-analyzers.rst ${dstDir} - # clean up the build directory - COMMAND rm - ARGS -rf .state *.log *.rst - DEPENDS bro - WORKING_DIRECTORY ${CMAKE_BINARY_DIR} - COMMENT "[Bro] Generating reST docs for proto-analyzers.rst" -) -list(APPEND ALL_REST_OUTPUTS proto-analyzers.rst) +# Macro for generating reST docs that are independent of any particular Bro +# script. 
+macro(INDEPENDENT_REST_TARGET reST_file) + add_custom_command(OUTPUT ${reST_file} + # delete any leftover state from previous bro runs + COMMAND "${CMAKE_COMMAND}" + ARGS -E remove_directory .state + # generate the reST documentation using bro + COMMAND BROPATH=${BROPATH}:${srcDir} BROMAGIC=${CMAKE_SOURCE_DIR}/magic/database ${CMAKE_BINARY_DIR}/src/bro + ARGS -b -Z base/init-bare.bro || (rm -rf .state *.log *.rst && exit 1) + # move generated doc into a new directory tree that + # defines the final structure of documents + COMMAND "${CMAKE_COMMAND}" + ARGS -E make_directory ${dstDir} + COMMAND "${CMAKE_COMMAND}" + ARGS -E copy ${reST_file} ${dstDir} + # clean up the build directory + COMMAND rm + ARGS -rf .state *.log *.rst + DEPENDS bro + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + COMMENT "[Bro] Generating reST docs for ${reST_file}" + ) + list(APPEND ALL_REST_OUTPUTS ${reST_file}) +endmacro(INDEPENDENT_REST_TARGET) + +independent_rest_target(proto-analyzers.rst) +independent_rest_target(file-analyzers.rst) # create temporary list of all docs to include in the master policy/index file file(WRITE ${MASTER_POLICY_INDEX} "${MASTER_POLICY_INDEX_TEXT}") diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake index b2c932d117..d61db99db1 100644 --- a/doc/scripts/DocSourcesList.cmake +++ b/doc/scripts/DocSourcesList.cmake @@ -140,6 +140,7 @@ rest_target(${psd} base/protocols/dns/consts.bro) rest_target(${psd} base/protocols/dns/main.bro) rest_target(${psd} base/protocols/ftp/files.bro) rest_target(${psd} base/protocols/ftp/gridftp.bro) +rest_target(${psd} base/protocols/ftp/info.bro) rest_target(${psd} base/protocols/ftp/main.bro) rest_target(${psd} base/protocols/ftp/utils-commands.bro) rest_target(${psd} base/protocols/ftp/utils.bro) diff --git a/scripts/base/frameworks/files/main.bro b/scripts/base/frameworks/files/main.bro index d0c381545b..a87608054d 100644 --- a/scripts/base/frameworks/files/main.bro +++ b/scripts/base/frameworks/files/main.bro @@ -204,7 +204,7 @@ export { ## ## tag: Tag for the protocol analyzer having a callback being registered. ## - ## reg: A :bro:see:`ProtoRegistration` record. + ## reg: A :bro:see:`Files::ProtoRegistration` record. ## ## Returns: true if the protocol being registered was not previously registered. global register_protocol: function(tag: Analyzer::Tag, reg: ProtoRegistration): bool; diff --git a/scripts/base/frameworks/packet-filter/main.bro b/scripts/base/frameworks/packet-filter/main.bro index 72b2b62f34..929b10fbe1 100644 --- a/scripts/base/frameworks/packet-filter/main.bro +++ b/scripts/base/frameworks/packet-filter/main.bro @@ -109,7 +109,7 @@ export { ## Enables the old filtering approach of "only watch common ports for ## analyzed protocols". - ## + ## ## Unless you know what you are doing, leave this set to F. const enable_auto_protocol_capture_filters = F &redef; diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index 594991c85a..92b806092c 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -531,22 +531,19 @@ type record_field_table: table[string] of record_field; # dependent on the names remaining as they are now. ## Set of BPF capture filters to use for capturing, indexed by a user-definable -## ID (which must be unique). If Bro is *not* configured to examine -## :bro:id:`PacketFilter::all_packets`, all packets matching at least -## one of the filters in this table (and all in :bro:id:`restrict_filters`) -## will be analyzed. +## ID (which must be unique). 
If Bro is *not* configured with +## :bro:id:`PacketFilter::enable_auto_protocol_capture_filters`, +## all packets matching at least one of the filters in this table (and all in +## :bro:id:`restrict_filters`) will be analyzed. ## -## .. bro:see:: PacketFilter PacketFilter::all_packets +## .. bro:see:: PacketFilter PacketFilter::enable_auto_protocol_capture_filters ## PacketFilter::unrestricted_filter restrict_filters global capture_filters: table[string] of string &redef; ## Set of BPF filters to restrict capturing, indexed by a user-definable ID (which -## must be unique). If Bro is *not* configured to examine -## :bro:id:`PacketFilter::all_packets`, only packets matching *all* of the -## filters in this table (and any in :bro:id:`capture_filters`) will be -## analyzed. +## must be unique). ## -## .. bro:see:: PacketFilter PacketFilter::all_packets +## .. bro:see:: PacketFilter PacketFilter::enable_auto_protocol_capture_filters ## PacketFilter::unrestricted_filter capture_filters global restrict_filters: table[string] of string &redef; diff --git a/scripts/base/protocols/ftp/__load__.bro b/scripts/base/protocols/ftp/__load__.bro index ebb09e702c..3ddd8a2dc2 100644 --- a/scripts/base/protocols/ftp/__load__.bro +++ b/scripts/base/protocols/ftp/__load__.bro @@ -1,4 +1,5 @@ @load ./utils-commands +@load ./info @load ./main @load ./utils @load ./files diff --git a/scripts/base/protocols/ftp/files.bro b/scripts/base/protocols/ftp/files.bro index 9ed17ab2a4..b507ca32a7 100644 --- a/scripts/base/protocols/ftp/files.bro +++ b/scripts/base/protocols/ftp/files.bro @@ -1,3 +1,4 @@ +@load ./info @load ./main @load ./utils @load base/utils/conn-ids diff --git a/scripts/base/protocols/ftp/gridftp.bro b/scripts/base/protocols/ftp/gridftp.bro index 57752b1cbd..73bd656544 100644 --- a/scripts/base/protocols/ftp/gridftp.bro +++ b/scripts/base/protocols/ftp/gridftp.bro @@ -19,6 +19,7 @@ ##! sizes are not logged, but at the benefit of saving CPU cycles that ##! otherwise go to analyzing the large (and likely benign) connections. +@load ./info @load ./main @load base/protocols/conn @load base/protocols/ssl diff --git a/scripts/base/protocols/ftp/info.bro b/scripts/base/protocols/ftp/info.bro new file mode 100644 index 0000000000..f6fceb071e --- /dev/null +++ b/scripts/base/protocols/ftp/info.bro @@ -0,0 +1,72 @@ +##! Defines data structures for tracking and logging FTP sessions. + +module FTP; + +@load ./utils-commands + +export { + + ## This setting changes if passwords used in FTP sessions are + ## captured or not. + const default_capture_password = F &redef; + + ## The expected endpoints of an FTP data channel. + type ExpectedDataChannel: record { + ## Whether PASV mode is toggled for control channel. + passive: bool &log; + ## The host that will be initiating the data connection. + orig_h: addr &log; + ## The host that will be accepting the data connection. + resp_h: addr &log; + ## The port at which the acceptor is listening for the data connection. + resp_p: port &log; + }; + + type Info: record { + ## Time when the command was sent. + ts: time &log; + ## Unique ID for the connection. + uid: string &log; + ## The connection's 4-tuple of endpoint addresses/ports. + id: conn_id &log; + ## User name for the current FTP session. + user: string &log &default=""; + ## Password for the current FTP session if captured. + password: string &log &optional; + ## Command given by the client. + command: string &log &optional; + ## Argument for the command if one is given. 
+ arg: string &log &optional; + + ## Libmagic "sniffed" file type if the command indicates a file transfer. + mime_type: string &log &optional; + ## Size of the file if the command indicates a file transfer. + file_size: count &log &optional; + + ## Reply code from the server in response to the command. + reply_code: count &log &optional; + ## Reply message from the server in response to the command. + reply_msg: string &log &optional; + + ## Expected FTP data channel. + data_channel: ExpectedDataChannel &log &optional; + + ## Current working directory that this session is in. By making + ## the default value '.', we can indicate that unless something + ## more concrete is discovered that the existing but unknown + ## directory is ok to use. + cwd: string &default="."; + + ## Command that is currently waiting for a response. + cmdarg: CmdArg &optional; + ## Queue for commands that have been sent but not yet responded to + ## are tracked here. + pending_commands: PendingCmds; + + ## Indicates if the session is in active or passive mode. + passive: bool &default=F; + + ## Determines if the password will be captured for this request. + capture_password: bool &default=default_capture_password; + }; +} diff --git a/scripts/base/protocols/ftp/main.bro b/scripts/base/protocols/ftp/main.bro index c9549a14ec..df66235d49 100644 --- a/scripts/base/protocols/ftp/main.bro +++ b/scripts/base/protocols/ftp/main.bro @@ -3,6 +3,8 @@ ##! will take on the full path that the client is at along with the requested ##! file name. +@load ./info +@load ./utils @load ./utils-commands @load base/utils/paths @load base/utils/numbers @@ -20,72 +22,9 @@ export { "EPSV" } &redef; - ## This setting changes if passwords used in FTP sessions are captured or not. - const default_capture_password = F &redef; - ## User IDs that can be considered "anonymous". const guest_ids = { "anonymous", "ftp", "ftpuser", "guest" } &redef; - ## The expected endpoints of an FTP data channel. - type ExpectedDataChannel: record { - ## Whether PASV mode is toggled for control channel. - passive: bool &log; - ## The host that will be initiating the data connection. - orig_h: addr &log; - ## The host that will be accepting the data connection. - resp_h: addr &log; - ## The port at which the acceptor is listening for the data connection. - resp_p: port &log; - }; - - type Info: record { - ## Time when the command was sent. - ts: time &log; - ## Unique ID for the connection. - uid: string &log; - ## The connection's 4-tuple of endpoint addresses/ports. - id: conn_id &log; - ## User name for the current FTP session. - user: string &log &default=""; - ## Password for the current FTP session if captured. - password: string &log &optional; - ## Command given by the client. - command: string &log &optional; - ## Argument for the command if one is given. - arg: string &log &optional; - - ## Libmagic "sniffed" file type if the command indicates a file transfer. - mime_type: string &log &optional; - ## Size of the file if the command indicates a file transfer. - file_size: count &log &optional; - - ## Reply code from the server in response to the command. - reply_code: count &log &optional; - ## Reply message from the server in response to the command. - reply_msg: string &log &optional; - - ## Expected FTP data channel. - data_channel: ExpectedDataChannel &log &optional; - - ## Current working directory that this session is in. 
By making - ## the default value '.', we can indicate that unless something - ## more concrete is discovered that the existing but unknown - ## directory is ok to use. - cwd: string &default="."; - - ## Command that is currently waiting for a response. - cmdarg: CmdArg &optional; - ## Queue for commands that have been sent but not yet responded to - ## are tracked here. - pending_commands: PendingCmds; - - ## Indicates if the session is in active or passive mode. - passive: bool &default=F; - - ## Determines if the password will be captured for this request. - capture_password: bool &default=default_capture_password; - }; - ## This record is to hold a parsed FTP reply code. For example, for the ## 201 status code, the digits would be parsed as: x->2, y->0, z=>1. type ReplyCode: record { @@ -102,8 +41,6 @@ export { global log_ftp: event(rec: Info); } -@load ./utils - # Add the state tracking information variable to the connection record redef record connection += { ftp: Info &optional; diff --git a/scripts/base/protocols/ftp/utils.bro b/scripts/base/protocols/ftp/utils.bro index 629b87e5a8..a0b473e086 100644 --- a/scripts/base/protocols/ftp/utils.bro +++ b/scripts/base/protocols/ftp/utils.bro @@ -1,7 +1,8 @@ ##! Utilities specific for FTP processing. -@load ./main +@load ./info @load base/utils/addrs +@load base/utils/paths module FTP; @@ -44,4 +45,4 @@ function build_url_ftp(rec: Info): string function describe(rec: Info): string { return build_url_ftp(rec); - } \ No newline at end of file + } diff --git a/scripts/policy/frameworks/packet-filter/shunt.bro b/scripts/policy/frameworks/packet-filter/shunt.bro index b87369ee62..85ec189a17 100644 --- a/scripts/policy/frameworks/packet-filter/shunt.bro +++ b/scripts/policy/frameworks/packet-filter/shunt.bro @@ -34,8 +34,8 @@ export { global current_shunted_host_pairs: function(): set[conn_id]; redef enum Notice::Type += { - ## Indicative that :bro:id:`max_bpf_shunts` connections are already - ## being shunted with BPF filters and no more are allowed. + ## Indicative that :bro:id:`PacketFilter::max_bpf_shunts` connections + ## are already being shunted with BPF filters and no more are allowed. No_More_Conn_Shunts_Available, ## Limitations in BPF make shunting some connections with BPF impossible. diff --git a/scripts/policy/misc/load-balancing.bro b/scripts/policy/misc/load-balancing.bro index fe07dd64da..889d18119a 100644 --- a/scripts/policy/misc/load-balancing.bro +++ b/scripts/policy/misc/load-balancing.bro @@ -12,12 +12,12 @@ export { ## Apply BPF filters to each worker in a way that causes them to ## automatically flow balance traffic between them. AUTO_BPF, - ## Load balance traffic across the workers by making each one apply - ## a restrict filter to only listen to a single MAC address. This - ## is a somewhat common deployment option for sites doing network - ## based load balancing with MAC address rewriting and passing the - ## traffic to a single interface. Multiple MAC addresses will show - ## up on the same interface and need filtered to a single address. + # Load balance traffic across the workers by making each one apply + # a restrict filter to only listen to a single MAC address. This + # is a somewhat common deployment option for sites doing network + # based load balancing with MAC address rewriting and passing the + # traffic to a single interface. Multiple MAC addresses will show + # up on the same interface and need filtered to a single address. 
#MAC_ADDR_BPF, }; diff --git a/scripts/policy/tuning/defaults/packet-fragments.bro b/scripts/policy/tuning/defaults/packet-fragments.bro index 24b18d5917..f95c826547 100644 --- a/scripts/policy/tuning/defaults/packet-fragments.bro +++ b/scripts/policy/tuning/defaults/packet-fragments.bro @@ -1,10 +1,10 @@ -## Capture TCP fragments, but not UDP (or ICMP), since those are a lot more -## common due to high-volume, fragmenting protocols such as NFS :-(. +# Capture TCP fragments, but not UDP (or ICMP), since those are a lot more +# common due to high-volume, fragmenting protocols such as NFS :-(. -## This normally isn't used because of the default open packet filter -## but we set it anyway in case the user is using a packet filter. -## Note: This was removed because the default model now is to have a wide -## open packet filter. +# This normally isn't used because of the default open packet filter +# but we set it anyway in case the user is using a packet filter. +# Note: This was removed because the default model now is to have a wide +# open packet filter. #redef capture_filters += { ["frag"] = "(ip[6:2] & 0x3fff != 0) and tcp" }; ## Shorten the fragment timeout from never expiring to expiring fragments after diff --git a/src/BroDoc.cc b/src/BroDoc.cc index c04cd92eca..3cb271bdbf 100644 --- a/src/BroDoc.cc +++ b/src/BroDoc.cc @@ -11,6 +11,7 @@ #include "plugin/Manager.h" #include "analyzer/Manager.h" #include "analyzer/Component.h" +#include "file_analysis/Manager.h" BroDoc::BroDoc(const std::string& rel, const std::string& abs) { @@ -479,6 +480,17 @@ static void WriteAnalyzerComponent(FILE* f, const analyzer::Component* c) fprintf(f, ":bro:enum:`Analyzer::%s`\n\n", tag.c_str()); } +static void WriteAnalyzerComponent(FILE* f, const file_analysis::Component* c) + { + EnumType* atag = file_mgr->GetTagEnumType(); + string tag = fmt("ANALYZER_%s", c->CanonicalName()); + + if ( atag->Lookup("Files", tag.c_str()) < 0 ) + reporter->InternalError("missing analyzer tag for %s", tag.c_str()); + + fprintf(f, ":bro:enum:`Files::%s`\n\n", tag.c_str()); + } + static void WritePluginComponents(FILE* f, const plugin::Plugin* p) { plugin::Plugin::component_list components = p->Components(); @@ -494,6 +506,10 @@ static void WritePluginComponents(FILE* f, const plugin::Plugin* p) WriteAnalyzerComponent(f, dynamic_cast(*it)); break; + case plugin::component::FILE_ANALYZER: + WriteAnalyzerComponent(f, + dynamic_cast(*it)); + break; case plugin::component::READER: reporter->InternalError("docs for READER component unimplemented"); case plugin::component::WRITER: @@ -537,12 +553,13 @@ static void WritePluginBifItems(FILE* f, const plugin::Plugin* p, } } -static void WriteAnalyzerTagDefn(FILE* f, EnumType* e) +static void WriteAnalyzerTagDefn(FILE* f, EnumType* e, const string& module) { + string tag_id= module + "::Tag"; e = new CommentedEnumType(e); - e->SetTypeID(copy_string("Analyzer::Tag")); + e->SetTypeID(copy_string(tag_id.c_str())); - ID* dummy_id = new ID(copy_string("Analyzer::Tag"), SCOPE_GLOBAL, true); + ID* dummy_id = new ID(copy_string(tag_id.c_str()), SCOPE_GLOBAL, true); dummy_id->SetType(e); dummy_id->MakeType(); @@ -554,13 +571,17 @@ static void WriteAnalyzerTagDefn(FILE* f, EnumType* e) bdo.WriteReST(f); } -static bool IsAnalyzerPlugin(const plugin::Plugin* p) +static bool ComponentsMatch(const plugin::Plugin* p, plugin::component::Type t, + bool match_empty = false) { plugin::Plugin::component_list components = p->Components(); plugin::Plugin::component_list::const_iterator it; + if ( 
components.empty() ) + return match_empty; + for ( it = components.begin(); it != components.end(); ++it ) - if ( (*it)->Type() != plugin::component::ANALYZER ) + if ( (*it)->Type() != t ) return false; return true; @@ -573,14 +594,44 @@ void CreateProtoAnalyzerDoc(const char* filename) fprintf(f, "Protocol Analyzer Reference\n"); fprintf(f, "===========================\n\n"); - WriteAnalyzerTagDefn(f, analyzer_mgr->GetTagEnumType()); + WriteAnalyzerTagDefn(f, analyzer_mgr->GetTagEnumType(), "Analyzer"); plugin::Manager::plugin_list plugins = plugin_mgr->Plugins(); plugin::Manager::plugin_list::const_iterator it; for ( it = plugins.begin(); it != plugins.end(); ++it ) { - if ( ! IsAnalyzerPlugin(*it) ) + if ( ! ComponentsMatch(*it, plugin::component::ANALYZER, true) ) + continue; + + WritePluginSectionHeading(f, *it); + WritePluginComponents(f, *it); + WritePluginBifItems(f, *it, plugin::BifItem::CONSTANT, + "Options/Constants"); + WritePluginBifItems(f, *it, plugin::BifItem::GLOBAL, "Globals"); + WritePluginBifItems(f, *it, plugin::BifItem::TYPE, "Types"); + WritePluginBifItems(f, *it, plugin::BifItem::EVENT, "Events"); + WritePluginBifItems(f, *it, plugin::BifItem::FUNCTION, "Functions"); + } + + fclose(f); + } + +void CreateFileAnalyzerDoc(const char* filename) + { + FILE* f = fopen(filename, "w"); + + fprintf(f, "File Analyzer Reference\n"); + fprintf(f, "===========================\n\n"); + + WriteAnalyzerTagDefn(f, file_mgr->GetTagEnumType(), "Files"); + + plugin::Manager::plugin_list plugins = plugin_mgr->Plugins(); + plugin::Manager::plugin_list::const_iterator it; + + for ( it = plugins.begin(); it != plugins.end(); ++it ) + { + if ( ! ComponentsMatch(*it, plugin::component::FILE_ANALYZER) ) continue; WritePluginSectionHeading(f, *it); diff --git a/src/BroDoc.h b/src/BroDoc.h index 9f92f821f8..081df698d9 100644 --- a/src/BroDoc.h +++ b/src/BroDoc.h @@ -413,4 +413,10 @@ private: */ void CreateProtoAnalyzerDoc(const char* filename); +/** + * Writes out plugin index documentation for all file analyzer plugins. + * @param filename the name of the file to write. + */ +void CreateFileAnalyzerDoc(const char* filename); + #endif diff --git a/src/file_analysis/Manager.cc b/src/file_analysis/Manager.cc index 4e25bb0b0e..fb74a409b4 100644 --- a/src/file_analysis/Manager.cc +++ b/src/file_analysis/Manager.cc @@ -394,3 +394,8 @@ const char* Manager::GetAnalyzerName(int tag) const return it->second->CanonicalName(); } + +EnumType* Manager::GetTagEnumType() + { + return tag_enum_type; + } diff --git a/src/file_analysis/Manager.h b/src/file_analysis/Manager.h index 84b606173d..a93e78c638 100644 --- a/src/file_analysis/Manager.h +++ b/src/file_analysis/Manager.h @@ -214,6 +214,12 @@ public: */ const char* GetAnalyzerName(int tag) const; + /** + * Returns the enum type that corresponds to the script-level type + * \c Files::Tag. 
+ */ + EnumType* GetTagEnumType(); + protected: friend class FileTimer; diff --git a/src/main.cc b/src/main.cc index 56193a935b..6a58832964 100644 --- a/src/main.cc +++ b/src/main.cc @@ -872,6 +872,7 @@ int main(int argc, char** argv) if ( generate_documentation ) { CreateProtoAnalyzerDoc("proto-analyzers.rst"); + CreateFileAnalyzerDoc("file-analyzers.rst"); std::list::iterator it; diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index f67d4b6158..c34e2e2e87 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2013-07-23-05-48-10 +#open 2013-07-29-20-08-38 #fields name #types string scripts/base/init-bare.bro @@ -156,8 +156,9 @@ scripts/base/init-default.bro scripts/base/protocols/dns/main.bro scripts/base/protocols/ftp/__load__.bro scripts/base/protocols/ftp/utils-commands.bro + scripts/base/protocols/ftp/info.bro scripts/base/protocols/ftp/main.bro - scripts/base/protocols/ftp/utils.bro + scripts/base/protocols/ftp/utils.bro scripts/base/protocols/ftp/files.bro scripts/base/protocols/ftp/gridftp.bro scripts/base/protocols/ssl/__load__.bro @@ -196,4 +197,4 @@ scripts/base/init-default.bro scripts/base/files/extract/main.bro scripts/base/misc/find-checksum-offloading.bro scripts/policy/misc/loaded-scripts.bro -#close 2013-07-23-05-48-10 +#close 2013-07-29-20-08-38
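Relocating the ``FTP::Info`` record into ``info.bro`` changes neither its module
nor its field names, so handlers of the ``FTP::log_ftp`` event declared in
``main.bro`` keep working as before. A minimal sketch of such a handler,
assuming the event fires for each record written to the FTP log (the 50 MB
threshold is purely illustrative):

.. code:: bro

    event FTP::log_ftp(rec: FTP::Info)
        {
        # mime_type and file_size are only present for file transfer commands.
        if ( rec?$mime_type && rec?$file_size && rec$file_size > 52428800 )
            print fmt("large %s transfer by user %s: %s (%d bytes)",
                      rec$mime_type, rec$user,
                      rec?$arg ? rec$arg : "<unknown>", rec$file_size);
        }

Because the handler only touches fields of ``FTP::Info``, it is unaffected by
whether the record definition lives in ``main.bro`` or ``info.bro``.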