diff --git a/CHANGES b/CHANGES
index 14f0ec041d..7cbbc74e4f 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,4 +1,169 @@
+2.1-824 | 2013-07-22 14:25:14 -0400
+
+ * Fixed a scriptland state issue that manifested especially badly on proxies. (Seth Hall)
+
+ * Another test fix. (Robin Sommer)
+
+ * Canonifying the output of core.print-bpf-filters. (Robin Sommer)
+
+2.1-820 | 2013-07-18 12:30:04 -0700
+
+ * Extending external canonifier to remove fractional values from
+ capture_loss.log. (Robin Sommer)
+
+ * Canonifying internal order for plugins and their components to
+ make it deterministic. (Robin Sommer)
+
+ * Small raw reader tweaks that got left out earlier. (Robin Sommer)
+
+2.1-814 | 2013-07-15 18:18:20 -0700
+
+ * Fixing raw reader crash when accessing nonexistent file, and
+ memory leak when reading from file. Addresses #1038. (Bernhard
+ Amann)
+
+2.1-811 | 2013-07-14 08:01:54 -0700
+
+ * Bump sqlite to 3.7.17. (Bernhard Amann)
+
+ * Small test fixes. (Seth Hall)
+
+ * Fix a bug where the same analyzer tag was reused for two different
+ analyzers. (Seth Hall)
+
+ * Moved DPD signatures into script specific directories. Left out
+ the BitTorrent signatures pending further updates to that
+ analyzer. (Seth Hall)
+
+2.1-802 | 2013-07-10 10:55:14 -0700
+
+ * Const adjustment for methods. (Jon Siwek)
+
+2.1-798 | 2013-07-08 13:05:37 -0700
+
+ * Rewrite of the packet filter framework. (Seth Hall)
+
+ This includes:
+
+ - Plugin interface for adding filtering mechanisms.
+
+ - Integrated the packet filter framework with the analyzer
+ framework to retrieve well-known ports from there.
+
+ - Support for BPF-based load balancing (IPv4 and IPv6). This will
+ tie in with upcoming BroControl support for configuring this.
+
+ - Support for BPF-based connection sampling.
+
+ - Support for "shunting" traffic with BPF filters.
+
+ - Replaced PacketFilter::all_packets with
+ PacketFilter::enable_auto_protocol_capture_filters.
+
+2.1-784 | 2013-07-04 22:28:48 -0400
+
+ * Add a call to lookup_connection in SSH scripts to update connval. (Seth Hall)
+
+ * Updating submodule(s). (Robin Sommer)
+
+2.1-782 | 2013-07-03 17:00:39 -0700
+
+ * Remove the SSL log queueing mechanism that was included with the
+ log delay mechanism. (Seth Hall)
+
+2.1-780 | 2013-07-03 16:46:26 -0700
+
+ * Rewrite of the RAW input reader for improved robustness and new
+ features. (Bernhard Amann) This includes:
+
+ - Send "end_of_data" event for all kind of streams.
+ - Send "process_finished" event with exit code of child
+ process at process termination.
+ - Expose name of input stream to readers.
+ - Better error handling.
+ - New "force_kill" option which SIGKILLs processes on reader termination.
+ - Supports reading from stdout and stderr simultaneously.
+ - Support sending data to stdin of child process.
+ - Streaming reads from external commands work without blocking.
+
+2.1-762 | 2013-07-03 16:33:22 -0700
+
+ * Fix to correct support for TLS 1.2. Addresses #1020. (Seth Hall,
+ with help from Rafal Lesniak).
+
+2.1-760 | 2013-07-03 16:31:36 -0700
+
+ * Teach broxygen to generate protocol analyzer plugin reference.
+ (Jon Siwek)
+
+ * Adding 'const' to a number of C++ methods. (Jon Siwek)
+
+2.1-757 | 2013-07-03 16:28:10 -0700
+
+ * Fix redef of table index from clearing table.
+
+ `redef foo["x"] = 1` now acts like `redef foo += { ["x"] = 1 }`
+ instead of `redef foo = { ["x"] = 1 }`.
+
+ Addresses #1013. (Jon Siwek)
+
+
+2.1-755 | 2013-07-03 16:22:43 -0700
+
+ * Add a general file analysis overview/how-to document. (Jon Siwek)
+
+ * Improve file analysis doxygen comments. (Jon Siwek)
+
+ * Improve tracking of HTTP file extraction. http.log now has files
+ taken from request and response bodies in different fields for
+ each, and can now track multiple files per body. That is, the
+ "extraction_file" field is now "extracted_request_files" and
+ "extracted_response_files". Addresses #988. (Jon Siwek)
+
+ * Fix HTTP multipart body file analysis. Each part now gets assigned
+ a different file handle/id. (Jon Siwek)
+
+ * Remove logging of analyzers field of FileAnalysis::Info. (Jon
+ Siwek)
+
+ * Remove extraction counter in default file extraction scripts. (Jon
+ Siwek)
+
+ * Remove FileAnalysis::postpone_timeout.
+ FileAnalysis::set_timeout_interval can now perform same function.
+ (Jon Siwek)
+
+ * Make default get_file_handle handlers &priority=5 so they're
+ easier to override. (Jon Siwek)
+
+ * Add input interface to forward data for file analysis. The new
+ Input::add_analysis function is used to automatically forward
+ input data on to the file analysis framework. (Jon Siwek)
+
+ * File analysis framework interface simplifications. (Jon Siwek)
+
+ - Remove script-layer data input interface (will be managed directly
+ by input framework later).
+
+ - Only track files internally by file id hash. Chance of collision
+ too small to justify also tracking unique file string.
+
+
+2.1-741 | 2013-06-07 17:28:50 -0700
+
+ * Fixing typo that could cause an assertion to falsely trigger.
+ (Robin Sommer)
+
+2.1-740 | 2013-06-07 16:37:32 -0700
+
+ * Fix for CMake 2.6.x. (Robin Sommer)
+
+2.1-738 | 2013-06-07 08:38:13 -0700
+
+ * Remove invalid free on non-allocated pointer in hash function
+ object. Addresses #1018. (Matthias Vallentin)
+
2.1-736 | 2013-06-06 10:05:20 -0700
* New "magic constants" @DIR and @FILENAME that expand to the
diff --git a/NEWS b/NEWS
index f0d302156b..1fce6b1d9d 100644
--- a/NEWS
+++ b/NEWS
@@ -73,10 +73,12 @@ New Functionality
script file name without path, respectively. (Jon Siwek)
- The new file analysis framework moves most of the processing of file
- content from script-land into the core, where it belongs. Much of
- this is an internal change, the framework comes with the following
- user-visibible functionality (some of that was already available
- before, but done differently):
+ content from script-land into the core, where it belongs. See
+ doc/file-analysis.rst for more information.
+
+ Much of this is an internal change, but the framework also comes
+ with the following user-visible functionality (some of that was
+ already available before, but done differently):
[TODO: This will probably change with further script updates.]
@@ -102,6 +104,10 @@ New Functionality
- IRC DCC transfers: Record to disk.
+- New packet filter framework supports BPF-based load-balancing,
+ shunting, and sampling; plus plugin support to customize filters
+ dynamically.
+
Changed Functionality
~~~~~~~~~~~~~~~~~~~~~
@@ -180,6 +186,12 @@ Changed Functionality
- The SSH::Login notice has been superseded by an corresponding
intelligence framework observation (SSH::SUCCESSFUL_LOGIN).
+- PacketFilter::all_packets has been replaced with
+ PacketFilter::enable_auto_protocol_capture_filters.
+
+- We removed the BitTorrent DPD signatures pending further updates to
+ that analyzer.
+
Bro 2.1
-------
diff --git a/VERSION b/VERSION
index 8fc9f0438e..d35eaf1454 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.1-736
+2.1-824
diff --git a/aux/broctl b/aux/broctl
index a1aaa1608e..0cd102805e 160000
--- a/aux/broctl
+++ b/aux/broctl
@@ -1 +1 @@
-Subproject commit a1aaa1608ef08761a211b1e251449d796ba5e4a0
+Subproject commit 0cd102805e73343cab3f9fd4a76552e13940dad9
diff --git a/aux/btest b/aux/btest
index d5b8df42cb..ce366206e3 160000
--- a/aux/btest
+++ b/aux/btest
@@ -1 +1 @@
-Subproject commit d5b8df42cb9c398142e02d4bf8ede835fd0227f4
+Subproject commit ce366206e3407e534a786ad572c342e9f9fef26b
diff --git a/doc/ext/bro.py b/doc/ext/bro.py
index 9bdd86bd9a..6ef11c37f6 100644
--- a/doc/ext/bro.py
+++ b/doc/ext/bro.py
@@ -82,7 +82,8 @@ class BroGeneric(ObjectDescription):
objects = self.env.domaindata['bro']['objects']
key = (self.objtype, name)
- if key in objects:
+ if ( key in objects and self.objtype != "id" and
+ self.objtype != "type" ):
self.env.warn(self.env.docname,
'duplicate description of %s %s, ' %
(self.objtype, name) +
@@ -150,6 +151,12 @@ class BroEnum(BroGeneric):
#self.indexnode['entries'].append(('single', indextext,
# targetname, targetname))
m = sig.split()
+
+ if len(m) < 2:
+ self.env.warn(self.env.docname,
+ "bro:enum directive missing argument(s)")
+ return
+
if m[1] == "Notice::Type":
if 'notices' not in self.env.domaindata['bro']:
self.env.domaindata['bro']['notices'] = []
diff --git a/doc/file-analysis.rst b/doc/file-analysis.rst
new file mode 100644
index 0000000000..f312e06471
--- /dev/null
+++ b/doc/file-analysis.rst
@@ -0,0 +1,184 @@
+=============
+File Analysis
+=============
+
+.. rst-class:: opening
+
+ In the past, writing Bro scripts with the intent of analyzing file
+ content could be cumbersome because of the fact that the content
+ would be presented in different ways, via events, at the
+ script-layer depending on which network protocol was involved in the
+ file transfer. Scripts written to analyze files over one protocol
+ would have to be copied and modified to fit other protocols. The
+ file analysis framework (FAF) instead provides a generalized
+ presentation of file-related information. The information regarding
+ the protocol involved in transporting a file over the network is
+ still available, but it no longer has to dictate how one organizes
+ their scripting logic to handle it. A goal of the FAF is to
+ provide analysis specifically for files that is analogous to the
+ analysis Bro provides for network connections.
+
+.. contents::
+
+File Lifecycle Events
+=====================
+
+The key events that may occur during the lifetime of a file are:
+:bro:see:`file_new`, :bro:see:`file_over_new_connection`,
+:bro:see:`file_timeout`, :bro:see:`file_gap`, and
+:bro:see:`file_state_remove`. Handling any of these events provides
+some information about the file such as which network
+:bro:see:`connection` and protocol are transporting the file, how many
+bytes have been transferred so far, and its MIME type.
+
+.. code:: bro
+
+ event connection_state_remove(c: connection)
+ {
+ print "connection_state_remove";
+ print c$uid;
+ print c$id;
+ for ( s in c$service )
+ print s;
+ }
+
+ event file_state_remove(f: fa_file)
+ {
+ print "file_state_remove";
+ print f$id;
+ for ( cid in f$conns )
+ {
+ print f$conns[cid]$uid;
+ print cid;
+ }
+ print f$source;
+ }
+
+might give output like::
+
+ file_state_remove
+ Cx92a0ym5R8
+ REs2LQfVW2j
+ [orig_h=10.0.0.7, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]
+ HTTP
+ connection_state_remove
+ REs2LQfVW2j
+ [orig_h=10.0.0.7, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]
+ HTTP
+
+This doesn't perform any interesting analysis yet, but does highlight
+the similarity between analysis of connections and files. Connections
+are identified by the usual 5-tuple or a convenient UID string while
+files are identified just by a string of the same format as the
+connection UID. So there are unique ways to identify both files and
+connections, and files hold references to the connection (or connections)
+that transported them.
+
+Adding Analysis
+===============
+
+There are builtin file analyzers which can be attached to files. Once
+attached, they start receiving the contents of the file as Bro extracts
+it from an ongoing network connection. What they do with the file
+contents is up to the particular file analyzer implementation, but
+they'll typically either report further information about the file via
+events (e.g. :bro:see:`FileAnalysis::ANALYZER_MD5` will report the
+file's MD5 checksum via :bro:see:`file_hash` once calculated) or they'll
+have some side effect (e.g. :bro:see:`FileAnalysis::ANALYZER_EXTRACT`
+will write the contents of the file out to the local file system).
+
+In the future there may be file analyzers that automatically attach to
+files based on heuristics, similar to the Dynamic Protocol Detection
+(DPD) framework for connections, but many will always require an
+explicit attachment decision:
+
+.. code:: bro
+
+ event file_new(f: fa_file)
+ {
+ print "new file", f$id;
+ if ( f?$mime_type && f$mime_type == "text/plain" )
+ FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_MD5]);
+ }
+
+ event file_hash(f: fa_file, kind: string, hash: string)
+ {
+ print "file_hash", f$id, kind, hash;
+ }
+
+this script calculates MD5s for all plain text files and might give
+output::
+
+ new file, Cx92a0ym5R8
+ file_hash, Cx92a0ym5R8, md5, 397168fd09991a0e712254df7bc639ac
+
+Some file analyzers might have tunable parameters that need to be
+specified in the call to :bro:see:`FileAnalysis::add_analyzer`:
+
+.. code:: bro
+
+ event file_new(f: fa_file)
+ {
+ FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_EXTRACT,
+ $extract_filename="./myfile"]);
+ }
+
+In this case, the file extraction analyzer doesn't generate any further
+events, but does have the side effect of writing out the file contents
+to the local file system at the specified location of ``./myfile``. Of
+course, for a network with more than a single file being transferred,
+it's probably preferable to specify a different extraction path for each
+file, unlike this example.
+
+Regardless of which file analyzers end up acting on a file, general
+information about the file (e.g. size, time of last data transferred,
+MIME type, etc.) are logged in ``file_analysis.log``.
+
+Input Framework Integration
+===========================
+
+The FAF comes with a simple way to integrate with the :doc:`Input
+Framework <input>`, so that Bro can analyze files from external sources
+in the same way it analyzes files that it sees coming over traffic from
+a network interface it's monitoring. It only requires a call to
+:bro:see:`Input::add_analysis`:
+
+.. code:: bro
+
+ redef exit_only_after_terminate = T;
+
+ event file_new(f: fa_file)
+ {
+ print "new file", f$id;
+ FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_MD5]);
+ }
+
+ event file_state_remove(f: fa_file)
+ {
+ Input::remove(f$source);
+ terminate();
+ }
+
+ event file_hash(f: fa_file, kind: string, hash: string)
+ {
+ print "file_hash", f$id, kind, hash;
+ }
+
+ event bro_init()
+ {
+ local source: string = "./myfile";
+ Input::add_analysis([$source=source, $name=source]);
+ }
+
+Note that the "source" field of :bro:see:`fa_file` corresponds to the
+"name" field of :bro:see:`Input::AnalysisDescription` since that is what
+the input framework uses to uniquely identify an input stream.
+
+The output of the above script may be::
+
+ new file, G1fS2xthS4l
+ file_hash, G1fS2xthS4l, md5, 54098b367d2e87b078671fad4afb9dbb
+
+Nothing that special, but it at least verifies the MD5 file analyzer
+saw all the bytes of the input file and calculated the checksum
+correctly!
diff --git a/doc/index.rst b/doc/index.rst
index 29b29541b4..ad05f7bf82 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -25,6 +25,7 @@ Frameworks
notice
logging
input
+ file-analysis
cluster
signatures
@@ -45,7 +46,7 @@ Script Reference
scripts/packages
scripts/index
scripts/builtins
- scripts/bifs
+ scripts/proto-analyzers
Other Bro Components
--------------------
diff --git a/doc/scripts/CMakeLists.txt b/doc/scripts/CMakeLists.txt
index 64c3de92eb..ddb09bb29c 100644
--- a/doc/scripts/CMakeLists.txt
+++ b/doc/scripts/CMakeLists.txt
@@ -15,11 +15,11 @@ endif ()
#
# srcDir: the directory which contains broInput
# broInput: the file name of a bro policy script, any path prefix of this
-# argument will be used to derive what path under policy/ the generated
+# argument will be used to derive what path under scripts/ the generated
# documentation will be placed.
# group: optional name of group that the script documentation will belong to.
-# If this is not given, .bif files automatically get their own group or
-# the group is automatically by any path portion of the broInput argument.
+# If this is not given, the group is automatically set to any path portion
+# of the broInput argument.
#
# In addition to adding the makefile target, several CMake variables are set:
#
@@ -64,8 +64,6 @@ macro(REST_TARGET srcDir broInput)
if (NOT "${ARGN}" STREQUAL "")
set(group ${ARGN})
- elseif (${broInput} MATCHES "\\.bif\\.bro$")
- set(group bifs)
elseif (relDstDir)
set(group ${relDstDir}/index)
# add package index to master package list if not already in it
@@ -126,6 +124,29 @@ endmacro(REST_TARGET)
# Schedule Bro scripts for which to generate documentation.
include(DocSourcesList.cmake)
+# This reST target is independent of a particular Bro script...
+add_custom_command(OUTPUT proto-analyzers.rst
+ # delete any leftover state from previous bro runs
+ COMMAND "${CMAKE_COMMAND}"
+ ARGS -E remove_directory .state
+ # generate the reST documentation using bro
+ COMMAND BROPATH=${BROPATH}:${srcDir} BROMAGIC=${CMAKE_SOURCE_DIR}/magic ${CMAKE_BINARY_DIR}/src/bro
+ ARGS -b -Z base/init-bare.bro || (rm -rf .state *.log *.rst && exit 1)
+ # move generated doc into a new directory tree that
+ # defines the final structure of documents
+ COMMAND "${CMAKE_COMMAND}"
+ ARGS -E make_directory ${dstDir}
+ COMMAND "${CMAKE_COMMAND}"
+ ARGS -E copy proto-analyzers.rst ${dstDir}
+ # clean up the build directory
+ COMMAND rm
+ ARGS -rf .state *.log *.rst
+ DEPENDS bro
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ COMMENT "[Bro] Generating reST docs for proto-analyzers.rst"
+)
+list(APPEND ALL_REST_OUTPUTS proto-analyzers.rst)
+
# create temporary list of all docs to include in the master policy/index file
file(WRITE ${MASTER_POLICY_INDEX} "${MASTER_POLICY_INDEX_TEXT}")
diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake
index 0b077c2c50..529b03ca83 100644
--- a/doc/scripts/DocSourcesList.cmake
+++ b/doc/scripts/DocSourcesList.cmake
@@ -34,6 +34,7 @@ rest_target(${CMAKE_BINARY_DIR}/scripts base/bif/plugins/Bro_DNS.events.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/scripts base/bif/plugins/Bro_FTP.events.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/scripts base/bif/plugins/Bro_FTP.functions.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/scripts base/bif/plugins/Bro_File.events.bif.bro)
+rest_target(${CMAKE_BINARY_DIR}/scripts base/bif/plugins/Bro_FileHash.events.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/scripts base/bif/plugins/Bro_Finger.events.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/scripts base/bif/plugins/Bro_GTPv1.events.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/scripts base/bif/plugins/Bro_Gnutella.events.bif.bro)
@@ -111,6 +112,7 @@ rest_target(${psd} base/frameworks/notice/non-cluster.bro)
rest_target(${psd} base/frameworks/notice/weird.bro)
rest_target(${psd} base/frameworks/packet-filter/main.bro)
rest_target(${psd} base/frameworks/packet-filter/netstats.bro)
+rest_target(${psd} base/frameworks/packet-filter/utils.bro)
rest_target(${psd} base/frameworks/reporter/main.bro)
rest_target(${psd} base/frameworks/signatures/main.bro)
rest_target(${psd} base/frameworks/software/main.bro)
@@ -189,6 +191,7 @@ rest_target(${psd} policy/frameworks/intel/smtp-url-extraction.bro)
rest_target(${psd} policy/frameworks/intel/smtp.bro)
rest_target(${psd} policy/frameworks/intel/ssl.bro)
rest_target(${psd} policy/frameworks/intel/where-locations.bro)
+rest_target(${psd} policy/frameworks/packet-filter/shunt.bro)
rest_target(${psd} policy/frameworks/software/version-changes.bro)
rest_target(${psd} policy/frameworks/software/vulnerable.bro)
rest_target(${psd} policy/integration/barnyard2/main.bro)
@@ -197,6 +200,7 @@ rest_target(${psd} policy/integration/collective-intel/main.bro)
rest_target(${psd} policy/misc/app-metrics.bro)
rest_target(${psd} policy/misc/capture-loss.bro)
rest_target(${psd} policy/misc/detect-traceroute/main.bro)
+rest_target(${psd} policy/misc/load-balancing.bro)
rest_target(${psd} policy/misc/loaded-scripts.bro)
rest_target(${psd} policy/misc/profiling.bro)
rest_target(${psd} policy/misc/scan.bro)
diff --git a/doc/scripts/bifs.rst b/doc/scripts/bifs.rst
deleted file mode 100644
index eaae0e13b8..0000000000
--- a/doc/scripts/bifs.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-.. This is a stub doc to which broxygen appends during the build process
-
-Built-In Functions (BIFs)
-=========================
-
diff --git a/scripts/base/frameworks/analyzer/main.bro b/scripts/base/frameworks/analyzer/main.bro
index e2dcf151c7..c4ee5c943b 100644
--- a/scripts/base/frameworks/analyzer/main.bro
+++ b/scripts/base/frameworks/analyzer/main.bro
@@ -9,9 +9,9 @@
##! :bro:enum:`Analyzer::ANALYZER_HTTP`. These tags are defined internally by
##! the analyzers themselves, and documented in their analyzer-specific
##! description along with the events that they generate.
-##!
-##! .. todo: ``The ANALYZER_*`` are in fact not yet documented, we need to
-##! add that to Broxygen.
+
+@load base/frameworks/packet-filter/utils
+
module Analyzer;
export {
@@ -98,7 +98,21 @@ export {
##
## Returns: True if succesful.
global schedule_analyzer: function(orig: addr, resp: addr, resp_p: port,
- analyzer: Analyzer::Tag, tout: interval) : bool;
+ analyzer: Analyzer::Tag, tout: interval) : bool;
+
+ ## Automatically creates a BPF filter for the specified protocol based
+ ## on the data supplied for the protocol through the
+ ## :bro:see:`Analyzer::register_for_ports` function.
+ ##
+ ## tag: The analyzer tag.
+ ##
+ ## Returns: BPF filter string.
+ global analyzer_to_bpf: function(tag: Analyzer::Tag): string;
+
+ ## Create a BPF filter which matches all of the ports defined
+ ## by the various protocol analysis scripts as "registered ports"
+ ## for the protocol.
+ global get_bpf: function(): string;
## A set of analyzers to disable by default at startup. The default set
## contains legacy analyzers that are no longer supported.
@@ -179,3 +193,25 @@ function schedule_analyzer(orig: addr, resp: addr, resp_p: port,
return __schedule_analyzer(orig, resp, resp_p, analyzer, tout);
}
+function analyzer_to_bpf(tag: Analyzer::Tag): string
+ {
+ # Return an empty string if an undefined analyzer was given.
+ if ( tag !in ports )
+ return "";
+
+ local output = "";
+ for ( p in ports[tag] )
+ output = PacketFilter::combine_filters(output, "or", PacketFilter::port_to_bpf(p));
+ return output;
+ }
+
+function get_bpf(): string
+ {
+ local output = "";
+ for ( tag in ports )
+ {
+ output = PacketFilter::combine_filters(output, "or", analyzer_to_bpf(tag));
+ }
+ return output;
+ }
+
diff --git a/scripts/base/frameworks/communication/main.bro b/scripts/base/frameworks/communication/main.bro
index 7ded67688a..caf5119d9d 100644
--- a/scripts/base/frameworks/communication/main.bro
+++ b/scripts/base/frameworks/communication/main.bro
@@ -216,12 +216,9 @@ function setup_peer(p: event_peer, node: Node)
request_remote_events(p, node$events);
}
- if ( node?$capture_filter )
+ if ( node?$capture_filter && node$capture_filter != "" )
{
local filter = node$capture_filter;
- if ( filter == "" )
- filter = PacketFilter::default_filter;
-
do_script_log(p, fmt("sending capture_filter: %s", filter));
send_capture_filter(p, filter);
}
diff --git a/scripts/base/frameworks/dpd/dpd.sig b/scripts/base/frameworks/dpd/dpd.sig
deleted file mode 100644
index 49e24cefc6..0000000000
--- a/scripts/base/frameworks/dpd/dpd.sig
+++ /dev/null
@@ -1,212 +0,0 @@
-# Signatures to initiate dynamic protocol detection.
-
-signature dpd_ftp_client {
- ip-proto == tcp
- payload /(|.*[\n\r]) *[uU][sS][eE][rR] /
- tcp-state originator
-}
-
-# Match for server greeting (220, 120) and for login or passwd
-# required (230, 331).
-signature dpd_ftp_server {
- ip-proto == tcp
- payload /[\n\r ]*(120|220)[^0-9].*[\n\r] *(230|331)[^0-9]/
- tcp-state responder
- requires-reverse-signature dpd_ftp_client
- enable "ftp"
-}
-
-signature dpd_http_client {
- ip-proto == tcp
- payload /^[[:space:]]*(GET|HEAD|POST)[[:space:]]*/
- tcp-state originator
-}
-
-signature dpd_http_server {
- ip-proto == tcp
- payload /^HTTP\/[0-9]/
- tcp-state responder
- requires-reverse-signature dpd_http_client
- enable "http"
-}
-
-signature dpd_bittorrenttracker_client {
- ip-proto == tcp
- payload /^.*\/announce\?.*info_hash/
- tcp-state originator
-}
-
-signature dpd_bittorrenttracker_server {
- ip-proto == tcp
- payload /^HTTP\/[0-9]/
- tcp-state responder
- requires-reverse-signature dpd_bittorrenttracker_client
- enable "bittorrenttracker"
-}
-
-signature dpd_bittorrent_peer1 {
- ip-proto == tcp
- payload /^\x13BitTorrent protocol/
- tcp-state originator
-}
-
-signature dpd_bittorrent_peer2 {
- ip-proto == tcp
- payload /^\x13BitTorrent protocol/
- tcp-state responder
- requires-reverse-signature dpd_bittorrent_peer1
- enable "bittorrent"
-}
-
-signature irc_client1 {
- ip-proto == tcp
- payload /(|.*[\r\n]) *[Uu][Ss][Ee][Rr] +.+[\n\r]+ *[Nn][Ii][Cc][Kk] +.*[\r\n]/
- requires-reverse-signature irc_server_reply
- tcp-state originator
- enable "irc"
-}
-
-signature irc_client2 {
- ip-proto == tcp
- payload /(|.*[\r\n]) *[Nn][Ii][Cc][Kk] +.+[\r\n]+ *[Uu][Ss][Ee][Rr] +.+[\r\n]/
- requires-reverse-signature irc_server_reply
- tcp-state originator
- enable "irc"
-}
-
-signature irc_server_reply {
- ip-proto == tcp
- payload /^(|.*[\n\r])(:[^ \n\r]+ )?[0-9][0-9][0-9] /
- tcp-state responder
-}
-
-signature irc_server_to_server1 {
- ip-proto == tcp
- payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
-}
-
-signature irc_server_to_server2 {
- ip-proto == tcp
- payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
- requires-reverse-signature irc_server_to_server1
- enable "irc"
-}
-
-signature dpd_smtp_client {
- ip-proto == tcp
- payload /(|.*[\n\r])[[:space:]]*([hH][eE][lL][oO]|[eE][hH][lL][oO])/
- requires-reverse-signature dpd_smtp_server
- enable "smtp"
- tcp-state originator
-}
-
-signature dpd_smtp_server {
- ip-proto == tcp
- payload /^[[:space:]]*220[[:space:]-]/
- tcp-state responder
-}
-
-signature dpd_ssh_client {
- ip-proto == tcp
- payload /^[sS][sS][hH]-/
- requires-reverse-signature dpd_ssh_server
- enable "ssh"
- tcp-state originator
-}
-
-signature dpd_ssh_server {
- ip-proto == tcp
- payload /^[sS][sS][hH]-/
- tcp-state responder
-}
-
-signature dpd_pop3_server {
- ip-proto == tcp
- payload /^\+OK/
- requires-reverse-signature dpd_pop3_client
- enable "pop3"
- tcp-state responder
-}
-
-signature dpd_pop3_client {
- ip-proto == tcp
- payload /(|.*[\r\n])[[:space:]]*([uU][sS][eE][rR][[:space:]]|[aA][pP][oO][pP][[:space:]]|[cC][aA][pP][aA]|[aA][uU][tT][hH])/
- tcp-state originator
-}
-
-signature dpd_ssl_server {
- ip-proto == tcp
- # Server hello.
- payload /^(\x16\x03[\x00\x01\x02]..\x02...\x03[\x00\x01\x02]|...?\x04..\x00\x02).*/
- requires-reverse-signature dpd_ssl_client
- enable "ssl"
- tcp-state responder
-}
-
-signature dpd_ssl_client {
- ip-proto == tcp
- # Client hello.
- payload /^(\x16\x03[\x00\x01\x02]..\x01...\x03[\x00\x01\x02]|...?\x01[\x00\x01\x02][\x02\x03]).*/
- tcp-state originator
-}
-
-signature dpd_ayiya {
- ip-proto = udp
- payload /^..\x11\x29/
- enable "ayiya"
-}
-
-signature dpd_teredo {
- ip-proto = udp
- payload /^(\x00\x00)|(\x00\x01)|([\x60-\x6f])/
- enable "teredo"
-}
-
-signature dpd_socks4_client {
- ip-proto == tcp
- # '32' is a rather arbitrary max length for the user name.
- payload /^\x04[\x01\x02].{0,32}\x00/
- tcp-state originator
-}
-
-signature dpd_socks4_server {
- ip-proto == tcp
- requires-reverse-signature dpd_socks4_client
- payload /^\x00[\x5a\x5b\x5c\x5d]/
- tcp-state responder
- enable "socks"
-}
-
-signature dpd_socks4_reverse_client {
- ip-proto == tcp
- # '32' is a rather arbitrary max length for the user name.
- payload /^\x04[\x01\x02].{0,32}\x00/
- tcp-state responder
-}
-
-signature dpd_socks4_reverse_server {
- ip-proto == tcp
- requires-reverse-signature dpd_socks4_reverse_client
- payload /^\x00[\x5a\x5b\x5c\x5d]/
- tcp-state originator
- enable "socks"
-}
-
-signature dpd_socks5_client {
- ip-proto == tcp
- # Watch for a few authentication methods to reduce false positives.
- payload /^\x05.[\x00\x01\x02]/
- tcp-state originator
-}
-
-signature dpd_socks5_server {
- ip-proto == tcp
- requires-reverse-signature dpd_socks5_client
- # Watch for a single authentication method to be chosen by the server or
- # the server to indicate the no authentication is required.
- payload /^\x05(\x00|\x01[\x00\x01\x02])/
- tcp-state responder
- enable "socks"
-}
-
-
diff --git a/scripts/base/frameworks/dpd/main.bro b/scripts/base/frameworks/dpd/main.bro
index c3282a1da4..9df8a45e5e 100644
--- a/scripts/base/frameworks/dpd/main.bro
+++ b/scripts/base/frameworks/dpd/main.bro
@@ -3,8 +3,6 @@
module DPD;
-@load-sigs ./dpd.sig
-
export {
## Add the DPD logging stream identifier.
redef enum Log::ID += { LOG };
diff --git a/scripts/base/frameworks/file-analysis/main.bro b/scripts/base/frameworks/file-analysis/main.bro
index 0ed66464fe..3352787cba 100644
--- a/scripts/base/frameworks/file-analysis/main.bro
+++ b/scripts/base/frameworks/file-analysis/main.bro
@@ -15,18 +15,20 @@ export {
## A structure which represents a desired type of file analysis.
type AnalyzerArgs: record {
## The type of analysis.
- tag: Analyzer;
+ tag: FileAnalysis::Tag;
## The local filename to which to write an extracted file. Must be
## set when *tag* is :bro:see:`FileAnalysis::ANALYZER_EXTRACT`.
extract_filename: string &optional;
## An event which will be generated for all new file contents,
- ## chunk-wise.
+ ## chunk-wise. Used when *tag* is
+ ## :bro:see:`FileAnalysis::ANALYZER_DATA_EVENT`.
chunk_event: event(f: fa_file, data: string, off: count) &optional;
## An event which will be generated for all new file contents,
- ## stream-wise.
+ ## stream-wise. Used when *tag* is
+ ## :bro:see:`FileAnalysis::ANALYZER_DATA_EVENT`.
stream_event: event(f: fa_file, data: string) &optional;
} &redef;
@@ -87,7 +89,7 @@ export {
conn_uids: set[string] &log;
## A set of analysis types done during the file analysis.
- analyzers: set[Analyzer] &log;
+ analyzers: set[FileAnalysis::Tag];
## Local filenames of extracted files.
extracted_files: set[string] &log;
@@ -120,7 +122,9 @@ export {
## Sets the *timeout_interval* field of :bro:see:`fa_file`, which is
## used to determine the length of inactivity that is allowed for a file
- ## before internal state related to it is cleaned up.
+ ## before internal state related to it is cleaned up. When used within a
+ ## :bro:see:`file_timeout` handler, the analysis will delay timing out
+ ## again for the period specified by *t*.
##
## f: the file.
##
@@ -130,18 +134,6 @@ export {
## for the *id* isn't currently active.
global set_timeout_interval: function(f: fa_file, t: interval): bool;
- ## Postpones the timeout of file analysis for a given file.
- ## When used within a :bro:see:`file_timeout` handler for, the analysis
- ## the analysis will delay timing out for the period of time indicated by
- ## the *timeout_interval* field of :bro:see:`fa_file`, which can be set
- ## with :bro:see:`FileAnalysis::set_timeout_interval`.
- ##
- ## f: the file.
- ##
- ## Returns: true if the timeout will be postponed, or false if analysis
- ## for the *id* isn't currently active.
- global postpone_timeout: function(f: fa_file): bool;
-
## Adds an analyzer to the analysis of a given file.
##
## f: the file.
@@ -171,58 +163,6 @@ export {
## rest of it's contents, or false if analysis for the *id*
## isn't currently active.
global stop: function(f: fa_file): bool;
-
- ## Sends a sequential stream of data in for file analysis.
- ## Meant for use when providing external file analysis input (e.g.
- ## from the input framework).
- ##
- ## source: a string that uniquely identifies the logical file that the
- ## data is a part of and describes its source.
- ##
- ## data: bytestring contents of the file to analyze.
- global data_stream: function(source: string, data: string);
-
- ## Sends a non-sequential chunk of data in for file analysis.
- ## Meant for use when providing external file analysis input (e.g.
- ## from the input framework).
- ##
- ## source: a string that uniquely identifies the logical file that the
- ## data is a part of and describes its source.
- ##
- ## data: bytestring contents of the file to analyze.
- ##
- ## offset: the offset within the file that this chunk starts.
- global data_chunk: function(source: string, data: string, offset: count);
-
- ## Signals a content gap in the file bytestream.
- ## Meant for use when providing external file analysis input (e.g.
- ## from the input framework).
- ##
- ## source: a string that uniquely identifies the logical file that the
- ## data is a part of and describes its source.
- ##
- ## offset: the offset within the file that this gap starts.
- ##
- ## len: the number of bytes that are missing.
- global gap: function(source: string, offset: count, len: count);
-
- ## Signals the total size of a file.
- ## Meant for use when providing external file analysis input (e.g.
- ## from the input framework).
- ##
- ## source: a string that uniquely identifies the logical file that the
- ## data is a part of and describes its source.
- ##
- ## size: the number of bytes that comprise the full file.
- global set_size: function(source: string, size: count);
-
- ## Signals the end of a file.
- ## Meant for use when providing external file analysis input (e.g.
- ## from the input framework).
- ##
- ## source: a string that uniquely identifies the logical file that the
- ## data is a part of and describes its source.
- global eof: function(source: string);
}
redef record fa_file += {
@@ -259,11 +199,6 @@ function set_timeout_interval(f: fa_file, t: interval): bool
return __set_timeout_interval(f$id, t);
}
-function postpone_timeout(f: fa_file): bool
- {
- return __postpone_timeout(f$id);
- }
-
function add_analyzer(f: fa_file, args: AnalyzerArgs): bool
{
if ( ! __add_analyzer(f$id, args) ) return F;
@@ -287,31 +222,6 @@ function stop(f: fa_file): bool
return __stop(f$id);
}
-function data_stream(source: string, data: string)
- {
- __data_stream(source, data);
- }
-
-function data_chunk(source: string, data: string, offset: count)
- {
- __data_chunk(source, data, offset);
- }
-
-function gap(source: string, offset: count, len: count)
- {
- __gap(source, offset, len);
- }
-
-function set_size(source: string, size: count)
- {
- __set_size(source, size);
- }
-
-function eof(source: string)
- {
- __eof(source);
- }
-
event bro_init() &priority=5
{
Log::create_stream(FileAnalysis::LOG,
diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro
index 4de98ea0f2..e5d74cbc36 100644
--- a/scripts/base/frameworks/input/main.bro
+++ b/scripts/base/frameworks/input/main.bro
@@ -122,6 +122,34 @@ export {
config: table[string] of string &default=table();
};
+	## A file analysis input stream type used to forward input data to the
+ ## file analysis framework.
+ type AnalysisDescription: record {
+ ## String that allows the reader to find the source.
+ ## For `READER_ASCII`, this is the filename.
+ source: string;
+
+		## Reader to use for this stream. Compatible readers must be
+ ## able to accept a filter of a single string type (i.e.
+ ## they read a byte stream).
+ reader: Reader &default=Input::READER_BINARY;
+
+		## Read mode to use for this stream.
+ mode: Mode &default=default_mode;
+
+ ## Descriptive name that uniquely identifies the input source.
+		## Can be used to remove a stream at a later time.
+ ## This will also be used for the unique *source* field of
+ ## :bro:see:`fa_file`. Most of the time, the best choice for this
+ ## field will be the same value as the *source* field.
+ name: string;
+
+		## A key/value table that will be passed on to the reader.
+ ## Interpretation of the values is left to the writer, but
+ ## usually they will be used for configuration purposes.
+ config: table[string] of string &default=table();
+ };
+
## Create a new table input from a given source. Returns true on success.
##
## description: `TableDescription` record describing the source.
@@ -132,6 +160,14 @@ export {
## description: `TableDescription` record describing the source.
global add_event: function(description: Input::EventDescription) : bool;
+ ## Create a new file analysis input from a given source. Data read from
+ ## the source is automatically forwarded to the file analysis framework.
+ ##
+ ## description: A record describing the source
+ ##
+	## Returns: true on success.
+ global add_analysis: function(description: Input::AnalysisDescription) : bool;
+
## Remove a input stream. Returns true on success and false if the named stream was
## not found.
##
@@ -164,6 +200,11 @@ function add_event(description: Input::EventDescription) : bool
return __create_event_stream(description);
}
+function add_analysis(description: Input::AnalysisDescription) : bool
+ {
+ return __create_analysis_stream(description);
+ }
+
function remove(id: string) : bool
{
return __remove_stream(id);
diff --git a/scripts/base/frameworks/input/readers/raw.bro b/scripts/base/frameworks/input/readers/raw.bro
index 45deed3eda..9fa7d1a2fb 100644
--- a/scripts/base/frameworks/input/readers/raw.bro
+++ b/scripts/base/frameworks/input/readers/raw.bro
@@ -6,4 +6,12 @@ export {
## Separator between input records.
## Please note that the separator has to be exactly one character long
const record_separator = "\n" &redef;
+
+ ## Event that is called when a process created by the raw reader exits.
+ ##
+ ## name: name of the input stream
+ ## source: source of the input stream
+ ## exit_code: exit code of the program, or number of the signal that forced the program to exit
+	## signal_exit: false when program exited normally, true when program was forced to exit by a signal
+ global process_finished: event(name: string, source:string, exit_code:count, signal_exit:bool);
}
diff --git a/scripts/base/frameworks/packet-filter/__load__.bro b/scripts/base/frameworks/packet-filter/__load__.bro
index 1d72e1ebe0..011885e8b7 100644
--- a/scripts/base/frameworks/packet-filter/__load__.bro
+++ b/scripts/base/frameworks/packet-filter/__load__.bro
@@ -1,2 +1,3 @@
+@load ./utils
@load ./main
@load ./netstats
diff --git a/scripts/base/frameworks/packet-filter/main.bro b/scripts/base/frameworks/packet-filter/main.bro
index afcb5bd700..72b2b62f34 100644
--- a/scripts/base/frameworks/packet-filter/main.bro
+++ b/scripts/base/frameworks/packet-filter/main.bro
@@ -1,10 +1,12 @@
##! This script supports how Bro sets it's BPF capture filter. By default
-##! Bro sets an unrestricted filter that allows all traffic. If a filter
+##! Bro sets a capture filter that allows all traffic. If a filter
##! is set on the command line, that filter takes precedence over the default
##! open filter and all filters defined in Bro scripts with the
##! :bro:id:`capture_filters` and :bro:id:`restrict_filters` variables.
@load base/frameworks/notice
+@load base/frameworks/analyzer
+@load ./utils
module PacketFilter;
@@ -14,11 +16,14 @@ export {
## Add notice types related to packet filter errors.
redef enum Notice::Type += {
- ## This notice is generated if a packet filter is unable to be compiled.
+ ## This notice is generated if a packet filter cannot be compiled.
Compile_Failure,
- ## This notice is generated if a packet filter is fails to install.
+		## Generated if a packet filter fails to install.
Install_Failure,
+
+		## Generated when a filter takes too long to compile.
+ Too_Long_To_Compile_Filter
};
## The record type defining columns to be logged in the packet filter
@@ -42,83 +47,248 @@ export {
success: bool &log &default=T;
};
- ## By default, Bro will examine all packets. If this is set to false,
- ## it will dynamically build a BPF filter that only select protocols
- ## for which the user has loaded a corresponding analysis script.
- ## The latter used to be default for Bro versions < 2.0. That has now
- ## changed however to enable port-independent protocol analysis.
- const all_packets = T &redef;
+ ## The BPF filter that is used by default to define what traffic should
+ ## be captured. Filters defined in :bro:id:`restrict_filters` will still
+ ## be applied to reduce the captured traffic.
+ const default_capture_filter = "ip or not ip" &redef;
## Filter string which is unconditionally or'ed to the beginning of every
## dynamically built filter.
const unrestricted_filter = "" &redef;
+ ## Filter string which is unconditionally and'ed to the beginning of every
+ ## dynamically built filter. This is mostly used when a custom filter is being
+ ## used but MPLS or VLAN tags are on the traffic.
+ const restricted_filter = "" &redef;
+
+ ## The maximum amount of time that you'd like to allow for BPF filters to compile.
+ ## If this time is exceeded, compensation measures may be taken by the framework
+ ## to reduce the filter size. This threshold being crossed also results in
+ ## the :bro:see:`PacketFilter::Too_Long_To_Compile_Filter` notice.
+ const max_filter_compile_time = 100msec &redef;
+
+ ## Install a BPF filter to exclude some traffic. The filter should positively
+ ## match what is to be excluded, it will be wrapped in a "not".
+ ##
+ ## filter_id: An arbitrary string that can be used to identify
+ ## the filter.
+ ##
+ ## filter: A BPF expression of traffic that should be excluded.
+ ##
+ ## Returns: A boolean value to indicate if the filter was successfully
+ ## installed or not.
+ global exclude: function(filter_id: string, filter: string): bool;
+
+ ## Install a temporary filter to traffic which should not be passed through
+ ## the BPF filter. The filter should match the traffic you don't want
+ ## to see (it will be wrapped in a "not" condition).
+ ##
+ ## filter_id: An arbitrary string that can be used to identify
+ ## the filter.
+ ##
+ ## filter: A BPF expression of traffic that should be excluded.
+ ##
+	## span: The duration for which this filter should be put in place.
+ ##
+ ## Returns: A boolean value to indicate if the filter was successfully
+ ## installed or not.
+ global exclude_for: function(filter_id: string, filter: string, span: interval): bool;
+
## Call this function to build and install a new dynamically built
## packet filter.
- global install: function();
+ global install: function(): bool;
+
+ ## A data structure to represent filter generating plugins.
+ type FilterPlugin: record {
+ ## A function that is directly called when generating the complete filter.
+ func : function();
+ };
+
+ ## API function to register a new plugin for dynamic restriction filters.
+ global register_filter_plugin: function(fp: FilterPlugin);
+
+ ## Enables the old filtering approach of "only watch common ports for
+ ## analyzed protocols".
+ ##
+ ## Unless you know what you are doing, leave this set to F.
+ const enable_auto_protocol_capture_filters = F &redef;
## This is where the default packet filter is stored and it should not
## normally be modified by users.
- global default_filter = "";
+ global current_filter = "";
}
+global dynamic_restrict_filters: table[string] of string = {};
+
+# Track if a filter is currently building so functions that would ultimately
+# install a filter immediately can still be used but they won't try to build or
+# install the filter.
+global currently_building = F;
+
+# Internal tracking for if the filter being built has possibly been changed.
+global filter_changed = F;
+
+global filter_plugins: set[FilterPlugin] = {};
+
redef enum PcapFilterID += {
DefaultPcapFilter,
+ FilterTester,
};
-function combine_filters(lfilter: string, rfilter: string, op: string): string
+function test_filter(filter: string): bool
{
- if ( lfilter == "" && rfilter == "" )
- return "";
- else if ( lfilter == "" )
- return rfilter;
- else if ( rfilter == "" )
- return lfilter;
- else
- return fmt("(%s) %s (%s)", lfilter, op, rfilter);
+ if ( ! precompile_pcap_filter(FilterTester, filter) )
+ {
+ # The given filter was invalid
+ # TODO: generate a notice.
+ return F;
+ }
+ return T;
}
-function build_default_filter(): string
+# This tracks any changes for filtering mechanisms that play along nice
+# and set filter_changed to T.
+event filter_change_tracking()
+ {
+ if ( filter_changed )
+ install();
+
+ schedule 5min { filter_change_tracking() };
+ }
+
+event bro_init() &priority=5
+ {
+ Log::create_stream(PacketFilter::LOG, [$columns=Info]);
+
+ # Preverify the capture and restrict filters to give more granular failure messages.
+ for ( id in capture_filters )
+ {
+ if ( ! test_filter(capture_filters[id]) )
+ Reporter::fatal(fmt("Invalid capture_filter named '%s' - '%s'", id, capture_filters[id]));
+ }
+
+ for ( id in restrict_filters )
+ {
+ if ( ! test_filter(restrict_filters[id]) )
+ Reporter::fatal(fmt("Invalid restrict filter named '%s' - '%s'", id, restrict_filters[id]));
+ }
+ }
+
+event bro_init() &priority=-5
+ {
+ install();
+
+ event filter_change_tracking();
+ }
+
+function register_filter_plugin(fp: FilterPlugin)
+ {
+ add filter_plugins[fp];
+ }
+
+event remove_dynamic_filter(filter_id: string)
+ {
+ if ( filter_id in dynamic_restrict_filters )
+ {
+ delete dynamic_restrict_filters[filter_id];
+ install();
+ }
+ }
+
+function exclude(filter_id: string, filter: string): bool
+ {
+ if ( ! test_filter(filter) )
+ return F;
+
+ dynamic_restrict_filters[filter_id] = filter;
+ install();
+ return T;
+ }
+
+function exclude_for(filter_id: string, filter: string, span: interval): bool
+ {
+ if ( exclude(filter_id, filter) )
+ {
+ schedule span { remove_dynamic_filter(filter_id) };
+ return T;
+ }
+ return F;
+ }
+
+function build(): string
{
if ( cmd_line_bpf_filter != "" )
# Return what the user specified on the command line;
return cmd_line_bpf_filter;
- if ( all_packets )
- # Return an "always true" filter.
- return "ip or not ip";
+ currently_building = T;
- # Build filter dynamically.
+ # Generate all of the plugin based filters.
+ for ( plugin in filter_plugins )
+ {
+ plugin$func();
+ }
- # First the capture_filter.
local cfilter = "";
- for ( id in capture_filters )
- cfilter = combine_filters(cfilter, capture_filters[id], "or");
+ if ( |capture_filters| == 0 && ! enable_auto_protocol_capture_filters )
+ cfilter = default_capture_filter;
- # Then the restrict_filter.
+ for ( id in capture_filters )
+ cfilter = combine_filters(cfilter, "or", capture_filters[id]);
+
+ if ( enable_auto_protocol_capture_filters )
+ cfilter = combine_filters(cfilter, "or", Analyzer::get_bpf());
+
+ # Apply the restriction filters.
local rfilter = "";
for ( id in restrict_filters )
- rfilter = combine_filters(rfilter, restrict_filters[id], "and");
+ rfilter = combine_filters(rfilter, "and", restrict_filters[id]);
+
+ # Apply the dynamic restriction filters.
+ for ( filt in dynamic_restrict_filters )
+ rfilter = combine_filters(rfilter, "and", string_cat("not (", dynamic_restrict_filters[filt], ")"));
# Finally, join them into one filter.
- local filter = combine_filters(rfilter, cfilter, "and");
- if ( unrestricted_filter != "" )
- filter = combine_filters(unrestricted_filter, filter, "or");
+ local filter = combine_filters(cfilter, "and", rfilter);
+ if ( unrestricted_filter != "" )
+ filter = combine_filters(unrestricted_filter, "or", filter);
+ if ( restricted_filter != "" )
+ filter = combine_filters(restricted_filter, "and", filter);
+
+ currently_building = F;
return filter;
}
-function install()
+function install(): bool
{
- default_filter = build_default_filter();
+ if ( currently_building )
+ return F;
- if ( ! precompile_pcap_filter(DefaultPcapFilter, default_filter) )
+ local tmp_filter = build();
+
+ # No need to proceed if the filter hasn't changed.
+ if ( tmp_filter == current_filter )
+ return F;
+
+ local ts = current_time();
+ if ( ! precompile_pcap_filter(DefaultPcapFilter, tmp_filter) )
{
NOTICE([$note=Compile_Failure,
$msg=fmt("Compiling packet filter failed"),
- $sub=default_filter]);
- Reporter::fatal(fmt("Bad pcap filter '%s'", default_filter));
+ $sub=tmp_filter]);
+ if ( network_time() == 0.0 )
+ Reporter::fatal(fmt("Bad pcap filter '%s'", tmp_filter));
+ else
+ Reporter::warning(fmt("Bad pcap filter '%s'", tmp_filter));
}
+ local diff = current_time()-ts;
+ if ( diff > max_filter_compile_time )
+ NOTICE([$note=Too_Long_To_Compile_Filter,
+ $msg=fmt("A BPF filter is taking longer than %0.1f seconds to compile", diff)]);
+
+ # Set it to the current filter if it passed precompiling
+ current_filter = tmp_filter;
# Do an audit log for the packet filter.
local info: Info;
@@ -129,7 +299,7 @@ function install()
info$ts = current_time();
info$init = T;
}
- info$filter = default_filter;
+ info$filter = current_filter;
if ( ! install_pcap_filter(DefaultPcapFilter) )
{
@@ -137,15 +307,13 @@ function install()
info$success = F;
NOTICE([$note=Install_Failure,
$msg=fmt("Installing packet filter failed"),
- $sub=default_filter]);
+ $sub=current_filter]);
}
if ( reading_live_traffic() || reading_traces() )
Log::write(PacketFilter::LOG, info);
- }
-event bro_init() &priority=10
- {
- Log::create_stream(PacketFilter::LOG, [$columns=Info]);
- PacketFilter::install();
+ # Update the filter change tracking
+ filter_changed = F;
+ return T;
}
diff --git a/scripts/base/frameworks/packet-filter/netstats.bro b/scripts/base/frameworks/packet-filter/netstats.bro
index 9fbaa5cd1d..b5ffe24f54 100644
--- a/scripts/base/frameworks/packet-filter/netstats.bro
+++ b/scripts/base/frameworks/packet-filter/netstats.bro
@@ -13,7 +13,7 @@ export {
};
## This is the interval between individual statistics collection.
- const stats_collection_interval = 10secs;
+ const stats_collection_interval = 5min;
}
event net_stats_update(last_stat: NetStats)
diff --git a/scripts/base/frameworks/packet-filter/utils.bro b/scripts/base/frameworks/packet-filter/utils.bro
new file mode 100644
index 0000000000..7728ebf9f9
--- /dev/null
+++ b/scripts/base/frameworks/packet-filter/utils.bro
@@ -0,0 +1,58 @@
+module PacketFilter;
+
+export {
+ ## Takes a :bro:type:`port` and returns a BPF expression which will
+ ## match the port.
+ ##
+ ## p: The port.
+ ##
+ ## Returns: A valid BPF filter string for matching the port.
+ global port_to_bpf: function(p: port): string;
+
+ ## Create a BPF filter to sample IPv4 and IPv6 traffic.
+ ##
+ ## num_parts: The number of parts the traffic should be split into.
+ ##
+ ## this_part: The part of the traffic this filter will accept. 0-based.
+ global sampling_filter: function(num_parts: count, this_part: count): string;
+
+ ## Combines two valid BPF filter strings with a string based operator
+ ## to form a new filter.
+ ##
+ ## lfilter: Filter which will go on the left side.
+ ##
+ ## op: Operation being applied (typically "or" or "and").
+ ##
+ ## rfilter: Filter which will go on the right side.
+ ##
+ ## Returns: A new string representing the two filters combined with
+ ## the operator. Either filter being an empty string will
+ ## still result in a valid filter.
+ global combine_filters: function(lfilter: string, op: string, rfilter: string): string;
+}
+
+function port_to_bpf(p: port): string
+ {
+ local tp = get_port_transport_proto(p);
+ return cat(tp, " and ", fmt("port %d", p));
+ }
+
+function combine_filters(lfilter: string, op: string, rfilter: string): string
+ {
+ if ( lfilter == "" && rfilter == "" )
+ return "";
+ else if ( lfilter == "" )
+ return rfilter;
+ else if ( rfilter == "" )
+ return lfilter;
+ else
+ return fmt("(%s) %s (%s)", lfilter, op, rfilter);
+ }
+
+function sampling_filter(num_parts: count, this_part: count): string
+ {
+ local v4_filter = fmt("ip and ((ip[14:2]+ip[18:2]) - (%d*((ip[14:2]+ip[18:2])/%d)) == %d)", num_parts, num_parts, this_part);
+ # TODO: this is probably a fairly suboptimal filter, but it should work for now.
+ local v6_filter = fmt("ip6 and ((ip6[22:2]+ip6[38:2]) - (%d*((ip6[22:2]+ip6[38:2])/%d)) == %d)", num_parts, num_parts, this_part);
+ return combine_filters(v4_filter, "or", v6_filter);
+ }
diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro
index 2110110a40..60ed0d2fd1 100644
--- a/scripts/base/init-bare.bro
+++ b/scripts/base/init-bare.bro
@@ -222,17 +222,6 @@ type endpoint_stats: record {
endian_type: count;
};
-## A unique analyzer instance ID. Each time instantiates a protocol analyzers
-## for a connection, it assigns it a unique ID that can be used to reference
-## that instance.
-##
-## .. bro:see:: Analyzer::name Analyzer::disable_analyzer protocol_confirmation
-## protocol_violation
-##
-## .. todo::While we declare an alias for the type here, the events/functions still
-## use ``count``. That should be changed.
-type AnalyzerID: count;
-
module Tunnel;
export {
## Records the identity of an encapsulating parent of a tunneled connection.
@@ -777,19 +766,6 @@ global signature_files = "" &add_func = add_signature_file;
## ``p0f`` fingerprint file to use. Will be searched relative to ``BROPATH``.
const passive_fingerprint_file = "base/misc/p0f.fp" &redef;
-# todo::testing to see if I can remove these without causing problems.
-#const ftp = 21/tcp;
-#const ssh = 22/tcp;
-#const telnet = 23/tcp;
-#const smtp = 25/tcp;
-#const domain = 53/tcp; # note, doesn't include UDP version
-#const gopher = 70/tcp;
-#const finger = 79/tcp;
-#const http = 80/tcp;
-#const ident = 113/tcp;
-#const bgp = 179/tcp;
-#const rlogin = 513/tcp;
-
# TCP values for :bro:see:`endpoint` *state* field.
# todo::these should go into an enum to make them autodoc'able.
const TCP_INACTIVE = 0; ##< Endpoint is still inactive.
@@ -3065,12 +3041,12 @@ module GLOBAL;
## Number of bytes per packet to capture from live interfaces.
const snaplen = 8192 &redef;
+# Load BiFs defined by plugins.
+@load base/bif/plugins
+
# Load these frameworks here because they use fairly deep integration with
# BiFs and script-land defined types.
@load base/frameworks/logging
@load base/frameworks/input
@load base/frameworks/analyzer
@load base/frameworks/file-analysis
-
-# Load BiFs defined by plugins.
-@load base/bif/plugins
diff --git a/scripts/base/init-default.bro b/scripts/base/init-default.bro
index 9c3995673c..6aa8ff5e26 100644
--- a/scripts/base/init-default.bro
+++ b/scripts/base/init-default.bro
@@ -41,10 +41,12 @@
@load base/protocols/http
@load base/protocols/irc
@load base/protocols/modbus
+@load base/protocols/pop3
@load base/protocols/smtp
@load base/protocols/socks
@load base/protocols/ssh
@load base/protocols/ssl
@load base/protocols/syslog
+@load base/protocols/tunnels
@load base/misc/find-checksum-offloading
diff --git a/scripts/base/protocols/dns/main.bro b/scripts/base/protocols/dns/main.bro
index 15da9aa7b7..bf47519cd8 100644
--- a/scripts/base/protocols/dns/main.bro
+++ b/scripts/base/protocols/dns/main.bro
@@ -122,14 +122,6 @@ redef record connection += {
dns_state: State &optional;
};
-# DPD configuration.
-redef capture_filters += {
- ["dns"] = "port 53",
- ["mdns"] = "udp and port 5353",
- ["llmns"] = "udp and port 5355",
- ["netbios-ns"] = "udp port 137",
-};
-
const ports = { 53/udp, 53/tcp, 137/udp, 5353/udp, 5355/udp };
redef likely_server_ports += { ports };
@@ -215,6 +207,11 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
{
if ( ans$answer_type == DNS_ANS )
{
+ if ( ! c?$dns )
+ {
+ event conn_weird("dns_unmatched_reply", c, "");
+ hook set_session(c, msg, F);
+ }
c$dns$AA = msg$AA;
c$dns$RA = msg$RA;
diff --git a/scripts/base/protocols/ftp/__load__.bro b/scripts/base/protocols/ftp/__load__.bro
index 464571dc7d..f3226de69d 100644
--- a/scripts/base/protocols/ftp/__load__.bro
+++ b/scripts/base/protocols/ftp/__load__.bro
@@ -3,3 +3,5 @@
@load ./file-analysis
@load ./file-extract
@load ./gridftp
+
+@load-sigs ./dpd.sig
\ No newline at end of file
diff --git a/scripts/base/protocols/ftp/dpd.sig b/scripts/base/protocols/ftp/dpd.sig
new file mode 100644
index 0000000000..3a6ceadd18
--- /dev/null
+++ b/scripts/base/protocols/ftp/dpd.sig
@@ -0,0 +1,15 @@
+signature dpd_ftp_client {
+ ip-proto == tcp
+ payload /(|.*[\n\r]) *[uU][sS][eE][rR] /
+ tcp-state originator
+}
+
+# Match for server greeting (220, 120) and for login or passwd
+# required (230, 331).
+signature dpd_ftp_server {
+ ip-proto == tcp
+ payload /[\n\r ]*(120|220)[^0-9].*[\n\r] *(230|331)[^0-9]/
+ tcp-state responder
+ requires-reverse-signature dpd_ftp_client
+ enable "ftp"
+}
diff --git a/scripts/base/protocols/ftp/file-analysis.bro b/scripts/base/protocols/ftp/file-analysis.bro
index 2096af9a75..2d7609197a 100644
--- a/scripts/base/protocols/ftp/file-analysis.bro
+++ b/scripts/base/protocols/ftp/file-analysis.bro
@@ -41,6 +41,7 @@ function get_file_handle(c: connection, is_orig: bool): string
module GLOBAL;
event get_file_handle(tag: Analyzer::Tag, c: connection, is_orig: bool)
+ &priority=5
{
if ( tag != Analyzer::ANALYZER_FTP_DATA ) return;
set_file_handle(FTP::get_file_handle(c, is_orig));
diff --git a/scripts/base/protocols/ftp/file-extract.bro b/scripts/base/protocols/ftp/file-extract.bro
index f14839b616..2b7bb8cd50 100644
--- a/scripts/base/protocols/ftp/file-extract.bro
+++ b/scripts/base/protocols/ftp/file-extract.bro
@@ -13,8 +13,6 @@ export {
const extraction_prefix = "ftp-item" &redef;
}
-global extract_count: count = 0;
-
redef record Info += {
## On disk file where it was extracted to.
extraction_file: string &log &optional;
@@ -26,8 +24,7 @@ redef record Info += {
function get_extraction_name(f: fa_file): string
{
- local r = fmt("%s-%s-%d.dat", extraction_prefix, f$id, extract_count);
- ++extract_count;
+ local r = fmt("%s-%s.dat", extraction_prefix, f$id);
return r;
}
diff --git a/scripts/base/protocols/ftp/main.bro b/scripts/base/protocols/ftp/main.bro
index 88e1fbeeb8..7bf9d6cc4c 100644
--- a/scripts/base/protocols/ftp/main.bro
+++ b/scripts/base/protocols/ftp/main.bro
@@ -110,21 +110,18 @@ redef record connection += {
ftp_data_reuse: bool &default=F;
};
-# Configure DPD
-redef capture_filters += { ["ftp"] = "port 21 and port 2811" };
-
const ports = { 21/tcp, 2811/tcp };
redef likely_server_ports += { ports };
-# Establish the variable for tracking expected connections.
-global ftp_data_expected: table[addr, port] of Info &read_expire=5mins;
-
event bro_init() &priority=5
{
Log::create_stream(FTP::LOG, [$columns=Info, $ev=log_ftp]);
Analyzer::register_for_ports(Analyzer::ANALYZER_FTP, ports);
}
+# Establish the variable for tracking expected connections.
+global ftp_data_expected: table[addr, port] of Info &read_expire=5mins;
+
## A set of commands where the argument can be expected to refer
## to a file or directory.
const file_cmds = {
diff --git a/scripts/base/protocols/http/__load__.bro b/scripts/base/protocols/http/__load__.bro
index 58618dedc7..8f426c1521 100644
--- a/scripts/base/protocols/http/__load__.bro
+++ b/scripts/base/protocols/http/__load__.bro
@@ -4,3 +4,5 @@
@load ./file-ident
@load ./file-hash
@load ./file-extract
+
+@load-sigs ./dpd.sig
\ No newline at end of file
diff --git a/scripts/base/protocols/http/dpd.sig b/scripts/base/protocols/http/dpd.sig
new file mode 100644
index 0000000000..13470f4e95
--- /dev/null
+++ b/scripts/base/protocols/http/dpd.sig
@@ -0,0 +1,13 @@
+signature dpd_http_client {
+ ip-proto == tcp
+ payload /^[[:space:]]*(GET|HEAD|POST)[[:space:]]*/
+ tcp-state originator
+}
+
+signature dpd_http_server {
+ ip-proto == tcp
+ payload /^HTTP\/[0-9]/
+ tcp-state responder
+ requires-reverse-signature dpd_http_client
+ enable "http"
+}
diff --git a/scripts/base/protocols/http/file-analysis.bro b/scripts/base/protocols/http/file-analysis.bro
index 51b3ea8dd5..d6da8c4f69 100644
--- a/scripts/base/protocols/http/file-analysis.bro
+++ b/scripts/base/protocols/http/file-analysis.bro
@@ -6,25 +6,48 @@
module HTTP;
export {
+ redef record HTTP::Info += {
+ ## Number of MIME entities in the HTTP request message body so far.
+ request_mime_level: count &default=0;
+ ## Number of MIME entities in the HTTP response message body so far.
+ response_mime_level: count &default=0;
+ };
+
## Default file handle provider for HTTP.
global get_file_handle: function(c: connection, is_orig: bool): string;
}
+event http_begin_entity(c: connection, is_orig: bool) &priority=5
+ {
+ if ( ! c?$http )
+ return;
+
+ if ( is_orig )
+ ++c$http$request_mime_level;
+ else
+ ++c$http$response_mime_level;
+ }
+
function get_file_handle(c: connection, is_orig: bool): string
{
if ( ! c?$http ) return "";
+ local mime_level: count =
+ is_orig ? c$http$request_mime_level : c$http$response_mime_level;
+ local mime_level_str: string = mime_level > 1 ? cat(mime_level) : "";
+
if ( c$http$range_request )
return cat(Analyzer::ANALYZER_HTTP, " ", is_orig, " ", c$id$orig_h, " ",
build_url(c$http));
return cat(Analyzer::ANALYZER_HTTP, " ", c$start_time, " ", is_orig, " ",
- c$http$trans_depth, " ", id_string(c$id));
+ c$http$trans_depth, mime_level_str, " ", id_string(c$id));
}
module GLOBAL;
event get_file_handle(tag: Analyzer::Tag, c: connection, is_orig: bool)
+ &priority=5
{
if ( tag != Analyzer::ANALYZER_HTTP ) return;
set_file_handle(HTTP::get_file_handle(c, is_orig));
diff --git a/scripts/base/protocols/http/file-extract.bro b/scripts/base/protocols/http/file-extract.bro
index 9c0899b2b6..a8c6039395 100644
--- a/scripts/base/protocols/http/file-extract.bro
+++ b/scripts/base/protocols/http/file-extract.bro
@@ -14,8 +14,11 @@ export {
const extraction_prefix = "http-item" &redef;
redef record Info += {
- ## On-disk file where the response body was extracted to.
- extraction_file: string &log &optional;
+ ## On-disk location where files in request body were extracted.
+ extracted_request_files: vector of string &log &optional;
+
+ ## On-disk location where files in response body were extracted.
+ extracted_response_files: vector of string &log &optional;
## Indicates if the response body is to be extracted or not. Must be
## set before or by the first :bro:see:`file_new` for the file content.
@@ -23,15 +26,28 @@ export {
};
}
-global extract_count: count = 0;
-
function get_extraction_name(f: fa_file): string
{
- local r = fmt("%s-%s-%d.dat", extraction_prefix, f$id, extract_count);
- ++extract_count;
+ local r = fmt("%s-%s.dat", extraction_prefix, f$id);
return r;
}
+function add_extraction_file(c: connection, is_orig: bool, fn: string)
+ {
+ if ( is_orig )
+ {
+ if ( ! c$http?$extracted_request_files )
+ c$http$extracted_request_files = vector();
+ c$http$extracted_request_files[|c$http$extracted_request_files|] = fn;
+ }
+ else
+ {
+ if ( ! c$http?$extracted_response_files )
+ c$http$extracted_response_files = vector();
+ c$http$extracted_response_files[|c$http$extracted_response_files|] = fn;
+ }
+ }
+
event file_new(f: fa_file) &priority=5
{
if ( ! f?$source ) return;
@@ -51,7 +67,7 @@ event file_new(f: fa_file) &priority=5
{
c = f$conns[cid];
if ( ! c?$http ) next;
- c$http$extraction_file = fname;
+ add_extraction_file(c, f$is_orig, fname);
}
return;
@@ -79,6 +95,6 @@ event file_new(f: fa_file) &priority=5
{
c = f$conns[cid];
if ( ! c?$http ) next;
- c$http$extraction_file = fname;
+ add_extraction_file(c, f$is_orig, fname);
}
}
diff --git a/scripts/base/protocols/http/main.bro b/scripts/base/protocols/http/main.bro
index 1c9c1cad2d..6d06376183 100644
--- a/scripts/base/protocols/http/main.bro
+++ b/scripts/base/protocols/http/main.bro
@@ -123,19 +123,12 @@ redef record connection += {
http_state: State &optional;
};
-# DPD configuration.
-redef capture_filters += {
- ["http"] = "tcp and port (80 or 81 or 631 or 1080 or 3138 or 8000 or 8080 or 8888)"
-};
-
const ports = {
80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3128/tcp,
8000/tcp, 8080/tcp, 8888/tcp,
};
-
redef likely_server_ports += { ports };
-
# Initialize the HTTP logging stream and ports.
event bro_init() &priority=5
{
diff --git a/scripts/base/protocols/irc/__load__.bro b/scripts/base/protocols/irc/__load__.bro
index 5123385b0c..2e60cda0a6 100644
--- a/scripts/base/protocols/irc/__load__.bro
+++ b/scripts/base/protocols/irc/__load__.bro
@@ -1,3 +1,5 @@
@load ./main
@load ./dcc-send
@load ./file-analysis
+
+@load-sigs ./dpd.sig
\ No newline at end of file
diff --git a/scripts/base/protocols/irc/dcc-send.bro b/scripts/base/protocols/irc/dcc-send.bro
index 0e1d52af59..3194766946 100644
--- a/scripts/base/protocols/irc/dcc-send.bro
+++ b/scripts/base/protocols/irc/dcc-send.bro
@@ -39,8 +39,6 @@ export {
global dcc_expected_transfers: table[addr, port] of Info &read_expire=5mins;
-global extract_count: count = 0;
-
function set_dcc_mime(f: fa_file)
{
if ( ! f?$conns ) return;
@@ -75,8 +73,7 @@ function set_dcc_extraction_file(f: fa_file, filename: string)
function get_extraction_name(f: fa_file): string
{
- local r = fmt("%s-%s-%d.dat", extraction_prefix, f$id, extract_count);
- ++extract_count;
+ local r = fmt("%s-%s.dat", extraction_prefix, f$id);
return r;
}
@@ -188,5 +185,6 @@ event expected_connection_seen(c: connection, a: Analyzer::Tag) &priority=10
event connection_state_remove(c: connection) &priority=-5
{
- delete dcc_expected_transfers[c$id$resp_h, c$id$resp_p];
+ if ( [c$id$resp_h, c$id$resp_p] in dcc_expected_transfers )
+ delete dcc_expected_transfers[c$id$resp_h, c$id$resp_p];
}
diff --git a/scripts/base/protocols/irc/dpd.sig b/scripts/base/protocols/irc/dpd.sig
new file mode 100644
index 0000000000..308358d619
--- /dev/null
+++ b/scripts/base/protocols/irc/dpd.sig
@@ -0,0 +1,33 @@
+signature irc_client1 {
+ ip-proto == tcp
+ payload /(|.*[\r\n]) *[Uu][Ss][Ee][Rr] +.+[\n\r]+ *[Nn][Ii][Cc][Kk] +.*[\r\n]/
+ requires-reverse-signature irc_server_reply
+ tcp-state originator
+ enable "irc"
+}
+
+signature irc_client2 {
+ ip-proto == tcp
+ payload /(|.*[\r\n]) *[Nn][Ii][Cc][Kk] +.+[\r\n]+ *[Uu][Ss][Ee][Rr] +.+[\r\n]/
+ requires-reverse-signature irc_server_reply
+ tcp-state originator
+ enable "irc"
+}
+
+signature irc_server_reply {
+ ip-proto == tcp
+ payload /^(|.*[\n\r])(:[^ \n\r]+ )?[0-9][0-9][0-9] /
+ tcp-state responder
+}
+
+signature irc_server_to_server1 {
+ ip-proto == tcp
+ payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
+}
+
+signature irc_server_to_server2 {
+ ip-proto == tcp
+ payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
+ requires-reverse-signature irc_server_to_server1
+ enable "irc"
+}
diff --git a/scripts/base/protocols/irc/file-analysis.bro b/scripts/base/protocols/irc/file-analysis.bro
index e1fdc9c484..89cbe7990c 100644
--- a/scripts/base/protocols/irc/file-analysis.bro
+++ b/scripts/base/protocols/irc/file-analysis.bro
@@ -18,6 +18,7 @@ function get_file_handle(c: connection, is_orig: bool): string
module GLOBAL;
event get_file_handle(tag: Analyzer::Tag, c: connection, is_orig: bool)
+ &priority=5
{
if ( tag != Analyzer::ANALYZER_IRC_DATA ) return;
set_file_handle(IRC::get_file_handle(c, is_orig));
diff --git a/scripts/base/protocols/irc/main.bro b/scripts/base/protocols/irc/main.bro
index 490c39f54f..a57fc95448 100644
--- a/scripts/base/protocols/irc/main.bro
+++ b/scripts/base/protocols/irc/main.bro
@@ -38,13 +38,6 @@ redef record connection += {
irc: Info &optional;
};
-# Some common IRC ports.
-redef capture_filters += { ["irc-6666"] = "port 6666" };
-redef capture_filters += { ["irc-6667"] = "port 6667" };
-redef capture_filters += { ["irc-6668"] = "port 6668" };
-redef capture_filters += { ["irc-6669"] = "port 6669" };
-
-# DPD configuration.
const ports = { 6666/tcp, 6667/tcp, 6668/tcp, 6669/tcp };
redef likely_server_ports += { ports };
diff --git a/scripts/base/protocols/modbus/main.bro b/scripts/base/protocols/modbus/main.bro
index a418873306..d484e7582b 100644
--- a/scripts/base/protocols/modbus/main.bro
+++ b/scripts/base/protocols/modbus/main.bro
@@ -29,9 +29,6 @@ redef record connection += {
modbus: Info &optional;
};
-# Configure DPD and the packet filter.
-redef capture_filters += { ["modbus"] = "tcp port 502" };
-
const ports = { 502/tcp };
redef likely_server_ports += { ports };
diff --git a/scripts/base/protocols/pop3/__load__.bro b/scripts/base/protocols/pop3/__load__.bro
new file mode 100644
index 0000000000..c5ddf0e788
--- /dev/null
+++ b/scripts/base/protocols/pop3/__load__.bro
@@ -0,0 +1,2 @@
+
+@load-sigs ./dpd.sig
diff --git a/scripts/base/protocols/pop3/dpd.sig b/scripts/base/protocols/pop3/dpd.sig
new file mode 100644
index 0000000000..8d7e3567da
--- /dev/null
+++ b/scripts/base/protocols/pop3/dpd.sig
@@ -0,0 +1,13 @@
+signature dpd_pop3_server {
+ ip-proto == tcp
+ payload /^\+OK/
+ requires-reverse-signature dpd_pop3_client
+ enable "pop3"
+ tcp-state responder
+}
+
+signature dpd_pop3_client {
+ ip-proto == tcp
+ payload /(|.*[\r\n])[[:space:]]*([uU][sS][eE][rR][[:space:]]|[aA][pP][oO][pP][[:space:]]|[cC][aA][pP][aA]|[aA][uU][tT][hH])/
+ tcp-state originator
+}
diff --git a/scripts/base/protocols/smtp/__load__.bro b/scripts/base/protocols/smtp/__load__.bro
index bac9cc118f..3e3fde6947 100644
--- a/scripts/base/protocols/smtp/__load__.bro
+++ b/scripts/base/protocols/smtp/__load__.bro
@@ -2,3 +2,5 @@
@load ./entities
@load ./entities-excerpt
@load ./file-analysis
+
+@load-sigs ./dpd.sig
\ No newline at end of file
diff --git a/scripts/base/protocols/smtp/dpd.sig b/scripts/base/protocols/smtp/dpd.sig
new file mode 100644
index 0000000000..6fbde59059
--- /dev/null
+++ b/scripts/base/protocols/smtp/dpd.sig
@@ -0,0 +1,13 @@
+signature dpd_smtp_client {
+ ip-proto == tcp
+ payload /(|.*[\n\r])[[:space:]]*([hH][eE][lL][oO]|[eE][hH][lL][oO])/
+ requires-reverse-signature dpd_smtp_server
+ enable "smtp"
+ tcp-state originator
+}
+
+signature dpd_smtp_server {
+ ip-proto == tcp
+ payload /^[[:space:]]*220[[:space:]-]/
+ tcp-state responder
+}
\ No newline at end of file
diff --git a/scripts/base/protocols/smtp/entities.bro b/scripts/base/protocols/smtp/entities.bro
index 19cca30db1..b58766e51d 100644
--- a/scripts/base/protocols/smtp/entities.bro
+++ b/scripts/base/protocols/smtp/entities.bro
@@ -66,8 +66,6 @@ export {
global log_mime: event(rec: EntityInfo);
}
-global extract_count: count = 0;
-
event bro_init() &priority=5
{
Log::create_stream(SMTP::ENTITIES_LOG, [$columns=EntityInfo, $ev=log_mime]);
@@ -90,8 +88,7 @@ function set_session(c: connection, new_entity: bool)
function get_extraction_name(f: fa_file): string
{
- local r = fmt("%s-%s-%d.dat", extraction_prefix, f$id, extract_count);
- ++extract_count;
+ local r = fmt("%s-%s.dat", extraction_prefix, f$id);
return r;
}
@@ -127,7 +124,6 @@ event file_new(f: fa_file) &priority=5
[$tag=FileAnalysis::ANALYZER_EXTRACT,
$extract_filename=fname]);
extracting = T;
- ++extract_count;
}
c$smtp$current_entity$extraction_file = fname;
diff --git a/scripts/base/protocols/smtp/file-analysis.bro b/scripts/base/protocols/smtp/file-analysis.bro
index 17f9a32498..68ec6390dd 100644
--- a/scripts/base/protocols/smtp/file-analysis.bro
+++ b/scripts/base/protocols/smtp/file-analysis.bro
@@ -20,6 +20,7 @@ function get_file_handle(c: connection, is_orig: bool): string
module GLOBAL;
event get_file_handle(tag: Analyzer::Tag, c: connection, is_orig: bool)
+ &priority=5
{
if ( tag != Analyzer::ANALYZER_SMTP ) return;
set_file_handle(SMTP::get_file_handle(c, is_orig));
diff --git a/scripts/base/protocols/smtp/main.bro b/scripts/base/protocols/smtp/main.bro
index c7b3a452d2..d53128b06c 100644
--- a/scripts/base/protocols/smtp/main.bro
+++ b/scripts/base/protocols/smtp/main.bro
@@ -81,9 +81,6 @@ redef record connection += {
smtp_state: State &optional;
};
-# Configure DPD
-redef capture_filters += { ["smtp"] = "tcp port 25 or tcp port 587" };
-
const ports = { 25/tcp, 587/tcp };
redef likely_server_ports += { ports };
diff --git a/scripts/base/protocols/socks/__load__.bro b/scripts/base/protocols/socks/__load__.bro
index 0098b81a7a..80193afb6f 100644
--- a/scripts/base/protocols/socks/__load__.bro
+++ b/scripts/base/protocols/socks/__load__.bro
@@ -1,2 +1,4 @@
@load ./consts
-@load ./main
\ No newline at end of file
+@load ./main
+
+@load-sigs ./dpd.sig
\ No newline at end of file
diff --git a/scripts/base/protocols/socks/dpd.sig b/scripts/base/protocols/socks/dpd.sig
new file mode 100644
index 0000000000..3dcd7a945a
--- /dev/null
+++ b/scripts/base/protocols/socks/dpd.sig
@@ -0,0 +1,48 @@
+signature dpd_socks4_client {
+ ip-proto == tcp
+ # '32' is a rather arbitrary max length for the user name.
+ payload /^\x04[\x01\x02].{0,32}\x00/
+ tcp-state originator
+}
+
+signature dpd_socks4_server {
+ ip-proto == tcp
+ requires-reverse-signature dpd_socks4_client
+ payload /^\x00[\x5a\x5b\x5c\x5d]/
+ tcp-state responder
+ enable "socks"
+}
+
+signature dpd_socks4_reverse_client {
+ ip-proto == tcp
+ # '32' is a rather arbitrary max length for the user name.
+ payload /^\x04[\x01\x02].{0,32}\x00/
+ tcp-state responder
+}
+
+signature dpd_socks4_reverse_server {
+ ip-proto == tcp
+ requires-reverse-signature dpd_socks4_reverse_client
+ payload /^\x00[\x5a\x5b\x5c\x5d]/
+ tcp-state originator
+ enable "socks"
+}
+
+signature dpd_socks5_client {
+ ip-proto == tcp
+ # Watch for a few authentication methods to reduce false positives.
+ payload /^\x05.[\x00\x01\x02]/
+ tcp-state originator
+}
+
+signature dpd_socks5_server {
+ ip-proto == tcp
+ requires-reverse-signature dpd_socks5_client
+ # Watch for a single authentication method to be chosen by the server or
+ # the server to indicate that no authentication is required.
+ payload /^\x05(\x00|\x01[\x00\x01\x02])/
+ tcp-state responder
+ enable "socks"
+}
+
+
diff --git a/scripts/base/protocols/socks/main.bro b/scripts/base/protocols/socks/main.bro
index a188646515..f697b355c1 100644
--- a/scripts/base/protocols/socks/main.bro
+++ b/scripts/base/protocols/socks/main.bro
@@ -47,10 +47,6 @@ redef record connection += {
socks: SOCKS::Info &optional;
};
-# Configure DPD
-redef capture_filters += { ["socks"] = "tcp port 1080" };
-redef likely_server_ports += { 1080/tcp };
-
function set_session(c: connection, version: count)
{
if ( ! c?$socks )
diff --git a/scripts/base/protocols/ssh/__load__.bro b/scripts/base/protocols/ssh/__load__.bro
index d551be57d3..0f3cb011f8 100644
--- a/scripts/base/protocols/ssh/__load__.bro
+++ b/scripts/base/protocols/ssh/__load__.bro
@@ -1 +1,3 @@
-@load ./main
\ No newline at end of file
+@load ./main
+
+@load-sigs ./dpd.sig
\ No newline at end of file
diff --git a/scripts/base/protocols/ssh/dpd.sig b/scripts/base/protocols/ssh/dpd.sig
new file mode 100644
index 0000000000..95e22908ab
--- /dev/null
+++ b/scripts/base/protocols/ssh/dpd.sig
@@ -0,0 +1,13 @@
+signature dpd_ssh_client {
+ ip-proto == tcp
+ payload /^[sS][sS][hH]-/
+ requires-reverse-signature dpd_ssh_server
+ enable "ssh"
+ tcp-state originator
+}
+
+signature dpd_ssh_server {
+ ip-proto == tcp
+ payload /^[sS][sS][hH]-/
+ tcp-state responder
+}
diff --git a/scripts/base/protocols/ssh/main.bro b/scripts/base/protocols/ssh/main.bro
index 8e1c5515b5..53b61f00d8 100644
--- a/scripts/base/protocols/ssh/main.bro
+++ b/scripts/base/protocols/ssh/main.bro
@@ -70,17 +70,13 @@ export {
global log_ssh: event(rec: Info);
}
-# Configure DPD and the packet filter
-
-const ports = { 22/tcp };
-
-redef capture_filters += { ["ssh"] = "tcp port 22" };
-redef likely_server_ports += { ports };
-
redef record connection += {
ssh: Info &optional;
};
+const ports = { 22/tcp };
+redef likely_server_ports += { ports };
+
event bro_init() &priority=5
{
Log::create_stream(SSH::LOG, [$columns=Info, $ev=log_ssh]);
@@ -118,7 +114,7 @@ function check_ssh_connection(c: connection, done: bool)
# Responder must have sent fewer than 40 packets.
c$resp$num_pkts < 40 &&
# If there was a content gap we can't reliably do this heuristic.
- c?$conn && c$conn$missed_bytes == 0)# &&
+ c?$conn && c$conn$missed_bytes == 0 )# &&
# Only "normal" connections can count.
#c$conn?$conn_state && c$conn$conn_state in valid_states )
{
@@ -178,6 +174,7 @@ event ssh_watcher(c: connection)
if ( ! connection_exists(id) )
return;
+ lookup_connection(c$id);
check_ssh_connection(c, F);
if ( ! c$ssh$done )
schedule +15secs { ssh_watcher(c) };
diff --git a/scripts/base/protocols/ssl/__load__.bro b/scripts/base/protocols/ssl/__load__.bro
index 239438047c..80cb4e216a 100644
--- a/scripts/base/protocols/ssl/__load__.bro
+++ b/scripts/base/protocols/ssl/__load__.bro
@@ -1,3 +1,5 @@
@load ./consts
@load ./main
@load ./mozilla-ca-list
+
+@load-sigs ./dpd.sig
\ No newline at end of file
diff --git a/scripts/base/protocols/ssl/dpd.sig b/scripts/base/protocols/ssl/dpd.sig
new file mode 100644
index 0000000000..b36b9a5aa5
--- /dev/null
+++ b/scripts/base/protocols/ssl/dpd.sig
@@ -0,0 +1,15 @@
+signature dpd_ssl_server {
+ ip-proto == tcp
+ # Server hello.
+ payload /^(\x16\x03[\x00\x01\x02]..\x02...\x03[\x00\x01\x02]|...?\x04..\x00\x02).*/
+ requires-reverse-signature dpd_ssl_client
+ enable "ssl"
+ tcp-state responder
+}
+
+signature dpd_ssl_client {
+ ip-proto == tcp
+ # Client hello.
+ payload /^(\x16\x03[\x00\x01\x02]..\x01...\x03[\x00\x01\x02]|...?\x01[\x00\x01\x02][\x02\x03]).*/
+ tcp-state originator
+}
diff --git a/scripts/base/protocols/ssl/main.bro b/scripts/base/protocols/ssl/main.bro
index 36d0c3f54d..65526182ac 100644
--- a/scripts/base/protocols/ssl/main.bro
+++ b/scripts/base/protocols/ssl/main.bro
@@ -94,35 +94,12 @@ redef record Info += {
delay_tokens: set[string] &optional;
};
-redef capture_filters += {
- ["ssl"] = "tcp port 443",
- ["nntps"] = "tcp port 563",
- ["imap4-ssl"] = "tcp port 585",
- ["sshell"] = "tcp port 614",
- ["ldaps"] = "tcp port 636",
- ["ftps-data"] = "tcp port 989",
- ["ftps"] = "tcp port 990",
- ["telnets"] = "tcp port 992",
- ["imaps"] = "tcp port 993",
- ["ircs"] = "tcp port 994",
- ["pop3s"] = "tcp port 995",
- ["xmpps"] = "tcp port 5223",
-};
-
const ports = {
443/tcp, 563/tcp, 585/tcp, 614/tcp, 636/tcp,
989/tcp, 990/tcp, 992/tcp, 993/tcp, 995/tcp, 5223/tcp
-} &redef;
-
+};
redef likely_server_ports += { ports };
-# A queue that buffers log records.
-global log_delay_queue: table[count] of Info;
-# The top queue index where records are added.
-global log_delay_queue_head = 0;
-# The bottom queue index that points to the next record to be flushed.
-global log_delay_queue_tail = 0;
-
event bro_init() &priority=5
{
Log::create_stream(SSL::LOG, [$columns=Info, $ev=log_ssl]);
@@ -138,26 +115,17 @@ function set_session(c: connection)
function delay_log(info: Info, token: string)
{
- info$delay_tokens = set();
+ if ( ! info?$delay_tokens )
+ info$delay_tokens = set();
add info$delay_tokens[token];
-
- log_delay_queue[log_delay_queue_head] = info;
- ++log_delay_queue_head;
}
function undelay_log(info: Info, token: string)
{
- if ( token in info$delay_tokens )
+ if ( info?$delay_tokens && token in info$delay_tokens )
delete info$delay_tokens[token];
}
-global log_record: function(info: Info);
-
-event delay_logging(info: Info)
- {
- log_record(info);
- }
-
function log_record(info: Info)
{
if ( ! info?$delay_tokens || |info$delay_tokens| == 0 )
@@ -166,26 +134,14 @@ function log_record(info: Info)
}
else
{
- for ( unused_index in log_delay_queue )
+ when ( |info$delay_tokens| == 0 )
{
- if ( log_delay_queue_head == log_delay_queue_tail )
- return;
- if ( |log_delay_queue[log_delay_queue_tail]$delay_tokens| > 0 )
- {
- if ( info$ts + max_log_delay > network_time() )
- {
- schedule 1sec { delay_logging(info) };
- return;
- }
- else
- {
- Reporter::info(fmt("SSL delay tokens not released in time (%s)",
- info$delay_tokens));
- }
- }
- Log::write(SSL::LOG, log_delay_queue[log_delay_queue_tail]);
- delete log_delay_queue[log_delay_queue_tail];
- ++log_delay_queue_tail;
+ log_record(info);
+ }
+ timeout SSL::max_log_delay
+ {
+ Reporter::info(fmt("SSL delay tokens not released in time (%s tokens remaining)",
+ |info$delay_tokens|));
}
}
}
@@ -295,15 +251,3 @@ event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count,
if ( c?$ssl )
finish(c);
}
-
-event bro_done()
- {
- if ( |log_delay_queue| == 0 )
- return;
- for ( unused_index in log_delay_queue )
- {
- Log::write(SSL::LOG, log_delay_queue[log_delay_queue_tail]);
- delete log_delay_queue[log_delay_queue_tail];
- ++log_delay_queue_tail;
- }
- }
diff --git a/scripts/base/protocols/syslog/main.bro b/scripts/base/protocols/syslog/main.bro
index 7c15fb4fae..afe562c890 100644
--- a/scripts/base/protocols/syslog/main.bro
+++ b/scripts/base/protocols/syslog/main.bro
@@ -26,15 +26,13 @@ export {
};
}
-redef capture_filters += { ["syslog"] = "port 514" };
-
-const ports = { 514/udp };
-redef likely_server_ports += { ports };
-
redef record connection += {
syslog: Info &optional;
};
+const ports = { 514/udp };
+redef likely_server_ports += { ports };
+
event bro_init() &priority=5
{
Log::create_stream(Syslog::LOG, [$columns=Info]);
diff --git a/scripts/base/protocols/tunnels/__load__.bro b/scripts/base/protocols/tunnels/__load__.bro
new file mode 100644
index 0000000000..9de7b6ff19
--- /dev/null
+++ b/scripts/base/protocols/tunnels/__load__.bro
@@ -0,0 +1 @@
+@load-sigs ./dpd.sig
\ No newline at end of file
diff --git a/scripts/base/protocols/tunnels/dpd.sig b/scripts/base/protocols/tunnels/dpd.sig
new file mode 100644
index 0000000000..0c66775f5d
--- /dev/null
+++ b/scripts/base/protocols/tunnels/dpd.sig
@@ -0,0 +1,14 @@
+# Provide DPD signatures for tunneling protocols that otherwise
+# wouldn't be detected at all.
+
+signature dpd_ayiya {
+ ip-proto = udp
+ payload /^..\x11\x29/
+ enable "ayiya"
+}
+
+signature dpd_teredo {
+ ip-proto = udp
+ payload /^(\x00\x00)|(\x00\x01)|([\x60-\x6f])/
+ enable "teredo"
+}
diff --git a/scripts/policy/frameworks/packet-filter/shunt.bro b/scripts/policy/frameworks/packet-filter/shunt.bro
new file mode 100644
index 0000000000..b87369ee62
--- /dev/null
+++ b/scripts/policy/frameworks/packet-filter/shunt.bro
@@ -0,0 +1,169 @@
+@load base/frameworks/notice
+@load base/frameworks/packet-filter
+
+module PacketFilter;
+
+export {
+ ## The maximum number of BPF based shunts that Bro is allowed to perform.
+ const max_bpf_shunts = 100 &redef;
+
+ ## Call this function to use BPF to shunt a connection (to prevent the
+ ## data packets from reaching Bro). For TCP connections, control packets
+ ## are still allowed through so that Bro can continue logging the connection
+ ## and it can stop shunting once the connection ends.
+ global shunt_conn: function(id: conn_id): bool;
+
+ ## This function will use a BPF expression to shunt traffic between
+ ## the two hosts given in the `conn_id` so that the traffic is never
+ ## exposed to Bro's traffic processing.
+ global shunt_host_pair: function(id: conn_id): bool;
+
+ ## Remove shunting for a host pair given as a `conn_id`. The filter
+ ## is not immediately removed. It waits for the occassional filter
+ ## update done by the `PacketFilter` framework.
+ global unshunt_host_pair: function(id: conn_id): bool;
+
+ ## Performs the same function as the `unshunt_host_pair` function, but
+ ## it forces an immediate filter update.
+ global force_unshunt_host_pair: function(id: conn_id): bool;
+
+ ## Retrieve the currently shunted connections.
+ global current_shunted_conns: function(): set[conn_id];
+
+ ## Retrieve the currently shunted host pairs.
+ global current_shunted_host_pairs: function(): set[conn_id];
+
+ redef enum Notice::Type += {
+ ## Indicates that :bro:id:`max_bpf_shunts` connections are already
+ ## being shunted with BPF filters and no more are allowed.
+ No_More_Conn_Shunts_Available,
+
+ ## Limitations in BPF make shunting some connections with BPF impossible.
+ ## This notice encompasses those various cases.
+ Cannot_BPF_Shunt_Conn,
+ };
+}
+
+global shunted_conns: set[conn_id];
+global shunted_host_pairs: set[conn_id];
+
+function shunt_filters()
+ {
+ # NOTE: this could wrongly match if a connection happens with the ports reversed.
+ local tcp_filter = "";
+ local udp_filter = "";
+ for ( id in shunted_conns )
+ {
+ local prot = get_port_transport_proto(id$resp_p);
+
+ local filt = fmt("host %s and port %d and host %s and port %d", id$orig_h, id$orig_p, id$resp_h, id$resp_p);
+ if ( prot == udp )
+ udp_filter = combine_filters(udp_filter, "and", filt);
+ else if ( prot == tcp )
+ tcp_filter = combine_filters(tcp_filter, "and", filt);
+ }
+ if ( tcp_filter != "" )
+ tcp_filter = combine_filters("tcp and tcp[tcpflags] & (tcp-syn|tcp-fin|tcp-rst) == 0", "and", tcp_filter);
+ local conn_shunt_filter = combine_filters(tcp_filter, "and", udp_filter);
+
+ local hp_shunt_filter = "";
+ for ( id in shunted_host_pairs )
+ hp_shunt_filter = combine_filters(hp_shunt_filter, "and", fmt("host %s and host %s", id$orig_h, id$resp_h));
+
+ local filter = combine_filters(conn_shunt_filter, "and", hp_shunt_filter);
+ if ( filter != "" )
+ PacketFilter::exclude("shunt_filters", filter);
+}
+
+event bro_init() &priority=5
+ {
+ register_filter_plugin([
+ $func()={ return shunt_filters(); }
+ ]);
+ }
+
+function current_shunted_conns(): set[conn_id]
+ {
+ return shunted_conns;
+ }
+
+function current_shunted_host_pairs(): set[conn_id]
+ {
+ return shunted_host_pairs;
+ }
+
+function reached_max_shunts(): bool
+ {
+ if ( |shunted_conns| + |shunted_host_pairs| > max_bpf_shunts )
+ {
+ NOTICE([$note=No_More_Conn_Shunts_Available,
+ $msg=fmt("%d BPF shunts are in place and no more will be added until space clears.", max_bpf_shunts)]);
+ return T;
+ }
+ else
+ return F;
+ }
+
+function shunt_host_pair(id: conn_id): bool
+ {
+ PacketFilter::filter_changed = T;
+
+ if ( reached_max_shunts() )
+ return F;
+
+ add shunted_host_pairs[id];
+ install();
+ return T;
+ }
+
+function unshunt_host_pair(id: conn_id): bool
+ {
+ PacketFilter::filter_changed = T;
+
+ if ( id in shunted_host_pairs )
+ {
+ delete shunted_host_pairs[id];
+ return T;
+ }
+ else
+ return F;
+ }
+
+function force_unshunt_host_pair(id: conn_id): bool
+ {
+ if ( unshunt_host_pair(id) )
+ {
+ install();
+ return T;
+ }
+ else
+ return F;
+ }
+
+function shunt_conn(id: conn_id): bool
+ {
+ if ( is_v6_addr(id$orig_h) )
+ {
+ NOTICE([$note=Cannot_BPF_Shunt_Conn,
+ $msg="IPv6 connections can't be shunted with BPF due to limitations in BPF",
+ $sub="ipv6_conn",
+ $id=id, $identifier=cat(id)]);
+ return F;
+ }
+
+ if ( reached_max_shunts() )
+ return F;
+
+ PacketFilter::filter_changed = T;
+ add shunted_conns[id];
+ install();
+ return T;
+ }
+
+event connection_state_remove(c: connection) &priority=-5
+ {
+ # Don't rebuild the filter right away because the packet filter framework
+ # will check every few minutes and update the filter if things have changed.
+ if ( c$id in shunted_conns )
+ delete shunted_conns[c$id];
+ }
diff --git a/scripts/policy/misc/load-balancing.bro b/scripts/policy/misc/load-balancing.bro
new file mode 100644
index 0000000000..fe07dd64da
--- /dev/null
+++ b/scripts/policy/misc/load-balancing.bro
@@ -0,0 +1,132 @@
+##! This script implements the "Bro side" of several load balancing
+##! approaches for Bro clusters.
+
+@load base/frameworks/cluster
+@load base/frameworks/packet-filter
+
+module LoadBalancing;
+
+export {
+
+ type Method: enum {
+ ## Apply BPF filters to each worker in a way that causes them to
+ ## automatically flow balance traffic between them.
+ AUTO_BPF,
+ ## Load balance traffic across the workers by making each one apply
+ ## a restrict filter to only listen to a single MAC address. This
+ ## is a somewhat common deployment option for sites doing network
+ ## based load balancing with MAC address rewriting and passing the
+ ## traffic to a single interface. Multiple MAC addresses will show
+ ## up on the same interface and need to be filtered to a single address.
+ #MAC_ADDR_BPF,
+ };
+
+ ## Defines the method of load balancing to use.
+ const method = AUTO_BPF &redef;
+
+ # Configure the cluster framework to enable the load balancing filter configuration.
+ #global send_filter: event(for_node: string, filter: string);
+ #global confirm_filter_installation: event(success: bool);
+
+ redef record Cluster::Node += {
+ ## A BPF filter for load balancing traffic sniffed on a single interface
+ ## across a number of processes. In normal uses, this will be assigned
+ ## dynamically by the manager and installed by the workers.
+ lb_filter: string &optional;
+ };
+}
+
+#redef Cluster::manager2worker_events += /LoadBalancing::send_filter/;
+#redef Cluster::worker2manager_events += /LoadBalancing::confirm_filter_installation/;
+
+@if ( Cluster::is_enabled() )
+
+@if ( Cluster::local_node_type() == Cluster::MANAGER )
+
+event bro_init() &priority=5
+ {
+ if ( method != AUTO_BPF )
+ return;
+
+ local worker_ip_interface: table[addr, string] of count = table();
+ for ( n in Cluster::nodes )
+ {
+ local this_node = Cluster::nodes[n];
+
+ # Only workers!
+ if ( this_node$node_type != Cluster::WORKER ||
+ ! this_node?$interface )
+ next;
+
+ if ( [this_node$ip, this_node$interface] !in worker_ip_interface )
+ worker_ip_interface[this_node$ip, this_node$interface] = 0;
+ ++worker_ip_interface[this_node$ip, this_node$interface];
+ }
+
+ # Now that we've counted up how many processes are running on an interface
+ # let's create the filters for each worker.
+ local lb_proc_track: table[addr, string] of count = table();
+ for ( no in Cluster::nodes )
+ {
+ local that_node = Cluster::nodes[no];
+ if ( that_node$node_type == Cluster::WORKER &&
+ that_node?$interface && [that_node$ip, that_node$interface] in worker_ip_interface )
+ {
+ if ( [that_node$ip, that_node$interface] !in lb_proc_track )
+ lb_proc_track[that_node$ip, that_node$interface] = 0;
+
+ local this_lb_proc = lb_proc_track[that_node$ip, that_node$interface];
+ local total_lb_procs = worker_ip_interface[that_node$ip, that_node$interface];
+
+ ++lb_proc_track[that_node$ip, that_node$interface];
+ if ( total_lb_procs > 1 )
+ {
+ that_node$lb_filter = PacketFilter::sample_filter(total_lb_procs, this_lb_proc);
+ Communication::nodes[no]$capture_filter = that_node$lb_filter;
+ }
+ }
+ }
+ }
+
+#event remote_connection_established(p: event_peer) &priority=-5
+# {
+# if ( is_remote_event() )
+# return;
+#
+# local for_node = p$descr;
+# # Send the filter to the peer.
+# if ( for_node in Cluster::nodes &&
+# Cluster::nodes[for_node]?$lb_filter )
+# {
+# local filter = Cluster::nodes[for_node]$lb_filter;
+# event LoadBalancing::send_filter(for_node, filter);
+# }
+# }
+
+#event LoadBalancing::confirm_filter_installation(success: bool)
+# {
+# # This doesn't really matter yet since we aren't getting back a meaningful success response.
+# }
+
+@endif
+
+
+@if ( Cluster::local_node_type() == Cluster::WORKER )
+
+#event LoadBalancing::send_filter(for_node: string, filter: string)
+event remote_capture_filter(p: event_peer, filter: string)
+ {
+ #if ( for_node !in Cluster::nodes )
+ # return;
+ #
+ #if ( Cluster::node == for_node )
+ # {
+ restrict_filters["lb_filter"] = filter;
+ PacketFilter::install();
+ #event LoadBalancing::confirm_filter_installation(T);
+ # }
+ }
+
+@endif
+
+@endif
diff --git a/scripts/policy/tuning/defaults/packet-fragments.bro b/scripts/policy/tuning/defaults/packet-fragments.bro
index 30d7e23729..24b18d5917 100644
--- a/scripts/policy/tuning/defaults/packet-fragments.bro
+++ b/scripts/policy/tuning/defaults/packet-fragments.bro
@@ -3,7 +3,9 @@
## This normally isn't used because of the default open packet filter
## but we set it anyway in case the user is using a packet filter.
-redef capture_filters += { ["frag"] = "(ip[6:2] & 0x3fff != 0) and tcp" };
+## Note: This was removed because the default model now is to have a wide
+## open packet filter.
+#redef capture_filters += { ["frag"] = "(ip[6:2] & 0x3fff != 0) and tcp" };
## Shorten the fragment timeout from never expiring to expiring fragments after
## five minutes.
diff --git a/scripts/test-all-policy.bro b/scripts/test-all-policy.bro
index daad03d9b6..1fd34d6f2f 100644
--- a/scripts/test-all-policy.bro
+++ b/scripts/test-all-policy.bro
@@ -24,6 +24,7 @@
@load frameworks/intel/smtp.bro
@load frameworks/intel/ssl.bro
@load frameworks/intel/where-locations.bro
+@load frameworks/packet-filter/shunt.bro
@load frameworks/software/version-changes.bro
@load frameworks/software/vulnerable.bro
@load integration/barnyard2/__load__.bro
@@ -35,6 +36,7 @@
@load misc/capture-loss.bro
@load misc/detect-traceroute/__load__.bro
@load misc/detect-traceroute/main.bro
+@load misc/load-balancing.bro
@load misc/loaded-scripts.bro
@load misc/profiling.bro
@load misc/scan.bro
diff --git a/src/3rdparty/sqlite3.c b/src/3rdparty/sqlite3.c
index ba6a30e132..8d473d32b7 100644
--- a/src/3rdparty/sqlite3.c
+++ b/src/3rdparty/sqlite3.c
@@ -1,9 +1,6 @@
-# define SQLITE_THREADSAFE 2
-# define SQLITE_DEFAULT_MEMSTATUS 0
-
/******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite
-** version 3.7.16.2. By combining all the individual C code files into this
+** version 3.7.17. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements
@@ -365,11 +362,11 @@
** We support that for legacy.
*/
#if !defined(SQLITE_THREADSAFE)
-#if defined(THREADSAFE)
-# define SQLITE_THREADSAFE THREADSAFE
-#else
-# define SQLITE_THREADSAFE 1 /* IMP: R-07272-22309 */
-#endif
+# if defined(THREADSAFE)
+# define SQLITE_THREADSAFE THREADSAFE
+# else
+# define SQLITE_THREADSAFE 1 /* IMP: R-07272-22309 */
+# endif
#endif
/*
@@ -681,9 +678,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.7.16.2"
-#define SQLITE_VERSION_NUMBER 3007016
-#define SQLITE_SOURCE_ID "2013-04-12 11:52:43 cbea02d93865ce0e06789db95fd9168ebac970c7"
+#define SQLITE_VERSION "3.7.17"
+#define SQLITE_VERSION_NUMBER 3007017
+#define SQLITE_SOURCE_ID "2013-05-20 00:56:22 118a3b35693b134d56ebd780123b7fd6f1497668"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -999,6 +996,8 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_FORMAT 24 /* Auxiliary database format error */
#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */
#define SQLITE_NOTADB 26 /* File opened that is not a database file */
+#define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */
+#define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */
#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */
#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */
/* end-of-error-codes */
@@ -1049,6 +1048,7 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8))
#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8))
#define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8))
+#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8))
#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8))
@@ -1068,6 +1068,8 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8))
#define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8))
#define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8))
+#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8))
+#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8))
/*
** CAPI3REF: Flags For File Open Operations
@@ -1307,6 +1309,9 @@ struct sqlite3_io_methods {
void (*xShmBarrier)(sqlite3_file*);
int (*xShmUnmap)(sqlite3_file*, int deleteFlag);
/* Methods above are valid for version 2 */
+ int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp);
+ int (*xUnfetch)(sqlite3_file*, sqlite3_int64 iOfst, void *p);
+ /* Methods above are valid for version 3 */
/* Additional methods may be added in future releases */
};
@@ -1443,7 +1448,8 @@ struct sqlite3_io_methods {
** it is able to override built-in [PRAGMA] statements.
**
**
[[SQLITE_FCNTL_BUSYHANDLER]]
-** ^This file-control may be invoked by SQLite on the database file handle
+** ^The [SQLITE_FCNTL_BUSYHANDLER]
+** file-control may be invoked by SQLite on the database file handle
** shortly after it is opened in order to provide a custom VFS with access
** to the connections busy-handler callback. The argument is of type (void **)
** - an array of two (void *) values. The first (void *) actually points
@@ -1454,13 +1460,24 @@ struct sqlite3_io_methods {
** current operation.
**
**
[[SQLITE_FCNTL_TEMPFILENAME]]
-** ^Application can invoke this file-control to have SQLite generate a
+** ^Application can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control
+** to have SQLite generate a
** temporary filename using the same algorithm that is followed to generate
** temporary filenames for TEMP tables and other internal uses. The
** argument should be a char** which will be filled with the filename
** written into memory obtained from [sqlite3_malloc()]. The caller should
** invoke [sqlite3_free()] on the result to avoid a memory leak.
**
+**
[[SQLITE_FCNTL_MMAP_SIZE]]
+** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the
+** maximum number of bytes that will be used for memory-mapped I/O.
+** The argument is a pointer to a value of type sqlite3_int64 that
+** is an advisory maximum number of bytes in the file to memory map. The
+** pointer is overwritten with the old value. The limit is not changed if
+** the value originally pointed to is negative, and so the current limit
+** can be queried by passing in a pointer to a negative number. This
+** file-control is used internally to implement [PRAGMA mmap_size].
+**
**
*/
#define SQLITE_FCNTL_LOCKSTATE 1
@@ -1479,6 +1496,7 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_PRAGMA 14
#define SQLITE_FCNTL_BUSYHANDLER 15
#define SQLITE_FCNTL_TEMPFILENAME 16
+#define SQLITE_FCNTL_MMAP_SIZE 18
/*
** CAPI3REF: Mutex Handle
@@ -2145,7 +2163,9 @@ struct sqlite3_mem_methods {
** page cache implementation into that object.)^
**
** [[SQLITE_CONFIG_LOG]]
SQLITE_CONFIG_LOG
-**
^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a
+**
The SQLITE_CONFIG_LOG option is used to configure the SQLite
+** global [error log].
+** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a
** function with a call signature of void(*)(void*,int,const char*),
** and a pointer to void. ^If the function pointer is not NULL, it is
** invoked by [sqlite3_log()] to process each logging event. ^If the
@@ -2191,12 +2211,12 @@ struct sqlite3_mem_methods {
**
SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE
**
These options are obsolete and should not be used by new code.
** They are retained for backwards compatibility but are now no-ops.
-**
+**
**
** [[SQLITE_CONFIG_SQLLOG]]
**
SQLITE_CONFIG_SQLLOG
**
This option is only available if sqlite is compiled with the
-** SQLITE_ENABLE_SQLLOG pre-processor macro defined. The first argument should
+** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should
** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int).
** The second should be of type (void*). The callback is invoked by the library
** in three separate circumstances, identified by the value passed as the
@@ -2206,7 +2226,23 @@ struct sqlite3_mem_methods {
** fourth parameter is 1, then the SQL statement that the third parameter
** points to has just been executed. Or, if the fourth parameter is 2, then
** the connection being passed as the second parameter is being closed. The
-** third parameter is passed NULL In this case.
+** third parameter is passed NULL In this case. An example of using this
+** configuration option can be seen in the "test_sqllog.c" source file in
+** the canonical SQLite source tree.
+**
+** [[SQLITE_CONFIG_MMAP_SIZE]]
+**
SQLITE_CONFIG_MMAP_SIZE
+**
SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values
+** that are the default mmap size limit (the default setting for
+** [PRAGMA mmap_size]) and the maximum allowed mmap size limit.
+** The default setting can be overridden by each database connection using
+** either the [PRAGMA mmap_size] command, or by using the
+** [SQLITE_FCNTL_MMAP_SIZE] file control. The maximum allowed mmap size
+** cannot be changed at run-time. Nor may the maximum allowed mmap size
+** exceed the compile-time maximum mmap size set by the
+** [SQLITE_MAX_MMAP_SIZE] compile-time option.
+** If either argument to this option is negative, then that argument is
+** changed to its compile-time default.
**
*/
#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
@@ -2230,6 +2266,7 @@ struct sqlite3_mem_methods {
#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */
#define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */
#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */
+#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */
/*
** CAPI3REF: Database Connection Configuration Options
@@ -3063,6 +3100,9 @@ SQLITE_API int sqlite3_set_authorizer(
** as each triggered subprogram is entered. The callbacks for triggers
** contain a UTF-8 SQL comment that identifies the trigger.)^
**
+** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit
+** the length of [bound parameter] expansion in the output of sqlite3_trace().
+**
** ^The callback function registered by sqlite3_profile() is invoked
** as each SQL statement finishes. ^The profile callback contains
** the original statement text and an estimate of wall-clock time
@@ -3601,7 +3641,8 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
**
** ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it
** always used to do, [sqlite3_step()] will automatically recompile the SQL
-** statement and try to run it again.
+** statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY]
+** retries will occur before sqlite3_step() gives up and returns an error.
**
**
**
@@ -3805,6 +3846,9 @@ typedef struct sqlite3_context sqlite3_context;
** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 999).
**
** ^The third argument is the value to bind to the parameter.
+** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16()
+** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter
+** is ignored and the end result is the same as sqlite3_bind_null().
**
** ^(In those routines that have a fourth argument, its value is the
** number of bytes in the parameter. To be clear: the value is the
@@ -4761,7 +4805,7 @@ SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(voi
** the content before returning.
**
** The typedef is necessary to work around problems in certain
-** C++ compilers. See ticket #2191.
+** C++ compilers.
*/
typedef void (*sqlite3_destructor_type)(void*);
#define SQLITE_STATIC ((sqlite3_destructor_type)0)
@@ -5560,11 +5604,20 @@ SQLITE_API int sqlite3_table_column_metadata(
** ^This interface loads an SQLite extension library from the named file.
**
** ^The sqlite3_load_extension() interface attempts to load an
-** SQLite extension library contained in the file zFile.
+** [SQLite extension] library contained in the file zFile. If
+** the file cannot be loaded directly, attempts are made to load
+** with various operating-system specific extensions added.
+** So for example, if "samplelib" cannot be loaded, then names like
+** "samplelib.so" or "samplelib.dylib" or "samplelib.dll" might
+** be tried also.
**
** ^The entry point is zProc.
-** ^zProc may be 0, in which case the name of the entry point
-** defaults to "sqlite3_extension_init".
+** ^(zProc may be 0, in which case SQLite will try to come up with an
+** entry point name on its own. It first tries "sqlite3_extension_init".
+** If that does not work, it constructs a name "sqlite3_X_init" where the
+** X is consists of the lower-case equivalent of all ASCII alphabetic
+** characters in the filename from the last "/" to the first following
+** "." and omitting any initial "lib".)^
** ^The sqlite3_load_extension() interface returns
** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong.
** ^If an error occurs and pzErrMsg is not 0, then the
@@ -5590,11 +5643,11 @@ SQLITE_API int sqlite3_load_extension(
** CAPI3REF: Enable Or Disable Extension Loading
**
** ^So as not to open security holes in older applications that are
-** unprepared to deal with extension loading, and as a means of disabling
-** extension loading while evaluating user-entered SQL, the following API
+** unprepared to deal with [extension loading], and as a means of disabling
+** [extension loading] while evaluating user-entered SQL, the following API
** is provided to turn the [sqlite3_load_extension()] mechanism on and off.
**
-** ^Extension loading is off by default. See ticket #1863.
+** ^Extension loading is off by default.
** ^Call the sqlite3_enable_load_extension() routine with onoff==1
** to turn extension loading on and call it with onoff==0 to turn
** it back off again.
@@ -5606,7 +5659,7 @@ SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
**
** ^This interface causes the xEntryPoint() function to be invoked for
** each new [database connection] that is created. The idea here is that
-** xEntryPoint() is the entry point for a statically linked SQLite extension
+** xEntryPoint() is the entry point for a statically linked [SQLite extension]
** that is to be automatically loaded into all new database connections.
**
** ^(Even though the function prototype shows that xEntryPoint() takes
@@ -7386,10 +7439,25 @@ SQLITE_API int sqlite3_unlock_notify(
SQLITE_API int sqlite3_stricmp(const char *, const char *);
SQLITE_API int sqlite3_strnicmp(const char *, const char *, int);
+/*
+** CAPI3REF: String Globbing
+*
+** ^The [sqlite3_strglob(P,X)] interface returns zero if string X matches
+** the glob pattern P, and it returns non-zero if string X does not match
+** the glob pattern P. ^The definition of glob pattern matching used in
+** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the
+** SQL dialect used by SQLite. ^The sqlite3_strglob(P,X) function is case
+** sensitive.
+**
+** Note that this routine returns zero on a match and non-zero if the strings
+** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
+*/
+SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr);
+
/*
** CAPI3REF: Error Logging Interface
**
-** ^The [sqlite3_log()] interface writes a message into the error log
+** ^The [sqlite3_log()] interface writes a message into the [error log]
** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()].
** ^If logging is enabled, the zFormat string and subsequent arguments are
** used with [sqlite3_snprintf()] to generate the final output string.
@@ -8074,6 +8142,7 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*);
*/
#ifndef SQLITE_TEMP_STORE
# define SQLITE_TEMP_STORE 1
+# define SQLITE_TEMP_STORE_xc 1 /* Exclude from ctime.c */
#endif
/*
@@ -8221,6 +8290,49 @@ SQLITE_PRIVATE const int sqlite3one;
# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0)
#endif
+/*
+** Disable MMAP on platforms where it is known to not work
+*/
+#if defined(__OpenBSD__) || defined(__QNXNTO__)
+# undef SQLITE_MAX_MMAP_SIZE
+# define SQLITE_MAX_MMAP_SIZE 0
+#endif
+
+/*
+** Default maximum size of memory used by memory-mapped I/O in the VFS
+*/
+#ifdef __APPLE__
+# include
+# if TARGET_OS_IPHONE
+# undef SQLITE_MAX_MMAP_SIZE
+# define SQLITE_MAX_MMAP_SIZE 0
+# endif
+#endif
+#ifndef SQLITE_MAX_MMAP_SIZE
+# if defined(__linux__) \
+ || defined(_WIN32) \
+ || (defined(__APPLE__) && defined(__MACH__)) \
+ || defined(__sun)
+# define SQLITE_MAX_MMAP_SIZE 0x7fff0000 /* 2147418112 */
+# else
+# define SQLITE_MAX_MMAP_SIZE 0
+# endif
+# define SQLITE_MAX_MMAP_SIZE_xc 1 /* exclude from ctime.c */
+#endif
+
+/*
+** The default MMAP_SIZE is zero on all platforms. Or, even if a larger
+** default MMAP_SIZE is specified at compile-time, make sure that it does
+** not exceed the maximum mmap size.
+*/
+#ifndef SQLITE_DEFAULT_MMAP_SIZE
+# define SQLITE_DEFAULT_MMAP_SIZE 0
+# define SQLITE_DEFAULT_MMAP_SIZE_xc 1 /* Exclude from ctime.c */
+#endif
+#if SQLITE_DEFAULT_MMAP_SIZE>SQLITE_MAX_MMAP_SIZE
+# undef SQLITE_DEFAULT_MMAP_SIZE
+# define SQLITE_DEFAULT_MMAP_SIZE SQLITE_MAX_MMAP_SIZE
+#endif
/*
** An instance of the following structure is used to store the busy-handler
@@ -8442,6 +8554,7 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
SQLITE_PRIVATE int sqlite3BtreeClose(Btree*);
SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree*,int);
+SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree*,sqlite3_int64);
SQLITE_PRIVATE int sqlite3BtreeSetSafetyLevel(Btree*,int,int,int);
SQLITE_PRIVATE int sqlite3BtreeSyncDisabled(Btree*);
SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix);
@@ -8518,6 +8631,7 @@ SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p);
#define BTREE_TEXT_ENCODING 5
#define BTREE_USER_VERSION 6
#define BTREE_INCR_VACUUM 7
+#define BTREE_APPLICATION_ID 8
/*
** Values that may be OR'd together to form the second argument of an
@@ -9142,6 +9256,12 @@ typedef struct PgHdr DbPage;
#define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */
#define PAGER_JOURNALMODE_WAL 5 /* Use write-ahead logging */
+/*
+** Flags that make up the mask passed to sqlite3PagerAcquire().
+*/
+#define PAGER_ACQUIRE_NOCONTENT 0x01 /* Do not load data from disk */
+#define PAGER_ACQUIRE_READONLY 0x02 /* Read-only page is acceptable */
+
/*
** The remainder of this file contains the declarations of the functions
** that make up the Pager sub-system API. See source code comments for
@@ -9166,6 +9286,7 @@ SQLITE_PRIVATE void sqlite3PagerSetBusyhandler(Pager*, int(*)(void *), void *);
SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager*, u32*, int);
SQLITE_PRIVATE int sqlite3PagerMaxPageCount(Pager*, int);
SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager*, int);
+SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *, sqlite3_int64);
SQLITE_PRIVATE void sqlite3PagerShrink(Pager*);
SQLITE_PRIVATE void sqlite3PagerSetSafetyLevel(Pager*,int,int,int);
SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *, int);
@@ -9312,6 +9433,8 @@ struct PgHdr {
#define PGHDR_REUSE_UNLIKELY 0x010 /* A hint that reuse is unlikely */
#define PGHDR_DONT_WRITE 0x020 /* Do not write content to disk */
+#define PGHDR_MMAP 0x040 /* This is an mmap page object */
+
/* Initialize and shutdown the page cache subsystem */
SQLITE_PRIVATE int sqlite3PcacheInitialize(void);
SQLITE_PRIVATE void sqlite3PcacheShutdown(void);
@@ -9523,14 +9646,6 @@ SQLITE_PRIVATE void sqlite3PCacheSetDefault(void);
# define SQLITE_OS_WINRT 0
#endif
-/*
-** When compiled for WinCE or WinRT, there is no concept of the current
-** directory.
- */
-#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT
-# define SQLITE_CURDIR 1
-#endif
-
/* If the SET_FULLSYNC macro is not defined above, then make it
** a no-op
*/
@@ -9683,6 +9798,8 @@ SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **);
SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int);
SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id);
SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int);
+SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64, int, void **);
+SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *, i64, void *);
/*
@@ -9922,6 +10039,7 @@ struct sqlite3 {
int nDb; /* Number of backends currently in use */
int flags; /* Miscellaneous flags. See below */
i64 lastRowid; /* ROWID of most recent insert (see above) */
+ i64 szMmap; /* Default mmap_size setting */
unsigned int openFlags; /* Flags passed to sqlite3_vfs.xOpen() */
int errCode; /* Most recent error code (SQLITE_*) */
int errMask; /* & result codes with this before returning */
@@ -11158,6 +11276,8 @@ struct NameContext {
#define NC_HasAgg 0x02 /* One or more aggregate functions seen */
#define NC_IsCheck 0x04 /* True if resolving names in a CHECK constraint */
#define NC_InAggFunc 0x08 /* True if analyzing arguments to an agg func */
+#define NC_AsMaybe 0x10 /* Resolve to AS terms of the result set only
+ ** if no other resolution is available */
/*
** An instance of the following structure contains all information
@@ -11593,6 +11713,8 @@ struct Sqlite3Config {
void *pHeap; /* Heap storage space */
int nHeap; /* Size of pHeap[] */
int mnReq, mxReq; /* Min and max heap requests sizes */
+ sqlite3_int64 szMmap; /* mmap() space per open file */
+ sqlite3_int64 mxMmap; /* Maximum value for szMmap */
void *pScratch; /* Scratch memory */
int szScratch; /* Size of each scratch buffer */
int nScratch; /* Number of scratch buffers */
@@ -11627,6 +11749,7 @@ struct Walker {
int (*xSelectCallback)(Walker*,Select*); /* Callback for SELECTs */
Parse *pParse; /* Parser context. */
int walkerDepth; /* Number of subqueries */
+ u8 bSelectDepthFirst; /* Do subqueries first */
union { /* Extra data for callback */
NameContext *pNC; /* Naming context */
int i; /* Integer value */
@@ -12130,6 +12253,12 @@ SQLITE_PRIVATE void sqlite3Error(sqlite3*, int, const char*,...);
SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n);
SQLITE_PRIVATE u8 sqlite3HexToInt(int h);
SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **);
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) || \
+ defined(SQLITE_DEBUG_OS_TRACE)
+SQLITE_PRIVATE const char *sqlite3ErrName(int);
+#endif
+
SQLITE_PRIVATE const char *sqlite3ErrStr(int);
SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse);
SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char*,int);
@@ -12614,6 +12743,8 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
(void*)0, /* pHeap */
0, /* nHeap */
0, 0, /* mnHeap, mxHeap */
+ SQLITE_DEFAULT_MMAP_SIZE, /* szMmap */
+ SQLITE_MAX_MMAP_SIZE, /* mxMmap */
(void*)0, /* pScratch */
0, /* szScratch */
0, /* nScratch */
@@ -12737,15 +12868,15 @@ static const char * const azCompileOpt[] = {
#ifdef SQLITE_COVERAGE_TEST
"COVERAGE_TEST",
#endif
-#ifdef SQLITE_CURDIR
- "CURDIR",
-#endif
#ifdef SQLITE_DEBUG
"DEBUG",
#endif
#ifdef SQLITE_DEFAULT_LOCKING_MODE
"DEFAULT_LOCKING_MODE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOCKING_MODE),
#endif
+#if defined(SQLITE_DEFAULT_MMAP_SIZE) && !defined(SQLITE_DEFAULT_MMAP_SIZE_xc)
+ "DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE),
+#endif
#ifdef SQLITE_DISABLE_DIRSYNC
"DISABLE_DIRSYNC",
#endif
@@ -12836,6 +12967,9 @@ static const char * const azCompileOpt[] = {
#ifdef SQLITE_LOCK_TRACE
"LOCK_TRACE",
#endif
+#if defined(SQLITE_MAX_MMAP_SIZE) && !defined(SQLITE_MAX_MMAP_SIZE_xc)
+ "MAX_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE),
+#endif
#ifdef SQLITE_MAX_SCHEMA_RETRY
"MAX_SCHEMA_RETRY=" CTIMEOPT_VAL(SQLITE_MAX_SCHEMA_RETRY),
#endif
@@ -12893,11 +13027,6 @@ static const char * const azCompileOpt[] = {
#ifdef SQLITE_OMIT_CHECK
"OMIT_CHECK",
#endif
-/* // redundant
-** #ifdef SQLITE_OMIT_COMPILEOPTION_DIAGS
-** "OMIT_COMPILEOPTION_DIAGS",
-** #endif
-*/
#ifdef SQLITE_OMIT_COMPLETE
"OMIT_COMPLETE",
#endif
@@ -13039,13 +13168,13 @@ static const char * const azCompileOpt[] = {
#ifdef SQLITE_TCL
"TCL",
#endif
-#ifdef SQLITE_TEMP_STORE
+#if defined(SQLITE_TEMP_STORE) && !defined(SQLITE_TEMP_STORE_xc)
"TEMP_STORE=" CTIMEOPT_VAL(SQLITE_TEMP_STORE),
#endif
#ifdef SQLITE_TEST
"TEST",
#endif
-#ifdef SQLITE_THREADSAFE
+#if defined(SQLITE_THREADSAFE)
"THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE),
#endif
#ifdef SQLITE_USE_ALLOCA
@@ -13071,8 +13200,11 @@ SQLITE_API int sqlite3_compileoption_used(const char *zOptName){
/* Since ArraySize(azCompileOpt) is normally in single digits, a
** linear search is adequate. No need for a binary search. */
for(i=0; ipMethods->xShmMap(id, iPage, pgsz, bExtend, pp);
}
+#if SQLITE_MAX_MMAP_SIZE>0
+/* The real implementation of xFetch and xUnfetch */
+SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){
+ DO_OS_MALLOC_TEST(id);
+ return id->pMethods->xFetch(id, iOff, iAmt, pp);
+}
+SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){
+ return id->pMethods->xUnfetch(id, iOff, p);
+}
+#else
+/* No-op stubs to use when memory-mapped I/O is disabled */
+SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){
+ *pp = 0;
+ return SQLITE_OK;
+}
+SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){
+ return SQLITE_OK;
+}
+#endif
+
/*
** The next group of routines are convenience wrappers around the
** VFS methods.
@@ -22851,7 +23011,7 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* #include */
#include
#include
-#ifndef SQLITE_OMIT_WAL
+#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
#include
#endif
@@ -22950,6 +23110,11 @@ struct unixFile {
const char *zPath; /* Name of the file */
unixShm *pShm; /* Shared memory segment information */
int szChunk; /* Configured by FCNTL_CHUNK_SIZE */
+ int nFetchOut; /* Number of outstanding xFetch refs */
+ sqlite3_int64 mmapSize; /* Usable size of mapping at pMapRegion */
+ sqlite3_int64 mmapSizeActual; /* Actual size of mapping at pMapRegion */
+ sqlite3_int64 mmapSizeMax; /* Configured FCNTL_MMAP_SIZE value */
+ void *pMapRegion; /* Memory mapped region */
#ifdef __QNXNTO__
int sectorSize; /* Device sector size */
int deviceCharacteristics; /* Precomputed device characteristics */
@@ -22974,7 +23139,9 @@ struct unixFile {
unsigned char transCntrChng; /* True if the transaction counter changed */
unsigned char dbUpdate; /* True if any part of database file changed */
unsigned char inNormalWrite; /* True if in a normal write operation */
+
#endif
+
#ifdef SQLITE_TEST
/* In test mode, increase the size of this structure a bit so that
** it is larger than the struct CrashFile defined in test6.c.
@@ -22998,6 +23165,7 @@ struct unixFile {
#define UNIXFILE_DELETE 0x20 /* Delete on close */
#define UNIXFILE_URI 0x40 /* Filename might have query parameters */
#define UNIXFILE_NOLOCK 0x80 /* Do no file locking */
+#define UNIXFILE_WARNED 0x0100 /* verifyDbFile() warnings have been issued */
/*
** Include code that is common to all os_*.c files
@@ -23239,6 +23407,17 @@ SQLITE_API int sqlite3_open_file_count = 0;
#define threadid 0
#endif
+/*
+** HAVE_MREMAP defaults to true on Linux and false everywhere else.
+*/
+#if !defined(HAVE_MREMAP)
+# if defined(__linux__) && defined(_GNU_SOURCE)
+# define HAVE_MREMAP 1
+# else
+# define HAVE_MREMAP 0
+# endif
+#endif
+
/*
** Different Unix systems declare open() in different ways. Same use
** open(const char*,int,mode_t). Others use open(const char*,int,...).
@@ -23263,7 +23442,7 @@ static int posixFchown(int fd, uid_t uid, gid_t gid){
/* Forward reference */
static int openDirectory(const char*, int*);
-/* Fix for "error: 'fchmod' undeclared here (not in a function)" on FreeBSD 9 */
+/* fix compile on FreeBSD, not sure why needed... */
int fchmod(int, mode_t);
/*
@@ -23373,6 +23552,19 @@ static struct unix_syscall {
{ "fchown", (sqlite3_syscall_ptr)posixFchown, 0 },
#define osFchown ((int(*)(int,uid_t,gid_t))aSyscall[20].pCurrent)
+ { "mmap", (sqlite3_syscall_ptr)mmap, 0 },
+#define osMmap ((void*(*)(void*,size_t,int,int,int,off_t))aSyscall[21].pCurrent)
+
+ { "munmap", (sqlite3_syscall_ptr)munmap, 0 },
+#define osMunmap ((void*(*)(void*,size_t))aSyscall[22].pCurrent)
+
+#if HAVE_MREMAP
+ { "mremap", (sqlite3_syscall_ptr)mremap, 0 },
+#else
+ { "mremap", (sqlite3_syscall_ptr)0, 0 },
+#endif
+#define osMremap ((void*(*)(void*,size_t,size_t,int,...))aSyscall[23].pCurrent)
+
}; /* End of the overrideable system calls */
/*
@@ -23704,7 +23896,6 @@ static int sqliteErrorFromPosixError(int posixError, int sqliteIOErr) {
}
-
/******************************************************************************
****************** Begin Unique File ID Utility Used By VxWorks ***************
**
@@ -24040,7 +24231,6 @@ static int unixLogErrorAtLine(
zErr = strerror(iErrno);
#endif
- assert( errcode!=SQLITE_OK );
if( zPath==0 ) zPath = "";
sqlite3_log(errcode,
"os_unix.c:%d: (%d) %s(%s) - %s",
@@ -24206,6 +24396,50 @@ static int findInodeInfo(
}
+/*
+** Check a unixFile that is a database. Verify the following:
+**
+** (1) There is exactly one hard link on the file
+** (2) The file is not a symbolic link
+** (3) The file has not been renamed or unlinked
+**
+** Issue sqlite3_log(SQLITE_WARNING,...) messages if anything is not right.
+*/
+static void verifyDbFile(unixFile *pFile){
+ struct stat buf;
+ int rc;
+ if( pFile->ctrlFlags & UNIXFILE_WARNED ){
+ /* One or more of the following warnings have already been issued. Do not
+ ** repeat them so as not to clutter the error log */
+ return;
+ }
+ rc = osFstat(pFile->h, &buf);
+ if( rc!=0 ){
+ sqlite3_log(SQLITE_WARNING, "cannot fstat db file %s", pFile->zPath);
+ pFile->ctrlFlags |= UNIXFILE_WARNED;
+ return;
+ }
+ if( buf.st_nlink==0 && (pFile->ctrlFlags & UNIXFILE_DELETE)==0 ){
+ sqlite3_log(SQLITE_WARNING, "file unlinked while open: %s", pFile->zPath);
+ pFile->ctrlFlags |= UNIXFILE_WARNED;
+ return;
+ }
+ if( buf.st_nlink>1 ){
+ sqlite3_log(SQLITE_WARNING, "multiple links to file: %s", pFile->zPath);
+ pFile->ctrlFlags |= UNIXFILE_WARNED;
+ return;
+ }
+ if( pFile->pInode!=0
+ && ((rc = osStat(pFile->zPath, &buf))!=0
+ || buf.st_ino!=pFile->pInode->fileId.ino)
+ ){
+ sqlite3_log(SQLITE_WARNING, "file renamed while open: %s", pFile->zPath);
+ pFile->ctrlFlags |= UNIXFILE_WARNED;
+ return;
+ }
+}
+
+
/*
** This routine checks if there is a RESERVED lock held on the specified
** file by this or any other process. If such a lock is held, set *pResOut
@@ -24736,9 +24970,13 @@ end_unlock:
** the requested locking level, this routine is a no-op.
*/
static int unixUnlock(sqlite3_file *id, int eFileLock){
+ assert( eFileLock==SHARED_LOCK || ((unixFile *)id)->nFetchOut==0 );
return posixUnlock(id, eFileLock, 0);
}
+static int unixMapfile(unixFile *pFd, i64 nByte);
+static void unixUnmapfile(unixFile *pFd);
+
/*
** This function performs the parts of the "close file" operation
** common to all locking schemes. It closes the directory and file
@@ -24751,6 +24989,7 @@ static int unixUnlock(sqlite3_file *id, int eFileLock){
*/
static int closeUnixFile(sqlite3_file *id){
unixFile *pFile = (unixFile*)id;
+ unixUnmapfile(pFile);
if( pFile->h>=0 ){
robust_close(pFile, pFile->h, __LINE__);
pFile->h = -1;
@@ -24777,6 +25016,7 @@ static int closeUnixFile(sqlite3_file *id){
static int unixClose(sqlite3_file *id){
int rc = SQLITE_OK;
unixFile *pFile = (unixFile *)id;
+ verifyDbFile(pFile);
unixUnlock(id, NO_LOCK);
unixEnterMutex();
@@ -26008,6 +26248,8 @@ static int unixRead(
unixFile *pFile = (unixFile *)id;
int got;
assert( id );
+ assert( offset>=0 );
+ assert( amt>0 );
/* If this is a database file (not a journal, master-journal or temp
** file), the bytes in the locking range should never be read or written. */
@@ -26018,6 +26260,23 @@ static int unixRead(
);
#endif
+#if SQLITE_MAX_MMAP_SIZE>0
+ /* Deal with as much of this read request as possible by transfering
+ ** data from the memory mapping using memcpy(). */
+ if( offsetmmapSize ){
+ if( offset+amt <= pFile->mmapSize ){
+ memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt);
+ return SQLITE_OK;
+ }else{
+ int nCopy = pFile->mmapSize - offset;
+ memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], nCopy);
+ pBuf = &((u8 *)pBuf)[nCopy];
+ amt -= nCopy;
+ offset += nCopy;
+ }
+ }
+#endif
+
got = seekAndRead(pFile, offset, pBuf, amt);
if( got==amt ){
return SQLITE_OK;
@@ -26032,6 +26291,51 @@ static int unixRead(
}
}
+/*
+** Attempt to seek the file-descriptor passed as the first argument to
+** absolute offset iOff, then attempt to write nBuf bytes of data from
+** pBuf to it. If an error occurs, return -1 and set *piErrno. Otherwise,
+** return the actual number of bytes written (which may be less than
+** nBuf).
+*/
+static int seekAndWriteFd(
+ int fd, /* File descriptor to write to */
+ i64 iOff, /* File offset to begin writing at */
+ const void *pBuf, /* Copy data from this buffer to the file */
+ int nBuf, /* Size of buffer pBuf in bytes */
+ int *piErrno /* OUT: Error number if error occurs */
+){
+ int rc = 0; /* Value returned by system call */
+
+ assert( nBuf==(nBuf&0x1ffff) );
+ nBuf &= 0x1ffff;
+ TIMER_START;
+
+#if defined(USE_PREAD)
+ do{ rc = osPwrite(fd, pBuf, nBuf, iOff); }while( rc<0 && errno==EINTR );
+#elif defined(USE_PREAD64)
+ do{ rc = osPwrite64(fd, pBuf, nBuf, iOff);}while( rc<0 && errno==EINTR);
+#else
+ do{
+ i64 iSeek = lseek(fd, iOff, SEEK_SET);
+ SimulateIOError( iSeek-- );
+
+ if( iSeek!=iOff ){
+ if( piErrno ) *piErrno = (iSeek==-1 ? errno : 0);
+ return -1;
+ }
+ rc = osWrite(fd, pBuf, nBuf);
+ }while( rc<0 && errno==EINTR );
+#endif
+
+ TIMER_END;
+ OSTRACE(("WRITE %-3d %5d %7lld %llu\n", fd, rc, iOff, TIMER_ELAPSED));
+
+ if( rc<0 && piErrno ) *piErrno = errno;
+ return rc;
+}
+
+
/*
** Seek to the offset in id->offset then read cnt bytes into pBuf.
** Return the number of bytes actually read. Update the offset.
@@ -26040,39 +26344,7 @@ static int unixRead(
** is set before returning.
*/
static int seekAndWrite(unixFile *id, i64 offset, const void *pBuf, int cnt){
- int got;
-#if (!defined(USE_PREAD) && !defined(USE_PREAD64))
- i64 newOffset;
-#endif
- assert( cnt==(cnt&0x1ffff) );
- cnt &= 0x1ffff;
- TIMER_START;
-#if defined(USE_PREAD)
- do{ got = osPwrite(id->h, pBuf, cnt, offset); }while( got<0 && errno==EINTR );
-#elif defined(USE_PREAD64)
- do{ got = osPwrite64(id->h, pBuf, cnt, offset);}while( got<0 && errno==EINTR);
-#else
- do{
- newOffset = lseek(id->h, offset, SEEK_SET);
- SimulateIOError( newOffset-- );
- if( newOffset!=offset ){
- if( newOffset == -1 ){
- ((unixFile*)id)->lastErrno = errno;
- }else{
- ((unixFile*)id)->lastErrno = 0;
- }
- return -1;
- }
- got = osWrite(id->h, pBuf, cnt);
- }while( got<0 && errno==EINTR );
-#endif
- TIMER_END;
- if( got<0 ){
- ((unixFile*)id)->lastErrno = errno;
- }
-
- OSTRACE(("WRITE %-3d %5d %7lld %llu\n", id->h, got, offset, TIMER_ELAPSED));
- return got;
+ return seekAndWriteFd(id->h, offset, pBuf, cnt, &id->lastErrno);
}
@@ -26122,6 +26394,23 @@ static int unixWrite(
}
#endif
+#if SQLITE_MAX_MMAP_SIZE>0
+ /* Deal with as much of this write request as possible by transfering
+ ** data from the memory mapping using memcpy(). */
+ if( offsetmmapSize ){
+ if( offset+amt <= pFile->mmapSize ){
+ memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt);
+ return SQLITE_OK;
+ }else{
+ int nCopy = pFile->mmapSize - offset;
+ memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, nCopy);
+ pBuf = &((u8 *)pBuf)[nCopy];
+ amt -= nCopy;
+ offset += nCopy;
+ }
+ }
+#endif
+
while( amt>0 && (wrote = seekAndWrite(pFile, offset, pBuf, amt))>0 ){
amt -= wrote;
offset += wrote;
@@ -26404,6 +26693,14 @@ static int unixTruncate(sqlite3_file *id, i64 nByte){
}
#endif
+ /* If the file was just truncated to a size smaller than the currently
+ ** mapped region, reduce the effective mapping size as well. SQLite will
+ ** use read() and write() to access data beyond this point from now on.
+ */
+ if( nBytemmapSize ){
+ pFile->mmapSize = nByte;
+ }
+
return SQLITE_OK;
}
}
@@ -26492,6 +26789,19 @@ static int fcntlSizeHint(unixFile *pFile, i64 nByte){
}
}
+ if( pFile->mmapSizeMax>0 && nByte>pFile->mmapSize ){
+ int rc;
+ if( pFile->szChunk<=0 ){
+ if( robust_ftruncate(pFile->h, nByte) ){
+ pFile->lastErrno = errno;
+ return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath);
+ }
+ }
+
+ rc = unixMapfile(pFile, nByte);
+ return rc;
+ }
+
return SQLITE_OK;
}
@@ -26559,6 +26869,18 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){
}
return SQLITE_OK;
}
+ case SQLITE_FCNTL_MMAP_SIZE: {
+ i64 newLimit = *(i64*)pArg;
+ if( newLimit>sqlite3GlobalConfig.mxMmap ){
+ newLimit = sqlite3GlobalConfig.mxMmap;
+ }
+ *(i64*)pArg = pFile->mmapSizeMax;
+ if( newLimit>=0 ){
+ pFile->mmapSizeMax = newLimit;
+ if( newLimitmmapSize ) pFile->mmapSize = newLimit;
+ }
+ return SQLITE_OK;
+ }
#ifdef SQLITE_DEBUG
/* The pager calls this method to signal that it has done
** a rollback and that the database is therefore unchanged and
@@ -26871,7 +27193,7 @@ static void unixShmPurge(unixFile *pFd){
sqlite3_mutex_free(p->mutex);
for(i=0; i<p->nRegion; i++){
if( p->h>=0 ){
- munmap(p->apRegion[i], p->szRegion);
+ osMunmap(p->apRegion[i], p->szRegion);
}else{
sqlite3_free(p->apRegion[i]);
}
@@ -27111,24 +27433,32 @@ static int unixShmMap(
if( sStat.st_size<nByte ){
      /* The requested memory region does not exist. If bExtend is set to
      ** false, exit early. *pp will be set to NULL and SQLITE_OK returned.
-     **
-     ** Alternatively, if bExtend is true, use ftruncate() to allocate
-     ** the requested memory region.
      */
-     if( !bExtend ) goto shmpage_out;
-#if defined(SQLITE_HAVE_POSIX_FALLOCATE) && SQLITE_HAVE_POSIX_FALLOCATE
-     if( osFallocate(pShmNode->h, sStat.st_size, nByte)!=0 ){
- rc = unixLogError(SQLITE_IOERR_SHMSIZE, "fallocate",
- pShmNode->zFilename);
+ if( !bExtend ){
goto shmpage_out;
}
-#else
- if( robust_ftruncate(pShmNode->h, nByte) ){
- rc = unixLogError(SQLITE_IOERR_SHMSIZE, "ftruncate",
- pShmNode->zFilename);
- goto shmpage_out;
+
+ /* Alternatively, if bExtend is true, extend the file. Do this by
+ ** writing a single byte to the end of each (OS) page being
+ ** allocated or extended. Technically, we need only write to the
+ ** last page in order to extend the file. But writing to all new
+ ** pages forces the OS to allocate them immediately, which reduces
+ ** the chances of SIGBUS while accessing the mapped region later on.
+ */
+ else{
+ static const int pgsz = 4096;
+ int iPg;
+
+ /* Write to the last byte of each newly allocated or extended page */
+ assert( (nByte % pgsz)==0 );
+ for(iPg=(sStat.st_size/pgsz); iPg<(nByte/pgsz); iPg++){
+ if( seekAndWriteFd(pShmNode->h, iPg*pgsz + pgsz-1, "", 1, 0)!=1 ){
+ const char *zFile = pShmNode->zFilename;
+ rc = unixLogError(SQLITE_IOERR_SHMSIZE, "write", zFile);
+ goto shmpage_out;
+ }
+ }
}
-#endif
}
}
@@ -27144,7 +27474,7 @@ static int unixShmMap(
while(pShmNode->nRegion<=iRegion){
void *pMem;
if( pShmNode->h>=0 ){
- pMem = mmap(0, szRegion,
+ pMem = osMmap(0, szRegion,
pShmNode->isReadonly ? PROT_READ : PROT_READ|PROT_WRITE,
MAP_SHARED, pShmNode->h, szRegion*(i64)pShmNode->nRegion
);
@@ -27361,6 +27691,236 @@ static int unixShmUnmap(
# define unixShmUnmap 0
#endif /* #ifndef SQLITE_OMIT_WAL */
+/*
+** If it is currently memory mapped, unmap file pFd.
+*/
+static void unixUnmapfile(unixFile *pFd){
+ assert( pFd->nFetchOut==0 );
+#if SQLITE_MAX_MMAP_SIZE>0
+ if( pFd->pMapRegion ){
+ osMunmap(pFd->pMapRegion, pFd->mmapSizeActual);
+ pFd->pMapRegion = 0;
+ pFd->mmapSize = 0;
+ pFd->mmapSizeActual = 0;
+ }
+#endif
+}
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/*
+** Return the system page size.
+*/
+static int unixGetPagesize(void){
+#if HAVE_MREMAP
+ return 512;
+#elif defined(_BSD_SOURCE)
+ return getpagesize();
+#else
+ return (int)sysconf(_SC_PAGESIZE);
+#endif
+}
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/*
+** Attempt to set the size of the memory mapping maintained by file
+** descriptor pFd to nNew bytes. Any existing mapping is discarded.
+**
+** If successful, this function sets the following variables:
+**
+** unixFile.pMapRegion
+** unixFile.mmapSize
+** unixFile.mmapSizeActual
+**
+** If unsuccessful, an error message is logged via sqlite3_log() and
+** the three variables above are zeroed. In this case SQLite should
+** continue accessing the database using the xRead() and xWrite()
+** methods.
+*/
+static void unixRemapfile(
+ unixFile *pFd, /* File descriptor object */
+ i64 nNew /* Required mapping size */
+){
+ const char *zErr = "mmap";
+ int h = pFd->h; /* File descriptor open on db file */
+ u8 *pOrig = (u8 *)pFd->pMapRegion; /* Pointer to current file mapping */
+ i64 nOrig = pFd->mmapSizeActual; /* Size of pOrig region in bytes */
+ u8 *pNew = 0; /* Location of new mapping */
+ int flags = PROT_READ; /* Flags to pass to mmap() */
+
+ assert( pFd->nFetchOut==0 );
+ assert( nNew>pFd->mmapSize );
+ assert( nNew<=pFd->mmapSizeMax );
+ assert( nNew>0 );
+ assert( pFd->mmapSizeActual>=pFd->mmapSize );
+ assert( MAP_FAILED!=0 );
+
+ if( (pFd->ctrlFlags & UNIXFILE_RDONLY)==0 ) flags |= PROT_WRITE;
+
+ if( pOrig ){
+ const int szSyspage = unixGetPagesize();
+ i64 nReuse = (pFd->mmapSize & ~(szSyspage-1));
+ u8 *pReq = &pOrig[nReuse];
+
+ /* Unmap any pages of the existing mapping that cannot be reused. */
+ if( nReuse!=nOrig ){
+ osMunmap(pReq, nOrig-nReuse);
+ }
+
+#if HAVE_MREMAP
+ pNew = osMremap(pOrig, nReuse, nNew, MREMAP_MAYMOVE);
+ zErr = "mremap";
+#else
+ pNew = osMmap(pReq, nNew-nReuse, flags, MAP_SHARED, h, nReuse);
+ if( pNew!=MAP_FAILED ){
+ if( pNew!=pReq ){
+ osMunmap(pNew, nNew - nReuse);
+ pNew = 0;
+ }else{
+ pNew = pOrig;
+ }
+ }
+#endif
+
+ /* The attempt to extend the existing mapping failed. Free it. */
+ if( pNew==MAP_FAILED || pNew==0 ){
+ osMunmap(pOrig, nReuse);
+ }
+ }
+
+ /* If pNew is still NULL, try to create an entirely new mapping. */
+ if( pNew==0 ){
+ pNew = osMmap(0, nNew, flags, MAP_SHARED, h, 0);
+ }
+
+ if( pNew==MAP_FAILED ){
+ pNew = 0;
+ nNew = 0;
+ unixLogError(SQLITE_OK, zErr, pFd->zPath);
+
+ /* If the mmap() above failed, assume that all subsequent mmap() calls
+ ** will probably fail too. Fall back to using xRead/xWrite exclusively
+ ** in this case. */
+ pFd->mmapSizeMax = 0;
+ }
+ pFd->pMapRegion = (void *)pNew;
+ pFd->mmapSize = pFd->mmapSizeActual = nNew;
+}
+#endif
+
+/*
+** Memory map or remap the file opened by file-descriptor pFd (if the file
+** is already mapped, the existing mapping is replaced by the new). Or, if
+** there already exists a mapping for this file, and there are still
+** outstanding xFetch() references to it, this function is a no-op.
+**
+** If parameter nByte is non-negative, then it is the requested size of
+** the mapping to create. Otherwise, if nByte is less than zero, then the
+** requested size is the size of the file on disk. The actual size of the
+** created mapping is either the requested size or the value configured
+** using SQLITE_FCNTL_MMAP_LIMIT, whichever is smaller.
+**
+** SQLITE_OK is returned if no error occurs (even if the mapping is not
+** recreated as a result of outstanding references) or an SQLite error
+** code otherwise.
+*/
+static int unixMapfile(unixFile *pFd, i64 nByte){
+#if SQLITE_MAX_MMAP_SIZE>0
+ i64 nMap = nByte;
+ int rc;
+
+ assert( nMap>=0 || pFd->nFetchOut==0 );
+ if( pFd->nFetchOut>0 ) return SQLITE_OK;
+
+ if( nMap<0 ){
+ struct stat statbuf; /* Low-level file information */
+ rc = osFstat(pFd->h, &statbuf);
+ if( rc!=SQLITE_OK ){
+ return SQLITE_IOERR_FSTAT;
+ }
+ nMap = statbuf.st_size;
+ }
+ if( nMap>pFd->mmapSizeMax ){
+ nMap = pFd->mmapSizeMax;
+ }
+
+ if( nMap!=pFd->mmapSize ){
+ if( nMap>0 ){
+ unixRemapfile(pFd, nMap);
+ }else{
+ unixUnmapfile(pFd);
+ }
+ }
+#endif
+
+ return SQLITE_OK;
+}
+
+/*
+** If possible, return a pointer to a mapping of file fd starting at offset
+** iOff. The mapping must be valid for at least nAmt bytes.
+**
+** If such a pointer can be obtained, store it in *pp and return SQLITE_OK.
+** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK.
+** Finally, if an error does occur, return an SQLite error code. The final
+** value of *pp is undefined in this case.
+**
+** If this function does return a pointer, the caller must eventually
+** release the reference by calling unixUnfetch().
+*/
+static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
+#if SQLITE_MAX_MMAP_SIZE>0
+ unixFile *pFd = (unixFile *)fd; /* The underlying database file */
+#endif
+ *pp = 0;
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ if( pFd->mmapSizeMax>0 ){
+ if( pFd->pMapRegion==0 ){
+ int rc = unixMapfile(pFd, -1);
+ if( rc!=SQLITE_OK ) return rc;
+ }
+ if( pFd->mmapSize >= iOff+nAmt ){
+ *pp = &((u8 *)pFd->pMapRegion)[iOff];
+ pFd->nFetchOut++;
+ }
+ }
+#endif
+ return SQLITE_OK;
+}
+
+/*
+** If the third argument is non-NULL, then this function releases a
+** reference obtained by an earlier call to unixFetch(). The second
+** argument passed to this function must be the same as the corresponding
+** argument that was passed to the unixFetch() invocation.
+**
+** Or, if the third argument is NULL, then this function is being called
+** to inform the VFS layer that, according to POSIX, any existing mapping
+** may now be invalid and should be unmapped.
+*/
+static int unixUnfetch(sqlite3_file *fd, i64 iOff, void *p){
+ unixFile *pFd = (unixFile *)fd; /* The underlying database file */
+ UNUSED_PARAMETER(iOff);
+
+ /* If p==0 (unmap the entire file) then there must be no outstanding
+ ** xFetch references. Or, if p!=0 (meaning it is an xFetch reference),
+ ** then there must be at least one outstanding. */
+ assert( (p==0)==(pFd->nFetchOut==0) );
+
+ /* If p!=0, it must match the iOff value. */
+ assert( p==0 || p==&((u8 *)pFd->pMapRegion)[iOff] );
+
+ if( p ){
+ pFd->nFetchOut--;
+ }else{
+ unixUnmapfile(pFd);
+ }
+
+ assert( pFd->nFetchOut>=0 );
+ return SQLITE_OK;
+}
+
/*
** Here ends the implementation of all sqlite3_file methods.
**
@@ -27419,7 +27979,9 @@ static const sqlite3_io_methods METHOD = { \
unixShmMap, /* xShmMap */ \
unixShmLock, /* xShmLock */ \
unixShmBarrier, /* xShmBarrier */ \
- unixShmUnmap /* xShmUnmap */ \
+ unixShmUnmap, /* xShmUnmap */ \
+ unixFetch, /* xFetch */ \
+ unixUnfetch, /* xUnfetch */ \
}; \
static const sqlite3_io_methods *FINDER##Impl(const char *z, unixFile *p){ \
UNUSED_PARAMETER(z); UNUSED_PARAMETER(p); \
@@ -27436,7 +27998,7 @@ static const sqlite3_io_methods *(*const FINDER)(const char*,unixFile *p) \
IOMETHODS(
posixIoFinder, /* Finder function name */
posixIoMethods, /* sqlite3_io_methods object name */
- 2, /* shared memory is enabled */
+ 3, /* shared memory and mmap are enabled */
unixClose, /* xClose method */
unixLock, /* xLock method */
unixUnlock, /* xUnlock method */
@@ -27687,6 +28249,7 @@ static int fillInUnixFile(
pNew->pVfs = pVfs;
pNew->zPath = zFilename;
pNew->ctrlFlags = (u8)ctrlFlags;
+ pNew->mmapSizeMax = sqlite3GlobalConfig.szMmap;
if( sqlite3_uri_boolean(((ctrlFlags & UNIXFILE_URI) ? zFilename : 0),
"psow", SQLITE_POWERSAFE_OVERWRITE) ){
pNew->ctrlFlags |= UNIXFILE_PSOW;
@@ -27822,15 +28385,15 @@ static int fillInUnixFile(
if( h>=0 ) robust_close(pNew, h, __LINE__);
h = -1;
osUnlink(zFilename);
- isDelete = 0;
+ pNew->ctrlFlags |= UNIXFILE_DELETE;
}
- if( isDelete ) pNew->ctrlFlags |= UNIXFILE_DELETE;
#endif
if( rc!=SQLITE_OK ){
if( h>=0 ) robust_close(pNew, h, __LINE__);
}else{
pNew->pMethod = pLockingStyle;
OpenCounter(+1);
+ verifyDbFile(pNew);
}
return rc;
}
@@ -29924,7 +30487,7 @@ SQLITE_API int sqlite3_os_init(void){
/* Double-check that the aSyscall[] array has been constructed
** correctly. See ticket [bb3a86e890c8e96ab] */
- assert( ArraySize(aSyscall)==21 );
+ assert( ArraySize(aSyscall)==24 );
/* Register all VFSes defined in the aVfs[] array */
for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){
@@ -30307,11 +30870,20 @@ struct winFile {
winceLock local; /* Locks obtained by this instance of winFile */
winceLock *shared; /* Global shared lock memory for the file */
#endif
+#if SQLITE_MAX_MMAP_SIZE>0
+ int nFetchOut; /* Number of outstanding xFetch references */
+ HANDLE hMap; /* Handle for accessing memory mapping */
+ void *pMapRegion; /* Area memory mapped */
+ sqlite3_int64 mmapSize; /* Usable size of mapped region */
+ sqlite3_int64 mmapSizeActual; /* Actual size of mapped region */
+ sqlite3_int64 mmapSizeMax; /* Configured FCNTL_MMAP_SIZE value */
+#endif
};
/*
** Allowed values for winFile.ctrlFlags
*/
+#define WINFILE_RDONLY 0x02 /* Connection is read only */
#define WINFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */
#define WINFILE_PSOW 0x10 /* SQLITE_IOCAP_POWERSAFE_OVERWRITE */
@@ -31671,7 +32243,7 @@ static int getLastErrorMsg(DWORD lastErrno, int nBuf, char *zBuf){
}
#endif
if( 0 == dwLen ){
- sqlite3_snprintf(nBuf, zBuf, "OsError 0x%x (%u)", lastErrno, lastErrno);
+ sqlite3_snprintf(nBuf, zBuf, "OsError 0x%lx (%lu)", lastErrno, lastErrno);
}else{
/* copy a maximum of nBuf chars to output buffer */
sqlite3_snprintf(nBuf, zBuf, "%s", zOut);
@@ -31714,7 +32286,7 @@ static int winLogErrorAtLine(
for(i=0; zMsg[i] && zMsg[i]!='\r' && zMsg[i]!='\n'; i++){}
zMsg[i] = 0;
sqlite3_log(errcode,
- "os_win.c:%d: (%d) %s(%s) - %s",
+ "os_win.c:%d: (%lu) %s(%s) - %s",
iLine, lastErrno, zFunc, zPath, zMsg
);
@@ -32175,6 +32747,8 @@ static int seekWinFile(winFile *pFile, sqlite3_int64 iOffset){
DWORD dwRet; /* Value returned by SetFilePointer() */
DWORD lastErrno; /* Value returned by GetLastError() */
+ OSTRACE(("SEEK file=%p, offset=%lld\n", pFile->h, iOffset));
+
upperBits = (LONG)((iOffset>>32) & 0x7fffffff);
lowerBits = (LONG)(iOffset & 0xffffffff);
@@ -32192,9 +32766,11 @@ static int seekWinFile(winFile *pFile, sqlite3_int64 iOffset){
pFile->lastErrno = lastErrno;
winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno,
"seekWinFile", pFile->zPath);
+ OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h));
return 1;
}
+ OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h));
return 0;
#else
/*
@@ -32211,13 +32787,20 @@ static int seekWinFile(winFile *pFile, sqlite3_int64 iOffset){
pFile->lastErrno = osGetLastError();
winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno,
"seekWinFile", pFile->zPath);
+ OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h));
return 1;
}
+ OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h));
return 0;
#endif
}
+#if SQLITE_MAX_MMAP_SIZE>0
+/* Forward references to VFS methods */
+static int winUnmapfile(winFile*);
+#endif
+
/*
** Close a file.
**
@@ -32237,8 +32820,14 @@ static int winClose(sqlite3_file *id){
#ifndef SQLITE_OMIT_WAL
assert( pFile->pShm==0 );
#endif
- OSTRACE(("CLOSE %d\n", pFile->h));
assert( pFile->h!=NULL && pFile->h!=INVALID_HANDLE_VALUE );
+ OSTRACE(("CLOSE file=%p\n", pFile->h));
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ rc = winUnmapfile(pFile);
+ if( rc!=SQLITE_OK ) return rc;
+#endif
+
do{
rc = osCloseHandle(pFile->h);
/* SimulateIOError( rc=0; cnt=MX_CLOSE_ATTEMPT; ); */
@@ -32258,11 +32847,11 @@ static int winClose(sqlite3_file *id){
sqlite3_free(pFile->zDeleteOnClose);
}
#endif
- OSTRACE(("CLOSE %d %s\n", pFile->h, rc ? "ok" : "failed"));
if( rc ){
pFile->h = NULL;
}
OpenCounter(-1);
+ OSTRACE(("CLOSE file=%p, rc=%s\n", pFile->h, rc ? "ok" : "failed"));
return rc ? SQLITE_OK
: winLogError(SQLITE_IOERR_CLOSE, osGetLastError(),
"winClose", pFile->zPath);
@@ -32287,11 +32876,33 @@ static int winRead(
int nRetry = 0; /* Number of retrys */
assert( id!=0 );
+ assert( amt>0 );
+ assert( offset>=0 );
SimulateIOError(return SQLITE_IOERR_READ);
- OSTRACE(("READ %d lock=%d\n", pFile->h, pFile->locktype));
+ OSTRACE(("READ file=%p, buffer=%p, amount=%d, offset=%lld, lock=%d\n",
+ pFile->h, pBuf, amt, offset, pFile->locktype));
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ /* Deal with as much of this read request as possible by transfering
+ ** data from the memory mapping using memcpy(). */
+ if( offset<pFile->mmapSize ){
+ if( offset+amt <= pFile->mmapSize ){
+ memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt);
+ OSTRACE(("READ-MMAP file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }else{
+ int nCopy = (int)(pFile->mmapSize - offset);
+ memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], nCopy);
+ pBuf = &((u8 *)pBuf)[nCopy];
+ amt -= nCopy;
+ offset += nCopy;
+ }
+ }
+#endif
#if SQLITE_OS_WINCE
if( seekWinFile(pFile, offset) ){
+ OSTRACE(("READ file=%p, rc=SQLITE_FULL\n", pFile->h));
return SQLITE_FULL;
}
while( !osReadFile(pFile->h, pBuf, amt, &nRead, 0) ){
@@ -32305,6 +32916,7 @@ static int winRead(
DWORD lastErrno;
if( retryIoerr(&nRetry, &lastErrno) ) continue;
pFile->lastErrno = lastErrno;
+ OSTRACE(("READ file=%p, rc=SQLITE_IOERR_READ\n", pFile->h));
return winLogError(SQLITE_IOERR_READ, pFile->lastErrno,
"winRead", pFile->zPath);
}
@@ -32312,9 +32924,11 @@ static int winRead(
if( nRead<(DWORD)amt ){
/* Unread parts of the buffer must be zero-filled */
memset(&((char*)pBuf)[nRead], 0, amt-nRead);
+ OSTRACE(("READ file=%p, rc=SQLITE_IOERR_SHORT_READ\n", pFile->h));
return SQLITE_IOERR_SHORT_READ;
}
+ OSTRACE(("READ file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
@@ -32337,7 +32951,26 @@ static int winWrite(
SimulateIOError(return SQLITE_IOERR_WRITE);
SimulateDiskfullError(return SQLITE_FULL);
- OSTRACE(("WRITE %d lock=%d\n", pFile->h, pFile->locktype));
+ OSTRACE(("WRITE file=%p, buffer=%p, amount=%d, offset=%lld, lock=%d\n",
+ pFile->h, pBuf, amt, offset, pFile->locktype));
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ /* Deal with as much of this write request as possible by transfering
+ ** data from the memory mapping using memcpy(). */
+ if( offset<pFile->mmapSize ){
+ if( offset+amt <= pFile->mmapSize ){
+ memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt);
+ OSTRACE(("WRITE-MMAP file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }else{
+ int nCopy = (int)(pFile->mmapSize - offset);
+ memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, nCopy);
+ pBuf = &((u8 *)pBuf)[nCopy];
+ amt -= nCopy;
+ offset += nCopy;
+ }
+ }
+#endif
#if SQLITE_OS_WINCE
rc = seekWinFile(pFile, offset);
@@ -32390,13 +33023,16 @@ static int winWrite(
if( rc ){
if( ( pFile->lastErrno==ERROR_HANDLE_DISK_FULL )
|| ( pFile->lastErrno==ERROR_DISK_FULL )){
+ OSTRACE(("WRITE file=%p, rc=SQLITE_FULL\n", pFile->h));
return SQLITE_FULL;
}
+ OSTRACE(("WRITE file=%p, rc=SQLITE_IOERR_WRITE\n", pFile->h));
return winLogError(SQLITE_IOERR_WRITE, pFile->lastErrno,
"winWrite", pFile->zPath);
}else{
logIoerr(nRetry);
}
+ OSTRACE(("WRITE file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
@@ -32406,11 +33042,12 @@ static int winWrite(
static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
winFile *pFile = (winFile*)id; /* File handle object */
int rc = SQLITE_OK; /* Return code for this function */
+ DWORD lastErrno;
assert( pFile );
-
- OSTRACE(("TRUNCATE %d %lld\n", pFile->h, nByte));
SimulateIOError(return SQLITE_IOERR_TRUNCATE);
+ OSTRACE(("TRUNCATE file=%p, size=%lld, lock=%d\n",
+ pFile->h, nByte, pFile->locktype));
/* If the user has configured a chunk-size for this file, truncate the
** file so that it consists of an integer number of chunks (i.e. the
@@ -32424,14 +33061,25 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
/* SetEndOfFile() returns non-zero when successful, or zero when it fails. */
if( seekWinFile(pFile, nByte) ){
rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno,
- "winTruncate1", pFile->zPath);
- }else if( 0==osSetEndOfFile(pFile->h) ){
- pFile->lastErrno = osGetLastError();
+ "winTruncate1", pFile->zPath);
+ }else if( 0==osSetEndOfFile(pFile->h) &&
+ ((lastErrno = osGetLastError())!=ERROR_USER_MAPPED_FILE) ){
+ pFile->lastErrno = lastErrno;
rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno,
- "winTruncate2", pFile->zPath);
+ "winTruncate2", pFile->zPath);
}
- OSTRACE(("TRUNCATE %d %lld %s\n", pFile->h, nByte, rc ? "failed" : "ok"));
+#if SQLITE_MAX_MMAP_SIZE>0
+ /* If the file was truncated to a size smaller than the currently
+ ** mapped region, reduce the effective mapping size as well. SQLite will
+ ** use read() and write() to access data beyond this point from now on.
+ */
+ if( pFile->pMapRegion && nByte<pFile->mmapSize ){
+ pFile->mmapSize = nByte;
+ }
+#endif
+
+ OSTRACE(("TRUNCATE file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc)));
return rc;
}
@@ -32471,13 +33119,14 @@ static int winSync(sqlite3_file *id, int flags){
|| (flags&0x0F)==SQLITE_SYNC_FULL
);
- OSTRACE(("SYNC %d lock=%d\n", pFile->h, pFile->locktype));
-
/* Unix cannot, but some systems may return SQLITE_FULL from here. This
** line is to test that doing so does not cause any problems.
*/
SimulateDiskfullError( return SQLITE_FULL );
+ OSTRACE(("SYNC file=%p, flags=%x, lock=%d\n",
+ pFile->h, flags, pFile->locktype));
+
#ifndef SQLITE_TEST
UNUSED_PARAMETER(flags);
#else
@@ -32496,9 +33145,11 @@ static int winSync(sqlite3_file *id, int flags){
rc = osFlushFileBuffers(pFile->h);
SimulateIOError( rc=FALSE );
if( rc ){
+ OSTRACE(("SYNC file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}else{
pFile->lastErrno = osGetLastError();
+ OSTRACE(("SYNC file=%p, rc=SQLITE_IOERR_FSYNC\n", pFile->h));
return winLogError(SQLITE_IOERR_FSYNC, pFile->lastErrno,
"winSync", pFile->zPath);
}
@@ -32513,7 +33164,10 @@ static int winFileSize(sqlite3_file *id, sqlite3_int64 *pSize){
int rc = SQLITE_OK;
assert( id!=0 );
+ assert( pSize!=0 );
SimulateIOError(return SQLITE_IOERR_FSTAT);
+ OSTRACE(("SIZE file=%p, pSize=%p\n", pFile->h, pSize));
+
#if SQLITE_OS_WINRT
{
FILE_STANDARD_INFO info;
@@ -32542,6 +33196,8 @@ static int winFileSize(sqlite3_file *id, sqlite3_int64 *pSize){
}
}
#endif
+ OSTRACE(("SIZE file=%p, pSize=%p, *pSize=%lld, rc=%s\n",
+ pFile->h, pSize, *pSize, sqlite3ErrName(rc)));
return rc;
}
@@ -32583,6 +33239,7 @@ static int winFileSize(sqlite3_file *id, sqlite3_int64 *pSize){
*/
static int getReadLock(winFile *pFile){
int res;
+ OSTRACE(("READ-LOCK file=%p, lock=%d\n", pFile->h, pFile->locktype));
if( isNT() ){
#if SQLITE_OS_WINCE
/*
@@ -32608,6 +33265,7 @@ static int getReadLock(winFile *pFile){
pFile->lastErrno = osGetLastError();
/* No need to log a failure to lock */
}
+ OSTRACE(("READ-LOCK file=%p, rc=%s\n", pFile->h, sqlite3ErrName(res)));
return res;
}
@@ -32617,6 +33275,7 @@ static int getReadLock(winFile *pFile){
static int unlockReadLock(winFile *pFile){
int res;
DWORD lastErrno;
+ OSTRACE(("READ-UNLOCK file=%p, lock=%d\n", pFile->h, pFile->locktype));
if( isNT() ){
res = winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0);
}
@@ -32630,6 +33289,7 @@ static int unlockReadLock(winFile *pFile){
winLogError(SQLITE_IOERR_UNLOCK, pFile->lastErrno,
"unlockReadLock", pFile->zPath);
}
+ OSTRACE(("READ-UNLOCK file=%p, rc=%s\n", pFile->h, sqlite3ErrName(res)));
return res;
}
@@ -32668,14 +33328,15 @@ static int winLock(sqlite3_file *id, int locktype){
DWORD lastErrno = NO_ERROR;
assert( id!=0 );
- OSTRACE(("LOCK %d %d was %d(%d)\n",
- pFile->h, locktype, pFile->locktype, pFile->sharedLockByte));
+ OSTRACE(("LOCK file=%p, oldLock=%d(%d), newLock=%d\n",
+ pFile->h, pFile->locktype, pFile->sharedLockByte, locktype));
/* If there is already a lock of this type or more restrictive on the
** OsFile, do nothing. Don't use the end_lock: exit path, as
** sqlite3OsEnterMutex() hasn't been called yet.
*/
if( pFile->locktype>=locktype ){
+ OSTRACE(("LOCK-HELD file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
@@ -32703,7 +33364,8 @@ static int winLock(sqlite3_file *id, int locktype){
** If you are using this code as a model for alternative VFSes, do not
** copy this retry logic. It is a hack intended for Windows only.
*/
- OSTRACE(("could not get a PENDING lock. cnt=%d\n", cnt));
+ OSTRACE(("LOCK-PENDING-FAIL file=%p, count=%d, rc=%s\n",
+ pFile->h, cnt, sqlite3ErrName(res)));
if( cnt ) sqlite3_win32_sleep(1);
}
gotPendingLock = res;
@@ -32748,14 +33410,12 @@ static int winLock(sqlite3_file *id, int locktype){
if( locktype==EXCLUSIVE_LOCK && res ){
assert( pFile->locktype>=SHARED_LOCK );
res = unlockReadLock(pFile);
- OSTRACE(("unreadlock = %d\n", res));
res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, SHARED_FIRST, 0,
SHARED_SIZE, 0);
if( res ){
newLocktype = EXCLUSIVE_LOCK;
}else{
lastErrno = osGetLastError();
- OSTRACE(("error-code = %d\n", lastErrno));
getReadLock(pFile);
}
}
@@ -32773,12 +33433,14 @@ static int winLock(sqlite3_file *id, int locktype){
if( res ){
rc = SQLITE_OK;
}else{
- OSTRACE(("LOCK FAILED %d trying for %d but got %d\n", pFile->h,
- locktype, newLocktype));
+ OSTRACE(("LOCK-FAIL file=%p, wanted=%d, got=%d\n",
+ pFile->h, locktype, newLocktype));
pFile->lastErrno = lastErrno;
rc = SQLITE_BUSY;
}
pFile->locktype = (u8)newLocktype;
+ OSTRACE(("LOCK file=%p, lock=%d, rc=%s\n",
+ pFile->h, pFile->locktype, sqlite3ErrName(rc)));
return rc;
}
@@ -32792,20 +33454,23 @@ static int winCheckReservedLock(sqlite3_file *id, int *pResOut){
winFile *pFile = (winFile*)id;
SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
+ OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p\n", pFile->h, pResOut));
assert( id!=0 );
if( pFile->locktype>=RESERVED_LOCK ){
rc = 1;
- OSTRACE(("TEST WR-LOCK %d %d (local)\n", pFile->h, rc));
+ OSTRACE(("TEST-WR-LOCK file=%p, rc=%d (local)\n", pFile->h, rc));
}else{
rc = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS,RESERVED_BYTE, 0, 1, 0);
if( rc ){
winUnlockFile(&pFile->h, RESERVED_BYTE, 0, 1, 0);
}
rc = !rc;
- OSTRACE(("TEST WR-LOCK %d %d (remote)\n", pFile->h, rc));
+ OSTRACE(("TEST-WR-LOCK file=%p, rc=%d (remote)\n", pFile->h, rc));
}
*pResOut = rc;
+ OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n",
+ pFile->h, pResOut, *pResOut));
return SQLITE_OK;
}
@@ -32826,8 +33491,8 @@ static int winUnlock(sqlite3_file *id, int locktype){
int rc = SQLITE_OK;
assert( pFile!=0 );
assert( locktype<=SHARED_LOCK );
- OSTRACE(("UNLOCK %d to %d was %d(%d)\n", pFile->h, locktype,
- pFile->locktype, pFile->sharedLockByte));
+ OSTRACE(("UNLOCK file=%p, oldLock=%d(%d), newLock=%d\n",
+ pFile->h, pFile->locktype, pFile->sharedLockByte, locktype));
type = pFile->locktype;
if( type>=EXCLUSIVE_LOCK ){
winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0);
@@ -32848,6 +33513,8 @@ static int winUnlock(sqlite3_file *id, int locktype){
winUnlockFile(&pFile->h, PENDING_BYTE, 0, 1, 0);
}
pFile->locktype = (u8)locktype;
+ OSTRACE(("UNLOCK file=%p, lock=%d, rc=%s\n",
+ pFile->h, pFile->locktype, sqlite3ErrName(rc)));
return rc;
}
@@ -32875,17 +33542,21 @@ static int getTempname(int nBuf, char *zBuf);
*/
static int winFileControl(sqlite3_file *id, int op, void *pArg){
winFile *pFile = (winFile*)id;
+ OSTRACE(("FCNTL file=%p, op=%d, pArg=%p\n", pFile->h, op, pArg));
switch( op ){
case SQLITE_FCNTL_LOCKSTATE: {
*(int*)pArg = pFile->locktype;
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
case SQLITE_LAST_ERRNO: {
*(int*)pArg = (int)pFile->lastErrno;
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
case SQLITE_FCNTL_CHUNK_SIZE: {
pFile->szChunk = *(int *)pArg;
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
case SQLITE_FCNTL_SIZE_HINT: {
@@ -32900,20 +33571,25 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){
SimulateIOErrorBenign(0);
}
}
+ OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc)));
return rc;
}
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
case SQLITE_FCNTL_PERSIST_WAL: {
winModeBit(pFile, WINFILE_PERSIST_WAL, (int*)pArg);
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
case SQLITE_FCNTL_POWERSAFE_OVERWRITE: {
winModeBit(pFile, WINFILE_PSOW, (int*)pArg);
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
case SQLITE_FCNTL_VFSNAME: {
*(char**)pArg = sqlite3_mprintf("win32");
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
case SQLITE_FCNTL_WIN32_AV_RETRY: {
@@ -32928,6 +33604,7 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){
}else{
a[1] = win32IoerrRetryDelay;
}
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
case SQLITE_FCNTL_TEMPFILENAME: {
@@ -32936,9 +33613,23 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){
getTempname(pFile->pVfs->mxPathname, zTFile);
*(char**)pArg = zTFile;
}
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
+#if SQLITE_MAX_MMAP_SIZE>0
+ case SQLITE_FCNTL_MMAP_SIZE: {
+ i64 newLimit = *(i64*)pArg;
+ if( newLimit>sqlite3GlobalConfig.mxMmap ){
+ newLimit = sqlite3GlobalConfig.mxMmap;
+ }
+ *(i64*)pArg = pFile->mmapSizeMax;
+ if( newLimit>=0 ) pFile->mmapSizeMax = newLimit;
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
+#endif
}
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_NOTFOUND\n", pFile->h));
return SQLITE_NOTFOUND;
}
@@ -32966,8 +33657,6 @@ static int winDeviceCharacteristics(sqlite3_file *id){
((p->ctrlFlags & WINFILE_PSOW)?SQLITE_IOCAP_POWERSAFE_OVERWRITE:0);
}
-#ifndef SQLITE_OMIT_WAL
-
/*
** Windows will only let you create file view mappings
** on allocation size granularity boundaries.
@@ -32976,6 +33665,8 @@ static int winDeviceCharacteristics(sqlite3_file *id){
*/
SYSTEM_INFO winSysInfo;
+#ifndef SQLITE_OMIT_WAL
+
/*
** Helper functions to obtain and relinquish the global mutex. The
** global mutex is used to protect the winLockInfo objects used by
@@ -33099,6 +33790,9 @@ static int winShmSystemLock(
/* Access to the winShmNode object is serialized by the caller */
assert( sqlite3_mutex_held(pFile->mutex) || pFile->nRef==0 );
+ OSTRACE(("SHM-LOCK file=%p, lock=%d, offset=%d, size=%d\n",
+ pFile->hFile.h, lockType, ofst, nByte));
+
/* Release/Acquire the system-level lock */
if( lockType==_SHM_UNLCK ){
rc = winUnlockFile(&pFile->hFile.h, ofst, 0, nByte, 0);
@@ -33116,11 +33810,9 @@ static int winShmSystemLock(
rc = SQLITE_BUSY;
}
- OSTRACE(("SHM-LOCK %d %s %s 0x%08lx\n",
- pFile->hFile.h,
- rc==SQLITE_OK ? "ok" : "failed",
- lockType==_SHM_UNLCK ? "UnlockFileEx" : "LockFileEx",
- pFile->lastErrno));
+ OSTRACE(("SHM-LOCK file=%p, func=%s, errno=%lu, rc=%s\n",
+ pFile->hFile.h, (lockType == _SHM_UNLCK) ? "winUnlockFile" :
+ "winLockFile", pFile->lastErrno, sqlite3ErrName(rc)));
return rc;
}
@@ -33140,6 +33832,8 @@ static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){
winShmNode *p;
BOOL bRc;
assert( winShmMutexHeld() );
+ OSTRACE(("SHM-PURGE pid=%lu, deleteFlag=%d\n",
+ osGetCurrentProcessId(), deleteFlag));
pp = &winShmNodeList;
while( (p = *pp)!=0 ){
if( p->nRef==0 ){
@@ -33147,13 +33841,11 @@ static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){
if( p->mutex ) sqlite3_mutex_free(p->mutex);
for(i=0; i<p->nRegion; i++){
bRc = osUnmapViewOfFile(p->aRegion[i].pMap);
- OSTRACE(("SHM-PURGE pid-%d unmap region=%d %s\n",
- (int)osGetCurrentProcessId(), i,
- bRc ? "ok" : "failed"));
+ OSTRACE(("SHM-PURGE-UNMAP pid=%lu, region=%d, rc=%s\n",
+ osGetCurrentProcessId(), i, bRc ? "ok" : "failed"));
bRc = osCloseHandle(p->aRegion[i].hMap);
- OSTRACE(("SHM-PURGE pid-%d close region=%d %s\n",
- (int)osGetCurrentProcessId(), i,
- bRc ? "ok" : "failed"));
+ OSTRACE(("SHM-PURGE-CLOSE pid=%lu, region=%d, rc=%s\n",
+ osGetCurrentProcessId(), i, bRc ? "ok" : "failed"));
}
if( p->hFile.h!=NULL && p->hFile.h!=INVALID_HANDLE_VALUE ){
SimulateIOErrorBenign(1);
@@ -33432,9 +34124,9 @@ static int winShmLock(
}
}
sqlite3_mutex_leave(pShmNode->mutex);
- OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x %s\n",
- p->id, (int)osGetCurrentProcessId(), p->sharedMask, p->exclMask,
- rc ? "failed" : "ok"));
+ OSTRACE(("SHM-LOCK pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x, rc=%s\n",
+ osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask,
+ sqlite3ErrName(rc)));
return rc;
}
@@ -33555,8 +34247,8 @@ static int winShmMap(
NULL, PAGE_READWRITE, 0, nByte, NULL
);
#endif
- OSTRACE(("SHM-MAP pid-%d create region=%d nbyte=%d %s\n",
- (int)osGetCurrentProcessId(), pShmNode->nRegion, nByte,
+ OSTRACE(("SHM-MAP-CREATE pid=%lu, region=%d, size=%d, rc=%s\n",
+ osGetCurrentProcessId(), pShmNode->nRegion, nByte,
hMap ? "ok" : "failed"));
if( hMap ){
int iOffset = pShmNode->nRegion*szRegion;
@@ -33570,8 +34262,8 @@ static int winShmMap(
0, iOffset - iOffsetShift, szRegion + iOffsetShift
);
#endif
- OSTRACE(("SHM-MAP pid-%d map region=%d offset=%d size=%d %s\n",
- (int)osGetCurrentProcessId(), pShmNode->nRegion, iOffset,
+ OSTRACE(("SHM-MAP-MAP pid=%lu, region=%d, offset=%d, size=%d, rc=%s\n",
+ osGetCurrentProcessId(), pShmNode->nRegion, iOffset,
szRegion, pMap ? "ok" : "failed"));
}
if( !pMap ){
@@ -33608,6 +34300,230 @@ shmpage_out:
# define winShmUnmap 0
#endif /* #ifndef SQLITE_OMIT_WAL */
+/*
+** Cleans up the mapped region of the specified file, if any.
+*/
+#if SQLITE_MAX_MMAP_SIZE>0
+static int winUnmapfile(winFile *pFile){
+ assert( pFile!=0 );
+ OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, hMap=%p, pMapRegion=%p, "
+ "mmapSize=%lld, mmapSizeActual=%lld, mmapSizeMax=%lld\n",
+ osGetCurrentProcessId(), pFile, pFile->hMap, pFile->pMapRegion,
+ pFile->mmapSize, pFile->mmapSizeActual, pFile->mmapSizeMax));
+ if( pFile->pMapRegion ){
+ if( !osUnmapViewOfFile(pFile->pMapRegion) ){
+ pFile->lastErrno = osGetLastError();
+ OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, pMapRegion=%p, "
+ "rc=SQLITE_IOERR_MMAP\n", osGetCurrentProcessId(), pFile,
+ pFile->pMapRegion));
+ return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno,
+ "winUnmap1", pFile->zPath);
+ }
+ pFile->pMapRegion = 0;
+ pFile->mmapSize = 0;
+ pFile->mmapSizeActual = 0;
+ }
+ if( pFile->hMap!=NULL ){
+ if( !osCloseHandle(pFile->hMap) ){
+ pFile->lastErrno = osGetLastError();
+ OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, hMap=%p, rc=SQLITE_IOERR_MMAP\n",
+ osGetCurrentProcessId(), pFile, pFile->hMap));
+ return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno,
+ "winUnmap2", pFile->zPath);
+ }
+ pFile->hMap = NULL;
+ }
+ OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), pFile));
+ return SQLITE_OK;
+}
+
+/*
+** Memory map or remap the file opened by file-descriptor pFd (if the file
+** is already mapped, the existing mapping is replaced by the new). Or, if
+** there already exists a mapping for this file, and there are still
+** outstanding xFetch() references to it, this function is a no-op.
+**
+** If parameter nByte is non-negative, then it is the requested size of
+** the mapping to create. Otherwise, if nByte is less than zero, then the
+** requested size is the size of the file on disk. The actual size of the
+** created mapping is either the requested size or the value configured
+** using SQLITE_FCNTL_MMAP_SIZE, whichever is smaller.
+**
+** SQLITE_OK is returned if no error occurs (even if the mapping is not
+** recreated as a result of outstanding references) or an SQLite error
+** code otherwise.
+*/
+static int winMapfile(winFile *pFd, sqlite3_int64 nByte){
+ sqlite3_int64 nMap = nByte;
+ int rc;
+
+ assert( nMap>=0 || pFd->nFetchOut==0 );
+ OSTRACE(("MAP-FILE pid=%lu, pFile=%p, size=%lld\n",
+ osGetCurrentProcessId(), pFd, nByte));
+
+ if( pFd->nFetchOut>0 ) return SQLITE_OK;
+
+ if( nMap<0 ){
+ rc = winFileSize((sqlite3_file*)pFd, &nMap);
+ if( rc ){
+ OSTRACE(("MAP-FILE pid=%lu, pFile=%p, rc=SQLITE_IOERR_FSTAT\n",
+ osGetCurrentProcessId(), pFd));
+ return SQLITE_IOERR_FSTAT;
+ }
+ }
+ if( nMap>pFd->mmapSizeMax ){
+ nMap = pFd->mmapSizeMax;
+ }
+ nMap &= ~(sqlite3_int64)(winSysInfo.dwPageSize - 1);
+
+ if( nMap==0 && pFd->mmapSize>0 ){
+ winUnmapfile(pFd);
+ }
+ if( nMap!=pFd->mmapSize ){
+ void *pNew = 0;
+ DWORD protect = PAGE_READONLY;
+ DWORD flags = FILE_MAP_READ;
+
+ winUnmapfile(pFd);
+ if( (pFd->ctrlFlags & WINFILE_RDONLY)==0 ){
+ protect = PAGE_READWRITE;
+ flags |= FILE_MAP_WRITE;
+ }
+#if SQLITE_OS_WINRT
+ pFd->hMap = osCreateFileMappingFromApp(pFd->h, NULL, protect, nMap, NULL);
+#elif defined(SQLITE_WIN32_HAS_WIDE)
+ pFd->hMap = osCreateFileMappingW(pFd->h, NULL, protect,
+ (DWORD)((nMap>>32) & 0xffffffff),
+ (DWORD)(nMap & 0xffffffff), NULL);
+#elif defined(SQLITE_WIN32_HAS_ANSI)
+ pFd->hMap = osCreateFileMappingA(pFd->h, NULL, protect,
+ (DWORD)((nMap>>32) & 0xffffffff),
+ (DWORD)(nMap & 0xffffffff), NULL);
+#endif
+ if( pFd->hMap==NULL ){
+ pFd->lastErrno = osGetLastError();
+ rc = winLogError(SQLITE_IOERR_MMAP, pFd->lastErrno,
+ "winMapfile", pFd->zPath);
+ /* Log the error, but continue normal operation using xRead/xWrite */
+ OSTRACE(("MAP-FILE-CREATE pid=%lu, pFile=%p, rc=SQLITE_IOERR_MMAP\n",
+ osGetCurrentProcessId(), pFd));
+ return SQLITE_OK;
+ }
+ assert( (nMap % winSysInfo.dwPageSize)==0 );
+#if SQLITE_OS_WINRT
+ pNew = osMapViewOfFileFromApp(pFd->hMap, flags, 0, nMap);
+#else
+ assert( sizeof(SIZE_T)==sizeof(sqlite3_int64) || nMap<=0xffffffff );
+ pNew = osMapViewOfFile(pFd->hMap, flags, 0, 0, (SIZE_T)nMap);
+#endif
+ if( pNew==NULL ){
+ osCloseHandle(pFd->hMap);
+ pFd->hMap = NULL;
+ pFd->lastErrno = osGetLastError();
+ winLogError(SQLITE_IOERR_MMAP, pFd->lastErrno,
+ "winMapfile", pFd->zPath);
+ OSTRACE(("MAP-FILE-MAP pid=%lu, pFile=%p, rc=SQLITE_IOERR_MMAP\n",
+ osGetCurrentProcessId(), pFd));
+ return SQLITE_OK;
+ }
+ pFd->pMapRegion = pNew;
+ pFd->mmapSize = nMap;
+ pFd->mmapSizeActual = nMap;
+ }
+
+ OSTRACE(("MAP-FILE pid=%lu, pFile=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), pFd));
+ return SQLITE_OK;
+}
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
+
+/*
+** If possible, return a pointer to a mapping of file fd starting at offset
+** iOff. The mapping must be valid for at least nAmt bytes.
+**
+** If such a pointer can be obtained, store it in *pp and return SQLITE_OK.
+** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK.
+** Finally, if an error does occur, return an SQLite error code. The final
+** value of *pp is undefined in this case.
+**
+** If this function does return a pointer, the caller must eventually
+** release the reference by calling winUnfetch().
+*/
+static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
+#if SQLITE_MAX_MMAP_SIZE>0
+ winFile *pFd = (winFile*)fd; /* The underlying database file */
+#endif
+ *pp = 0;
+
+ OSTRACE(("FETCH pid=%lu, pFile=%p, offset=%lld, amount=%d, pp=%p\n",
+ osGetCurrentProcessId(), fd, iOff, nAmt, pp));
+
+#if SQLITE_MAX_MMAP_SIZE>0
+ if( pFd->mmapSizeMax>0 ){
+ if( pFd->pMapRegion==0 ){
+ int rc = winMapfile(pFd, -1);
+ if( rc!=SQLITE_OK ){
+ OSTRACE(("FETCH pid=%lu, pFile=%p, rc=%s\n",
+ osGetCurrentProcessId(), pFd, sqlite3ErrName(rc)));
+ return rc;
+ }
+ }
+ if( pFd->mmapSize >= iOff+nAmt ){
+ *pp = &((u8 *)pFd->pMapRegion)[iOff];
+ pFd->nFetchOut++;
+ }
+ }
+#endif
+
+ OSTRACE(("FETCH pid=%lu, pFile=%p, pp=%p, *pp=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), fd, pp, *pp));
+ return SQLITE_OK;
+}
+
+/*
+** If the third argument is non-NULL, then this function releases a
+** reference obtained by an earlier call to winFetch(). The second
+** argument passed to this function must be the same as the corresponding
+** argument that was passed to the winFetch() invocation.
+**
+** Or, if the third argument is NULL, then this function is being called
+** to inform the VFS layer that, according to POSIX, any existing mapping
+** may now be invalid and should be unmapped.
+*/
+static int winUnfetch(sqlite3_file *fd, i64 iOff, void *p){
+#if SQLITE_MAX_MMAP_SIZE>0
+ winFile *pFd = (winFile*)fd; /* The underlying database file */
+
+ /* If p==0 (unmap the entire file) then there must be no outstanding
+ ** xFetch references. Or, if p!=0 (meaning it is an xFetch reference),
+ ** then there must be at least one outstanding. */
+ assert( (p==0)==(pFd->nFetchOut==0) );
+
+ /* If p!=0, it must match the iOff value. */
+ assert( p==0 || p==&((u8 *)pFd->pMapRegion)[iOff] );
+
+ OSTRACE(("UNFETCH pid=%lu, pFile=%p, offset=%lld, p=%p\n",
+ osGetCurrentProcessId(), pFd, iOff, p));
+
+ if( p ){
+ pFd->nFetchOut--;
+ }else{
+ /* FIXME: If Windows truly always prevents truncating or deleting a
+ ** file while a mapping is held, then the following winUnmapfile() call
+ ** is unnecessary and can be omitted - potentially improving
+ ** performance. */
+ winUnmapfile(pFd);
+ }
+
+ assert( pFd->nFetchOut>=0 );
+#endif
+
+ OSTRACE(("UNFETCH pid=%lu, pFile=%p, rc=SQLITE_OK\n",
+ osGetCurrentProcessId(), fd));
+ return SQLITE_OK;
+}
+
/*
** Here ends the implementation of all sqlite3_file methods.
**
@@ -33619,7 +34535,7 @@ shmpage_out:
** sqlite3_file for win32.
*/
static const sqlite3_io_methods winIoMethod = {
- 2, /* iVersion */
+ 3, /* iVersion */
winClose, /* xClose */
winRead, /* xRead */
winWrite, /* xWrite */
@@ -33635,7 +34551,9 @@ static const sqlite3_io_methods winIoMethod = {
winShmMap, /* xShmMap */
winShmLock, /* xShmLock */
winShmBarrier, /* xShmBarrier */
- winShmUnmap /* xShmUnmap */
+ winShmUnmap, /* xShmUnmap */
+ winFetch, /* xFetch */
+ winUnfetch /* xUnfetch */
};
/****************************************************************************
@@ -33699,6 +34617,7 @@ static int getTempname(int nBuf, char *zBuf){
sqlite3_snprintf(MAX_PATH-30, zTempPath, "%s", zMulti);
sqlite3_free(zMulti);
}else{
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
return SQLITE_IOERR_NOMEM;
}
}
@@ -33712,6 +34631,7 @@ static int getTempname(int nBuf, char *zBuf){
sqlite3_snprintf(MAX_PATH-30, zTempPath, "%s", zUtf8);
sqlite3_free(zUtf8);
}else{
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
return SQLITE_IOERR_NOMEM;
}
}
@@ -33724,6 +34644,7 @@ static int getTempname(int nBuf, char *zBuf){
nTempPath = sqlite3Strlen30(zTempPath);
if( (nTempPath + sqlite3Strlen30(SQLITE_TEMP_FILE_PREFIX) + 18) >= nBuf ){
+ OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n"));
return SQLITE_ERROR;
}
@@ -33741,8 +34662,8 @@ static int getTempname(int nBuf, char *zBuf){
zBuf[j] = 0;
zBuf[j+1] = 0;
- OSTRACE(("TEMP FILENAME: %s\n", zBuf));
- return SQLITE_OK;
+ OSTRACE(("TEMP-FILENAME name=%s, rc=SQLITE_OK\n", zBuf));
+ return SQLITE_OK;
}
/*
@@ -33811,9 +34732,7 @@ static int winOpen(
int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE);
int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE);
int isCreate = (flags & SQLITE_OPEN_CREATE);
-#ifndef NDEBUG
int isReadonly = (flags & SQLITE_OPEN_READONLY);
-#endif
int isReadWrite = (flags & SQLITE_OPEN_READWRITE);
#ifndef NDEBUG
@@ -33824,6 +34743,9 @@ static int winOpen(
));
#endif
+ OSTRACE(("OPEN name=%s, pFile=%p, flags=%x, pOutFlags=%p\n",
+ zUtf8Name, id, flags, pOutFlags));
+
/* Check the following statements are true:
**
** (a) Exactly one of the READWRITE and READONLY flags must be set, and
@@ -33869,6 +34791,7 @@ static int winOpen(
memset(zTmpname, 0, MAX_PATH+2);
rc = getTempname(MAX_PATH+2, zTmpname);
if( rc!=SQLITE_OK ){
+ OSTRACE(("OPEN name=%s, rc=%s", zUtf8Name, sqlite3ErrName(rc)));
return rc;
}
zUtf8Name = zTmpname;
@@ -33884,11 +34807,13 @@ static int winOpen(
/* Convert the filename to the system encoding. */
zConverted = convertUtf8Filename(zUtf8Name);
if( zConverted==0 ){
+ OSTRACE(("OPEN name=%s, rc=SQLITE_IOERR_NOMEM", zUtf8Name));
return SQLITE_IOERR_NOMEM;
}
if( winIsDir(zConverted) ){
sqlite3_free(zConverted);
+ OSTRACE(("OPEN name=%s, rc=SQLITE_CANTOPEN_ISDIR", zUtf8Name));
return SQLITE_CANTOPEN_ISDIR;
}
@@ -33979,9 +34904,8 @@ static int winOpen(
#endif
logIoerr(cnt);
- OSTRACE(("OPEN %d %s 0x%lx %s\n",
- h, zName, dwDesiredAccess,
- h==INVALID_HANDLE_VALUE ? "failed" : "ok"));
+ OSTRACE(("OPEN file=%p, name=%s, access=%lx, rc=%s\n", h, zUtf8Name,
+ dwDesiredAccess, (h==INVALID_HANDLE_VALUE) ? "failed" : "ok"));
if( h==INVALID_HANDLE_VALUE ){
pFile->lastErrno = lastErrno;
@@ -34005,12 +34929,17 @@ static int winOpen(
}
}
+ OSTRACE(("OPEN file=%p, name=%s, access=%lx, pOutFlags=%p, *pOutFlags=%d, "
+ "rc=%s\n", h, zUtf8Name, dwDesiredAccess, pOutFlags, pOutFlags ?
+ *pOutFlags : 0, (h==INVALID_HANDLE_VALUE) ? "failed" : "ok"));
+
#if SQLITE_OS_WINCE
if( isReadWrite && eType==SQLITE_OPEN_MAIN_DB
&& (rc = winceCreateLock(zName, pFile))!=SQLITE_OK
){
osCloseHandle(h);
sqlite3_free(zConverted);
+ OSTRACE(("OPEN-CE-LOCK name=%s, rc=%s\n", zName, sqlite3ErrName(rc)));
return rc;
}
if( isTemp ){
@@ -34024,11 +34953,21 @@ static int winOpen(
pFile->pMethod = &winIoMethod;
pFile->pVfs = pVfs;
pFile->h = h;
+ if( isReadonly ){
+ pFile->ctrlFlags |= WINFILE_RDONLY;
+ }
if( sqlite3_uri_boolean(zName, "psow", SQLITE_POWERSAFE_OVERWRITE) ){
pFile->ctrlFlags |= WINFILE_PSOW;
}
pFile->lastErrno = NO_ERROR;
pFile->zPath = zName;
+#if SQLITE_MAX_MMAP_SIZE>0
+ pFile->hMap = NULL;
+ pFile->pMapRegion = 0;
+ pFile->mmapSize = 0;
+ pFile->mmapSizeActual = 0;
+ pFile->mmapSizeMax = sqlite3GlobalConfig.szMmap;
+#endif
OpenCounter(+1);
return rc;
@@ -34060,6 +34999,8 @@ static int winDelete(
UNUSED_PARAMETER(syncDir);
SimulateIOError(return SQLITE_IOERR_DELETE);
+ OSTRACE(("DELETE name=%s, syncDir=%d\n", zFilename, syncDir));
+
zConverted = convertUtf8Filename(zFilename);
if( zConverted==0 ){
return SQLITE_IOERR_NOMEM;
@@ -34145,7 +35086,7 @@ static int winDelete(
logIoerr(cnt);
}
sqlite3_free(zConverted);
- OSTRACE(("DELETE \"%s\" %s\n", zFilename, (rc ? "failed" : "ok" )));
+ OSTRACE(("DELETE name=%s, rc=%s\n", zFilename, sqlite3ErrName(rc)));
return rc;
}
@@ -34165,8 +35106,12 @@ static int winAccess(
UNUSED_PARAMETER(pVfs);
SimulateIOError( return SQLITE_IOERR_ACCESS; );
+ OSTRACE(("ACCESS name=%s, flags=%x, pResOut=%p\n",
+ zFilename, flags, pResOut));
+
zConverted = convertUtf8Filename(zFilename);
if( zConverted==0 ){
+ OSTRACE(("ACCESS name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename));
return SQLITE_IOERR_NOMEM;
}
if( isNT() ){
@@ -34217,6 +35162,8 @@ static int winAccess(
assert(!"Invalid flags argument");
}
*pResOut = rc;
+ OSTRACE(("ACCESS name=%s, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n",
+ zFilename, pResOut, *pResOut));
return SQLITE_OK;
}
@@ -34657,7 +35604,6 @@ SQLITE_API int sqlite3_os_init(void){
** correctly. See ticket [bb3a86e890c8e96ab] */
assert( ArraySize(aSyscall)==74 );
-#ifndef SQLITE_OMIT_WAL
/* get memory map allocation granularity */
memset(&winSysInfo, 0, sizeof(SYSTEM_INFO));
#if SQLITE_OS_WINRT
@@ -34665,8 +35611,8 @@ SQLITE_API int sqlite3_os_init(void){
#else
osGetSystemInfo(&winSysInfo);
#endif
- assert(winSysInfo.dwAllocationGranularity > 0);
-#endif
+ assert( winSysInfo.dwAllocationGranularity>0 );
+ assert( winSysInfo.dwPageSize>0 );
sqlite3_vfs_register(&winVfs, 1);
return SQLITE_OK;
@@ -37303,7 +38249,6 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, u8 iBatch, sqlite3_int64 i
# define sqlite3WalClose(w,x,y,z) 0
# define sqlite3WalBeginReadTransaction(y,z) 0
# define sqlite3WalEndReadTransaction(z)
-# define sqlite3WalRead(v,w,x,y,z) 0
# define sqlite3WalDbsize(y) 0
# define sqlite3WalBeginWriteTransaction(y) 0
# define sqlite3WalEndWriteTransaction(x) 0
@@ -37316,6 +38261,7 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, u8 iBatch, sqlite3_int64 i
# define sqlite3WalExclusiveMode(y,z) 0
# define sqlite3WalHeapMemory(z) 0
# define sqlite3WalFramesize(z) 0
+# define sqlite3WalFindFrame(x,y,z) 0
#else
#define WAL_SAVEPOINT_NDATA 4
@@ -37343,7 +38289,8 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *);
SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal);
/* Read a page from the write-ahead log, if it is present. */
-SQLITE_PRIVATE int sqlite3WalRead(Wal *pWal, Pgno pgno, int *pInWal, int nOut, u8 *pOut);
+SQLITE_PRIVATE int sqlite3WalFindFrame(Wal *, Pgno, u32 *);
+SQLITE_PRIVATE int sqlite3WalReadFrame(Wal *, u32, int, u8 *);
/* If the WAL is not empty, return the size of the database. */
SQLITE_PRIVATE Pgno sqlite3WalDbsize(Wal *pWal);
@@ -38043,6 +38990,11 @@ struct Pager {
PagerSavepoint *aSavepoint; /* Array of active savepoints */
int nSavepoint; /* Number of elements in aSavepoint[] */
char dbFileVers[16]; /* Changes whenever database file changes */
+
+ u8 bUseFetch; /* True to use xFetch() */
+ int nMmapOut; /* Number of mmap pages currently outstanding */
+ sqlite3_int64 szMmap; /* Desired maximum mmap size */
+ PgHdr *pMmapFreelist; /* List of free mmap page headers (pDirty) */
/*
** End of the routinely-changing class members
***************************************************************************/
@@ -38153,6 +39105,16 @@ static const unsigned char aJournalMagic[] = {
# define MEMDB pPager->memDb
#endif
+/*
+** The macro USEFETCH is true if we are allowed to use the xFetch and xUnfetch
+** interfaces to access the database using memory-mapped I/O.
+*/
+#if SQLITE_MAX_MMAP_SIZE>0
+# define USEFETCH(x) ((x)->bUseFetch)
+#else
+# define USEFETCH(x) 0
+#endif
+
/*
** The maximum legal page number is (2^31 - 1).
*/
@@ -39640,7 +40602,7 @@ static int pager_playback_one_page(
i64 ofst = (pgno-1)*(i64)pPager->pageSize;
testcase( !isSavepnt && pPg!=0 && (pPg->flags&PGHDR_NEED_SYNC)!=0 );
assert( !pagerUseWal(pPager) );
- rc = sqlite3OsWrite(pPager->fd, (u8*)aData, pPager->pageSize, ofst);
+ rc = sqlite3OsWrite(pPager->fd, (u8 *)aData, pPager->pageSize, ofst);
if( pgno>pPager->dbFileSize ){
pPager->dbFileSize = pgno;
}
@@ -40031,6 +40993,7 @@ static int pager_playback(Pager *pPager, int isHot){
int res = 1; /* Value returned by sqlite3OsAccess() */
char *zMaster = 0; /* Name of master journal file if any */
int needPagerReset; /* True to reset page prior to first page rollback */
+ int nPlayback = 0; /* Total number of pages restored from journal */
/* Figure out how many records are in the journal. Abort early if
** the journal is empty.
@@ -40131,7 +41094,9 @@ static int pager_playback(Pager *pPager, int isHot){
needPagerReset = 0;
}
rc = pager_playback_one_page(pPager,&pPager->journalOff,0,1,0);
- if( rc!=SQLITE_OK ){
+ if( rc==SQLITE_OK ){
+ nPlayback++;
+ }else{
if( rc==SQLITE_DONE ){
pPager->journalOff = szJ;
break;
@@ -40201,6 +41166,10 @@ end_playback:
rc = pager_delmaster(pPager, zMaster);
testcase( rc!=SQLITE_OK );
}
+ if( isHot && nPlayback ){
+ sqlite3_log(SQLITE_NOTICE_RECOVER_ROLLBACK, "recovered %d pages from %s",
+ nPlayback, pPager->zJournal);
+ }
/* The Pager.sectorSize variable may have been updated while rolling
** back a journal created by a process with a different sector size
@@ -40222,11 +41191,10 @@ end_playback:
** If an IO error occurs, then the IO error is returned to the caller.
** Otherwise, SQLITE_OK is returned.
*/
-static int readDbPage(PgHdr *pPg){
+static int readDbPage(PgHdr *pPg, u32 iFrame){
Pager *pPager = pPg->pPager; /* Pager object associated with page pPg */
Pgno pgno = pPg->pgno; /* Page number to read */
int rc = SQLITE_OK; /* Return code */
- int isInWal = 0; /* True if page is in log file */
int pgsz = pPager->pageSize; /* Number of bytes to read */
assert( pPager->eState>=PAGER_READER && !MEMDB );
@@ -40238,11 +41206,13 @@ static int readDbPage(PgHdr *pPg){
return SQLITE_OK;
}
- if( pagerUseWal(pPager) ){
+#ifndef SQLITE_OMIT_WAL
+ if( iFrame ){
/* Try to pull the page from the write-ahead log. */
- rc = sqlite3WalRead(pPager->pWal, pgno, &isInWal, pgsz, pPg->pData);
- }
- if( rc==SQLITE_OK && !isInWal ){
+ rc = sqlite3WalReadFrame(pPager->pWal, iFrame, pgsz, pPg->pData);
+ }else
+#endif
+ {
i64 iOffset = (pgno-1)*(i64)pPager->pageSize;
rc = sqlite3OsRead(pPager->fd, pPg->pData, pgsz, iOffset);
if( rc==SQLITE_IOERR_SHORT_READ ){
@@ -40321,12 +41291,17 @@ static int pagerUndoCallback(void *pCtx, Pgno iPg){
Pager *pPager = (Pager *)pCtx;
PgHdr *pPg;
+ assert( pagerUseWal(pPager) );
pPg = sqlite3PagerLookup(pPager, iPg);
if( pPg ){
if( sqlite3PcachePageRefcount(pPg)==1 ){
sqlite3PcacheDrop(pPg);
}else{
- rc = readDbPage(pPg);
+ u32 iFrame = 0;
+ rc = sqlite3WalFindFrame(pPager->pWal, pPg->pgno, &iFrame);
+ if( rc==SQLITE_OK ){
+ rc = readDbPage(pPg, iFrame);
+ }
if( rc==SQLITE_OK ){
pPager->xReiniter(pPg);
}
@@ -40470,6 +41445,7 @@ static int pagerBeginReadTransaction(Pager *pPager){
rc = sqlite3WalBeginReadTransaction(pPager->pWal, &changed);
if( rc!=SQLITE_OK || changed ){
pager_reset(pPager);
+ if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0);
}
return rc;
@@ -40731,6 +41707,29 @@ SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager *pPager, int mxPage){
sqlite3PcacheSetCachesize(pPager->pPCache, mxPage);
}
+/*
+** Invoke SQLITE_FCNTL_MMAP_SIZE based on the current value of szMmap.
+*/
+static void pagerFixMaplimit(Pager *pPager){
+#if SQLITE_MAX_MMAP_SIZE>0
+ sqlite3_file *fd = pPager->fd;
+ if( isOpen(fd) ){
+ sqlite3_int64 sz;
+ pPager->bUseFetch = (fd->pMethods->iVersion>=3) && pPager->szMmap>0;
+ sz = pPager->szMmap;
+ sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_MMAP_SIZE, &sz);
+ }
+#endif
+}
+
+/*
+** Change the maximum size of any memory mapping made of the database file.
+*/
+SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *pPager, sqlite3_int64 szMmap){
+ pPager->szMmap = szMmap;
+ pagerFixMaplimit(pPager);
+}
+
/*
** Free as much memory as possible from the pager.
*/
@@ -40966,6 +41965,7 @@ SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager *pPager, u32 *pPageSize, int nR
assert( nReserve>=0 && nReserve<1000 );
pPager->nReserve = (i16)nReserve;
pagerReportSize(pPager);
+ pagerFixMaplimit(pPager);
}
return rc;
}
@@ -41191,6 +42191,81 @@ static int pagerSyncHotJournal(Pager *pPager){
return rc;
}
+/*
+** Obtain a reference to a memory mapped page object for page number pgno.
+** The new object will use the pointer pData, obtained from xFetch().
+** If successful, set *ppPage to point to the new page reference
+** and return SQLITE_OK. Otherwise, return an SQLite error code and set
+** *ppPage to zero.
+**
+** Page references obtained by calling this function should be released
+** by calling pagerReleaseMapPage().
+*/
+static int pagerAcquireMapPage(
+ Pager *pPager, /* Pager object */
+ Pgno pgno, /* Page number */
+ void *pData, /* xFetch()'d data for this page */
+ PgHdr **ppPage /* OUT: Acquired page object */
+){
+ PgHdr *p; /* Memory mapped page to return */
+
+ if( pPager->pMmapFreelist ){
+ *ppPage = p = pPager->pMmapFreelist;
+ pPager->pMmapFreelist = p->pDirty;
+ p->pDirty = 0;
+ memset(p->pExtra, 0, pPager->nExtra);
+ }else{
+ *ppPage = p = (PgHdr *)sqlite3MallocZero(sizeof(PgHdr) + pPager->nExtra);
+ if( p==0 ){
+ sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1) * pPager->pageSize, pData);
+ return SQLITE_NOMEM;
+ }
+ p->pExtra = (void *)&p[1];
+ p->flags = PGHDR_MMAP;
+ p->nRef = 1;
+ p->pPager = pPager;
+ }
+
+ assert( p->pExtra==(void *)&p[1] );
+ assert( p->pPage==0 );
+ assert( p->flags==PGHDR_MMAP );
+ assert( p->pPager==pPager );
+ assert( p->nRef==1 );
+
+ p->pgno = pgno;
+ p->pData = pData;
+ pPager->nMmapOut++;
+
+ return SQLITE_OK;
+}
+
+/*
+** Release a reference to page pPg. pPg must have been returned by an
+** earlier call to pagerAcquireMapPage().
+*/
+static void pagerReleaseMapPage(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ pPager->nMmapOut--;
+ pPg->pDirty = pPager->pMmapFreelist;
+ pPager->pMmapFreelist = pPg;
+
+ assert( pPager->fd->pMethods->iVersion>=3 );
+ sqlite3OsUnfetch(pPager->fd, (i64)(pPg->pgno-1)*pPager->pageSize, pPg->pData);
+}
+
+/*
+** Free all PgHdr objects stored in the Pager.pMmapFreelist list.
+*/
+static void pagerFreeMapHdrs(Pager *pPager){
+ PgHdr *p;
+ PgHdr *pNext;
+ for(p=pPager->pMmapFreelist; p; p=pNext){
+ pNext = p->pDirty;
+ sqlite3_free(p);
+ }
+}
+
+
/*
** Shutdown the page cache. Free all memory and close all files.
**
@@ -41211,6 +42286,7 @@ SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager){
assert( assert_pager_state(pPager) );
disable_simulated_io_errors();
sqlite3BeginBenignMalloc();
+ pagerFreeMapHdrs(pPager);
/* pPager->errCode = 0; */
pPager->exclusiveMode = 0;
#ifndef SQLITE_OMIT_WAL
@@ -41472,7 +42548,9 @@ static int pager_write_pagelist(Pager *pPager, PgHdr *pList){
** file size will be.
*/
assert( rc!=SQLITE_OK || isOpen(pPager->fd) );
- if( rc==SQLITE_OK && pPager->dbSize>pPager->dbHintSize ){
+ if( rc==SQLITE_OK
+ && (pList->pDirty ? pPager->dbSize : pList->pgno+1)>pPager->dbHintSize
+ ){
sqlite3_int64 szFile = pPager->pageSize * (sqlite3_int64)pPager->dbSize;
sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_SIZE_HINT, &szFile);
pPager->dbHintSize = pPager->dbSize;
@@ -42026,6 +43104,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
/* pPager->pBusyHandlerArg = 0; */
pPager->xReiniter = xReinit;
/* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */
+ /* pPager->szMmap = SQLITE_DEFAULT_MMAP_SIZE // will be set by btree.c */
*ppPager = pPager;
return SQLITE_OK;
@@ -42317,9 +43396,11 @@ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){
);
}
- if( !pPager->tempFile
- && (pPager->pBackup || sqlite3PcachePagecount(pPager->pPCache)>0)
- ){
+ if( !pPager->tempFile && (
+ pPager->pBackup
+ || sqlite3PcachePagecount(pPager->pPCache)>0
+ || USEFETCH(pPager)
+ )){
/* The shared-lock has just been acquired on the database file
** and there are already pages in the cache (from a previous
** read or write transaction). Check to see if the database
@@ -42345,7 +43426,7 @@ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){
if( nPage>0 ){
IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers)));
rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers), 24);
- if( rc!=SQLITE_OK ){
+ if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){
goto failed;
}
}else{
@@ -42354,6 +43435,16 @@ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){
if( memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0 ){
pager_reset(pPager);
+
+ /* Unmap the database file. It is possible that external processes
+ ** may have truncated the database file and then extended it back
+ ** to its original size while this process was not holding a lock.
+ ** In this case there may exist a Pager.pMap mapping that appears
+ ** to be the right size but is not actually valid. Avoid this
+ ** possibility by unmapping the db here. */
+ if( USEFETCH(pPager) ){
+ sqlite3OsUnfetch(pPager->fd, 0, 0);
+ }
}
}
@@ -42395,7 +43486,7 @@ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){
** nothing to rollback, so this routine is a no-op.
*/
static void pagerUnlockIfUnused(Pager *pPager){
- if( (sqlite3PcacheRefCount(pPager->pPCache)==0) ){
+ if( pPager->nMmapOut==0 && (sqlite3PcacheRefCount(pPager->pPCache)==0) ){
pagerUnlockAndRollback(pPager);
}
}
@@ -42454,13 +43545,27 @@ SQLITE_PRIVATE int sqlite3PagerAcquire(
Pager *pPager, /* The pager open on the database file */
Pgno pgno, /* Page number to fetch */
DbPage **ppPage, /* Write a pointer to the page here */
- int noContent /* Do not bother reading content from disk if true */
+ int flags /* PAGER_ACQUIRE_XXX flags */
){
- int rc;
- PgHdr *pPg;
+ int rc = SQLITE_OK;
+ PgHdr *pPg = 0;
+ u32 iFrame = 0; /* Frame to read from WAL file */
+ const int noContent = (flags & PAGER_ACQUIRE_NOCONTENT);
+
+ /* It is acceptable to use a read-only (mmap) page for any page except
+ ** page 1 if there is no write-transaction open or the ACQUIRE_READONLY
+ ** flag was specified by the caller. And so long as the db is not a
+ ** temporary or in-memory database. */
+ const int bMmapOk = (pgno!=1 && USEFETCH(pPager)
+ && (pPager->eState==PAGER_READER || (flags & PAGER_ACQUIRE_READONLY))
+#ifdef SQLITE_HAS_CODEC
+ && pPager->xCodec==0
+#endif
+ );
assert( pPager->eState>=PAGER_READER );
assert( assert_pager_state(pPager) );
+ assert( noContent==0 || bMmapOk==0 );
if( pgno==0 ){
return SQLITE_CORRUPT_BKPT;
@@ -42471,6 +43576,39 @@ SQLITE_PRIVATE int sqlite3PagerAcquire(
if( pPager->errCode!=SQLITE_OK ){
rc = pPager->errCode;
}else{
+
+ if( bMmapOk && pagerUseWal(pPager) ){
+ rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame);
+ if( rc!=SQLITE_OK ) goto pager_acquire_err;
+ }
+
+ if( iFrame==0 && bMmapOk ){
+ void *pData = 0;
+
+ rc = sqlite3OsFetch(pPager->fd,
+ (i64)(pgno-1) * pPager->pageSize, pPager->pageSize, &pData
+ );
+
+ if( rc==SQLITE_OK && pData ){
+ if( pPager->eState>PAGER_READER ){
+ (void)sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &pPg);
+ }
+ if( pPg==0 ){
+ rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg);
+ }else{
+ sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1)*pPager->pageSize, pData);
+ }
+ if( pPg ){
+ assert( rc==SQLITE_OK );
+ *ppPage = pPg;
+ return SQLITE_OK;
+ }
+ }
+ if( rc!=SQLITE_OK ){
+ goto pager_acquire_err;
+ }
+ }
+
rc = sqlite3PcacheFetch(pPager->pPCache, pgno, 1, ppPage);
}
@@ -42529,9 +43667,13 @@ SQLITE_PRIVATE int sqlite3PagerAcquire(
memset(pPg->pData, 0, pPager->pageSize);
IOTRACE(("ZERO %p %d\n", pPager, pgno));
}else{
+ if( pagerUseWal(pPager) && bMmapOk==0 ){
+ rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame);
+ if( rc!=SQLITE_OK ) goto pager_acquire_err;
+ }
assert( pPg->pPager==pPager );
pPager->aStat[PAGER_STAT_MISS]++;
- rc = readDbPage(pPg);
+ rc = readDbPage(pPg, iFrame);
if( rc!=SQLITE_OK ){
goto pager_acquire_err;
}
@@ -42584,7 +43726,11 @@ SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){
SQLITE_PRIVATE void sqlite3PagerUnref(DbPage *pPg){
if( pPg ){
Pager *pPager = pPg->pPager;
- sqlite3PcacheRelease(pPg);
+ if( pPg->flags & PGHDR_MMAP ){
+ pagerReleaseMapPage(pPg);
+ }else{
+ sqlite3PcacheRelease(pPg);
+ }
pagerUnlockIfUnused(pPager);
}
}
@@ -42919,6 +44065,7 @@ SQLITE_PRIVATE int sqlite3PagerWrite(DbPage *pDbPage){
Pager *pPager = pPg->pPager;
Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize);
+ assert( (pPg->flags & PGHDR_MMAP)==0 );
assert( pPager->eState>=PAGER_WRITER_LOCKED );
assert( pPager->eState!=PAGER_ERROR );
assert( assert_pager_state(pPager) );
@@ -43118,6 +44265,11 @@ static int pager_incr_changecounter(Pager *pPager, int isDirectMode){
pPager->aStat[PAGER_STAT_WRITE]++;
}
if( rc==SQLITE_OK ){
+ /* Update the pager's copy of the change-counter. Otherwise, the
+ ** next time a read transaction is opened the cache will be
+ ** flushed (as the change-counter values will not match). */
+ const void *pCopy = (const void *)&((const char *)zBuf)[24];
+ memcpy(&pPager->dbFileVers, pCopy, sizeof(pPager->dbFileVers));
pPager->changeCountDone = 1;
}
}else{
@@ -43475,7 +44627,7 @@ SQLITE_PRIVATE int sqlite3PagerRollback(Pager *pPager){
}
assert( pPager->eState==PAGER_READER || rc!=SQLITE_OK );
- assert( rc==SQLITE_OK || rc==SQLITE_FULL
+ assert( rc==SQLITE_OK || rc==SQLITE_FULL || rc==SQLITE_CORRUPT
|| rc==SQLITE_NOMEM || (rc&0xFF)==SQLITE_IOERR );
/* If an error occurs during a ROLLBACK, we can no longer trust the pager
@@ -44209,11 +45361,12 @@ static int pagerOpenWal(Pager *pPager){
** (e.g. due to malloc() failure), return an error code.
*/
if( rc==SQLITE_OK ){
- rc = sqlite3WalOpen(pPager->pVfs,
+ rc = sqlite3WalOpen(pPager->pVfs,
pPager->fd, pPager->zWal, pPager->exclusiveMode,
pPager->journalSizeLimit, &pPager->pWal
);
}
+ pagerFixMaplimit(pPager);
return rc;
}
@@ -44304,6 +45457,7 @@ SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager){
rc = sqlite3WalClose(pPager->pWal, pPager->ckptSyncFlags,
pPager->pageSize, (u8*)pPager->pTmpSpace);
pPager->pWal = 0;
+ pagerFixMaplimit(pPager);
}
}
return rc;
@@ -45552,8 +46706,9 @@ finished:
** checkpointing the log file.
*/
if( pWal->hdr.nPage ){
- sqlite3_log(SQLITE_OK, "Recovered %d frames from WAL file %s",
- pWal->hdr.nPage, pWal->zWalName
+ sqlite3_log(SQLITE_NOTICE_RECOVER_WAL,
+ "recovered %d frames from WAL file %s",
+ pWal->hdr.mxFrame, pWal->zWalName
);
}
}
@@ -46067,8 +47222,8 @@ static int walCheckpoint(
rc = sqlite3OsSync(pWal->pWalFd, sync_flags);
}
- /* If the database file may grow as a result of this checkpoint, hint
- ** about the eventual size of the db file to the VFS layer.
+ /* If the database may grow as a result of this checkpoint, hint
+ ** about the eventual size of the db file to the VFS layer.
*/
if( rc==SQLITE_OK ){
i64 nReq = ((i64)mxPage * szPage);
@@ -46078,6 +47233,7 @@ static int walCheckpoint(
}
}
+
/* Iterate through the contents of the WAL, copying data to the db file. */
while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){
i64 iOffset;
@@ -46632,19 +47788,17 @@ SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){
}
/*
-** Read a page from the WAL, if it is present in the WAL and if the
-** current read transaction is configured to use the WAL.
+** Search the wal file for page pgno. If found, set *piRead to the frame that
+** contains the page. Otherwise, if pgno is not in the wal file, set *piRead
+** to zero.
**
-** The *pInWal is set to 1 if the requested page is in the WAL and
-** has been loaded. Or *pInWal is set to 0 if the page was not in
-** the WAL and needs to be read out of the database.
+** Return SQLITE_OK if successful, or an error code if an error occurs. If an
+** error does occur, the final value of *piRead is undefined.
*/
-SQLITE_PRIVATE int sqlite3WalRead(
+SQLITE_PRIVATE int sqlite3WalFindFrame(
Wal *pWal, /* WAL handle */
Pgno pgno, /* Database page number to read data for */
- int *pInWal, /* OUT: True if data is read from WAL */
- int nOut, /* Size of buffer pOut in bytes */
- u8 *pOut /* Buffer to write page data to */
+ u32 *piRead /* OUT: Frame number (or zero) */
){
u32 iRead = 0; /* If !=0, WAL frame to return data from */
u32 iLast = pWal->hdr.mxFrame; /* Last page in WAL for this reader */
@@ -46660,7 +47814,7 @@ SQLITE_PRIVATE int sqlite3WalRead(
** WAL were empty.
*/
if( iLast==0 || pWal->readLock==0 ){
- *pInWal = 0;
+ *piRead = 0;
return SQLITE_OK;
}
@@ -46731,26 +47885,31 @@ SQLITE_PRIVATE int sqlite3WalRead(
}
#endif
- /* If iRead is non-zero, then it is the log frame number that contains the
- ** required page. Read and return data from the log file.
- */
- if( iRead ){
- int sz;
- i64 iOffset;
- sz = pWal->hdr.szPage;
- sz = (sz&0xfe00) + ((sz&0x0001)<<16);
- testcase( sz<=32768 );
- testcase( sz>=65536 );
- iOffset = walFrameOffset(iRead, sz) + WAL_FRAME_HDRSIZE;
- *pInWal = 1;
- /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL */
- return sqlite3OsRead(pWal->pWalFd, pOut, (nOut>sz ? sz : nOut), iOffset);
- }
-
- *pInWal = 0;
+ *piRead = iRead;
return SQLITE_OK;
}
+/*
+** Read the contents of frame iRead from the wal file into buffer pOut
+** (which is nOut bytes in size). Return SQLITE_OK if successful, or an
+** error code otherwise.
+*/
+SQLITE_PRIVATE int sqlite3WalReadFrame(
+ Wal *pWal, /* WAL handle */
+ u32 iRead, /* Frame to read */
+ int nOut, /* Size of buffer pOut in bytes */
+ u8 *pOut /* Buffer to write page data to */
+){
+ int sz;
+ i64 iOffset;
+ sz = pWal->hdr.szPage;
+ sz = (sz&0xfe00) + ((sz&0x0001)<<16);
+ testcase( sz<=32768 );
+ testcase( sz>=65536 );
+ iOffset = walFrameOffset(iRead, sz) + WAL_FRAME_HDRSIZE;
+ /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL */
+ return sqlite3OsRead(pWal->pWalFd, pOut, (nOut>sz ? sz : nOut), iOffset);
+}
/*
** Return the size of the database in pages (or zero, if unknown).
@@ -47297,6 +48456,9 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
/* Read the wal-index header. */
if( rc==SQLITE_OK ){
rc = walIndexReadHdr(pWal, &isChanged);
+ if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){
+ sqlite3OsUnfetch(pWal->pDbFd, 0, 0);
+ }
}
/* Copy data from the log to the database file. */
@@ -49968,13 +51130,17 @@ static int btreeGetPage(
BtShared *pBt, /* The btree */
Pgno pgno, /* Number of the page to fetch */
MemPage **ppPage, /* Return the page in this parameter */
- int noContent /* Do not load page content if true */
+ int noContent, /* Do not load page content if true */
+ int bReadonly /* True if a read-only (mmap) page is ok */
){
int rc;
DbPage *pDbPage;
+ int flags = (noContent ? PAGER_ACQUIRE_NOCONTENT : 0)
+ | (bReadonly ? PAGER_ACQUIRE_READONLY : 0);
+ assert( noContent==0 || bReadonly==0 );
assert( sqlite3_mutex_held(pBt->mutex) );
- rc = sqlite3PagerAcquire(pBt->pPager, pgno, (DbPage**)&pDbPage, noContent);
+ rc = sqlite3PagerAcquire(pBt->pPager, pgno, (DbPage**)&pDbPage, flags);
if( rc ) return rc;
*ppPage = btreePageFromDbPage(pDbPage, pgno, pBt);
return SQLITE_OK;
@@ -50017,9 +51183,10 @@ SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree *p){
** may remain unchanged, or it may be set to an invalid value.
*/
static int getAndInitPage(
- BtShared *pBt, /* The database file */
- Pgno pgno, /* Number of the page to get */
- MemPage **ppPage /* Write the page pointer here */
+ BtShared *pBt, /* The database file */
+ Pgno pgno, /* Number of the page to get */
+ MemPage **ppPage, /* Write the page pointer here */
+ int bReadonly /* True if a read-only (mmap) page is ok */
){
int rc;
assert( sqlite3_mutex_held(pBt->mutex) );
@@ -50027,7 +51194,7 @@ static int getAndInitPage(
if( pgno>btreePagecount(pBt) ){
rc = SQLITE_CORRUPT_BKPT;
}else{
- rc = btreeGetPage(pBt, pgno, ppPage, 0);
+ rc = btreeGetPage(pBt, pgno, ppPage, 0, bReadonly);
if( rc==SQLITE_OK ){
rc = btreeInitPage(*ppPage);
if( rc!=SQLITE_OK ){
@@ -50258,6 +51425,7 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename,
EXTRA_SIZE, flags, vfsFlags, pageReinit);
if( rc==SQLITE_OK ){
+ sqlite3PagerSetMmapLimit(pBt->pPager, db->szMmap);
rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader);
}
if( rc!=SQLITE_OK ){
@@ -50524,6 +51692,19 @@ SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree *p, int mxPage){
return SQLITE_OK;
}
+/*
+** Change the limit on the amount of the database file that may be
+** memory mapped.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree *p, sqlite3_int64 szMmap){
+ BtShared *pBt = p->pBt;
+ assert( sqlite3_mutex_held(p->db->mutex) );
+ sqlite3BtreeEnter(p);
+ sqlite3PagerSetMmapLimit(pBt->pPager, szMmap);
+ sqlite3BtreeLeave(p);
+ return SQLITE_OK;
+}
+
/*
** Change the way data is synced to disk in order to increase or decrease
** how well the database resists damage due to OS crashes and power
@@ -50749,7 +51930,7 @@ static int lockBtree(BtShared *pBt){
assert( pBt->pPage1==0 );
rc = sqlite3PagerSharedLock(pBt->pPager);
if( rc!=SQLITE_OK ) return rc;
- rc = btreeGetPage(pBt, 1, &pPage1, 0);
+ rc = btreeGetPage(pBt, 1, &pPage1, 0, 0);
if( rc!=SQLITE_OK ) return rc;
/* Do some checking to help insure the file we opened really is
@@ -50885,6 +52066,29 @@ page1_init_failed:
return rc;
}
+#ifndef NDEBUG
+/*
+** Return the number of cursors open on pBt. This is for use
+** in assert() expressions, so it is only compiled if NDEBUG is not
+** defined.
+**
+** Only write cursors are counted if wrOnly is true. If wrOnly is
+** false then all cursors are counted.
+**
+** For the purposes of this routine, a cursor is any cursor that
+** is capable of reading or writing to the databse. Cursors that
+** have been tripped into the CURSOR_FAULT state are not counted.
+*/
+static int countValidCursors(BtShared *pBt, int wrOnly){
+ BtCursor *pCur;
+ int r = 0;
+ for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){
+ if( (wrOnly==0 || pCur->wrFlag) && pCur->eState!=CURSOR_FAULT ) r++;
+ }
+ return r;
+}
+#endif
+
/*
** If there are no outstanding cursors and we are not in the middle
** of a transaction but there is a read lock on the database, then
@@ -50895,7 +52099,7 @@ page1_init_failed:
*/
static void unlockBtreeIfUnused(BtShared *pBt){
assert( sqlite3_mutex_held(pBt->mutex) );
- assert( pBt->pCursor==0 || pBt->inTransaction>TRANS_NONE );
+ assert( countValidCursors(pBt,0)==0 || pBt->inTransaction>TRANS_NONE );
if( pBt->inTransaction==TRANS_NONE && pBt->pPage1!=0 ){
assert( pBt->pPage1->aData );
assert( sqlite3PagerRefcount(pBt->pPager)==1 );
@@ -51308,7 +52512,7 @@ static int relocatePage(
** iPtrPage.
*/
if( eType!=PTRMAP_ROOTPAGE ){
- rc = btreeGetPage(pBt, iPtrPage, &pPtrPage, 0);
+ rc = btreeGetPage(pBt, iPtrPage, &pPtrPage, 0, 0);
if( rc!=SQLITE_OK ){
return rc;
}
@@ -51392,7 +52596,7 @@ static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg, int bCommit){
u8 eMode = BTALLOC_ANY; /* Mode parameter for allocateBtreePage() */
Pgno iNear = 0; /* nearby parameter for allocateBtreePage() */
- rc = btreeGetPage(pBt, iLastPg, &pLastPg, 0);
+ rc = btreeGetPage(pBt, iLastPg, &pLastPg, 0, 0);
if( rc!=SQLITE_OK ){
return rc;
}
@@ -51484,8 +52688,11 @@ SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *p){
if( nOrig0 ){
- invalidateAllOverflowCache(pBt);
- rc = incrVacuumStep(pBt, nFin, nOrig, 0);
+ rc = saveAllCursors(pBt, 0, 0);
+ if( rc==SQLITE_OK ){
+ invalidateAllOverflowCache(pBt);
+ rc = incrVacuumStep(pBt, nFin, nOrig, 0);
+ }
if( rc==SQLITE_OK ){
rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
put4byte(&pBt->pPage1->aData[28], pBt->nPage);
@@ -51533,7 +52740,9 @@ static int autoVacuumCommit(BtShared *pBt){
nFree = get4byte(&pBt->pPage1->aData[36]);
nFin = finalDbSize(pBt, nOrig, nFree);
if( nFin>nOrig ) return SQLITE_CORRUPT_BKPT;
-
+ if( nFinnFin && rc==SQLITE_OK; iFree--){
rc = incrVacuumStep(pBt, nFin, iFree, 1);
}
@@ -51550,7 +52759,7 @@ static int autoVacuumCommit(BtShared *pBt){
}
}
- assert( nRef==sqlite3PagerRefcount(pPager) );
+ assert( nRef>=sqlite3PagerRefcount(pPager) );
return rc;
}
@@ -51618,7 +52827,6 @@ static void btreeEndTransaction(Btree *p){
#ifndef SQLITE_OMIT_AUTOVACUUM
pBt->bDoTruncate = 0;
#endif
- btreeClearHasContent(pBt);
if( p->inTrans>TRANS_NONE && p->db->activeVdbeCnt>1 ){
/* If there are other active statements that belong to this database
** handle, downgrade to a read-only transaction. The other statements
@@ -51693,6 +52901,7 @@ SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup){
return rc;
}
pBt->inTransaction = TRANS_READ;
+ btreeClearHasContent(pBt);
}
btreeEndTransaction(p);
@@ -51714,27 +52923,6 @@ SQLITE_PRIVATE int sqlite3BtreeCommit(Btree *p){
return rc;
}
-#ifndef NDEBUG
-/*
-** Return the number of write-cursors open on this handle. This is for use
-** in assert() expressions, so it is only compiled if NDEBUG is not
-** defined.
-**
-** For the purposes of this routine, a write-cursor is any cursor that
-** is capable of writing to the databse. That means the cursor was
-** originally opened for writing and the cursor has not be disabled
-** by having its state changed to CURSOR_FAULT.
-*/
-static int countWriteCursors(BtShared *pBt){
- BtCursor *pCur;
- int r = 0;
- for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){
- if( pCur->wrFlag && pCur->eState!=CURSOR_FAULT ) r++;
- }
- return r;
-}
-#endif
-
/*
** This routine sets the state to CURSOR_FAULT and the error
** code to errCode for every cursor on BtShared that pBtree
@@ -51806,7 +52994,7 @@ SQLITE_PRIVATE int sqlite3BtreeRollback(Btree *p, int tripCode){
/* The rollback may have destroyed the pPage1->aData value. So
** call btreeGetPage() on page 1 again to make
** sure pPage1->aData is set correctly. */
- if( btreeGetPage(pBt, 1, &pPage1, 0)==SQLITE_OK ){
+ if( btreeGetPage(pBt, 1, &pPage1, 0, 0)==SQLITE_OK ){
int nPage = get4byte(28+(u8*)pPage1->aData);
testcase( nPage==0 );
if( nPage==0 ) sqlite3PagerPagecount(pBt->pPager, &nPage);
@@ -51814,8 +53002,9 @@ SQLITE_PRIVATE int sqlite3BtreeRollback(Btree *p, int tripCode){
pBt->nPage = nPage;
releasePage(pPage1);
}
- assert( countWriteCursors(pBt)==0 );
+ assert( countValidCursors(pBt, 1)==0 );
pBt->inTransaction = TRANS_READ;
+ btreeClearHasContent(pBt);
}
btreeEndTransaction(p);
@@ -52240,7 +53429,7 @@ static int getOverflowPage(
assert( next==0 || rc==SQLITE_DONE );
if( rc==SQLITE_OK ){
- rc = btreeGetPage(pBt, ovfl, &pPage, 0);
+ rc = btreeGetPage(pBt, ovfl, &pPage, 0, (ppPage==0));
assert( rc==SQLITE_OK || pPage==0 );
if( rc==SQLITE_OK ){
next = get4byte(pPage->aData);
@@ -52461,7 +53650,9 @@ static int accessPayload(
{
DbPage *pDbPage;
- rc = sqlite3PagerGet(pBt->pPager, nextPage, &pDbPage);
+ rc = sqlite3PagerAcquire(pBt->pPager, nextPage, &pDbPage,
+ (eOp==0 ? PAGER_ACQUIRE_READONLY : 0)
+ );
if( rc==SQLITE_OK ){
aPayload = sqlite3PagerGetData(pDbPage);
nextPage = get4byte(aPayload);
@@ -52640,10 +53831,11 @@ static int moveToChild(BtCursor *pCur, u32 newPgno){
assert( cursorHoldsMutex(pCur) );
assert( pCur->eState==CURSOR_VALID );
assert( pCur->iPageiPage>=0 );
if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){
return SQLITE_CORRUPT_BKPT;
}
- rc = getAndInitPage(pBt, newPgno, &pNewPage);
+ rc = getAndInitPage(pBt, newPgno, &pNewPage, (pCur->wrFlag==0));
if( rc ) return rc;
pCur->apPage[i+1] = pNewPage;
pCur->aiIdx[i+1] = 0;
@@ -52760,7 +53952,7 @@ static int moveToRoot(BtCursor *pCur){
pCur->eState = CURSOR_INVALID;
return SQLITE_OK;
}else{
- rc = getAndInitPage(pBt, pCur->pgnoRoot, &pCur->apPage[0]);
+ rc = getAndInitPage(pBt, pCur->pgnoRoot, &pCur->apPage[0], pCur->wrFlag==0);
if( rc!=SQLITE_OK ){
pCur->eState = CURSOR_INVALID;
return rc;
@@ -53374,7 +54566,7 @@ static int allocateBtreePage(
if( iTrunk>mxPage ){
rc = SQLITE_CORRUPT_BKPT;
}else{
- rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0);
+ rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0, 0);
}
if( rc ){
pTrunk = 0;
@@ -53438,7 +54630,7 @@ static int allocateBtreePage(
goto end_allocate_page;
}
testcase( iNewTrunk==mxPage );
- rc = btreeGetPage(pBt, iNewTrunk, &pNewTrunk, 0);
+ rc = btreeGetPage(pBt, iNewTrunk, &pNewTrunk, 0, 0);
if( rc!=SQLITE_OK ){
goto end_allocate_page;
}
@@ -53518,7 +54710,7 @@ static int allocateBtreePage(
}
put4byte(&aData[4], k-1);
noContent = !btreeGetHasContent(pBt, *pPgno);
- rc = btreeGetPage(pBt, *pPgno, ppPage, noContent);
+ rc = btreeGetPage(pBt, *pPgno, ppPage, noContent, 0);
if( rc==SQLITE_OK ){
rc = sqlite3PagerWrite((*ppPage)->pDbPage);
if( rc!=SQLITE_OK ){
@@ -53566,7 +54758,7 @@ static int allocateBtreePage(
MemPage *pPg = 0;
TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage));
assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) );
- rc = btreeGetPage(pBt, pBt->nPage, &pPg, bNoContent);
+ rc = btreeGetPage(pBt, pBt->nPage, &pPg, bNoContent, 0);
if( rc==SQLITE_OK ){
rc = sqlite3PagerWrite(pPg->pDbPage);
releasePage(pPg);
@@ -53580,7 +54772,7 @@ static int allocateBtreePage(
*pPgno = pBt->nPage;
assert( *pPgno!=PENDING_BYTE_PAGE(pBt) );
- rc = btreeGetPage(pBt, *pPgno, ppPage, bNoContent);
+ rc = btreeGetPage(pBt, *pPgno, ppPage, bNoContent, 0);
if( rc ) return rc;
rc = sqlite3PagerWrite((*ppPage)->pDbPage);
if( rc!=SQLITE_OK ){
@@ -53648,7 +54840,7 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){
/* If the secure_delete option is enabled, then
** always fully overwrite deleted information with zeros.
*/
- if( (!pPage && ((rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0) )
+ if( (!pPage && ((rc = btreeGetPage(pBt, iPage, &pPage, 0, 0))!=0) )
|| ((rc = sqlite3PagerWrite(pPage->pDbPage))!=0)
){
goto freepage_out;
@@ -53675,7 +54867,7 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){
u32 nLeaf; /* Initial number of leaf cells on trunk page */
iTrunk = get4byte(&pPage1->aData[32]);
- rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0);
+ rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0, 0);
if( rc!=SQLITE_OK ){
goto freepage_out;
}
@@ -53721,7 +54913,7 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){
** first trunk in the free-list is full. Either way, the page being freed
** will become the new first trunk page in the free-list.
*/
- if( pPage==0 && SQLITE_OK!=(rc = btreeGetPage(pBt, iPage, &pPage, 0)) ){
+ if( pPage==0 && SQLITE_OK!=(rc = btreeGetPage(pBt, iPage, &pPage, 0, 0)) ){
goto freepage_out;
}
rc = sqlite3PagerWrite(pPage->pDbPage);
@@ -54522,7 +55714,7 @@ static int balance_nonroot(
}
pgno = get4byte(pRight);
while( 1 ){
- rc = getAndInitPage(pBt, pgno, &apOld[i]);
+ rc = getAndInitPage(pBt, pgno, &apOld[i], 0);
if( rc ){
memset(apOld, 0, (i+1)*sizeof(MemPage*));
goto balance_cleanup;
@@ -55610,10 +56802,17 @@ static int btreeCreateTable(Btree *p, int *piTable, int createTabFlags){
u8 eType = 0;
Pgno iPtrPage = 0;
+ /* Save the positions of any open cursors. This is required in
+ ** case they are holding a reference to an xFetch reference
+ ** corresponding to page pgnoRoot. */
+ rc = saveAllCursors(pBt, 0, 0);
releasePage(pPageMove);
+ if( rc!=SQLITE_OK ){
+ return rc;
+ }
/* Move the page currently at pgnoRoot to pgnoMove. */
- rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0);
+ rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0, 0);
if( rc!=SQLITE_OK ){
return rc;
}
@@ -55634,7 +56833,7 @@ static int btreeCreateTable(Btree *p, int *piTable, int createTabFlags){
if( rc!=SQLITE_OK ){
return rc;
}
- rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0);
+ rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0, 0);
if( rc!=SQLITE_OK ){
return rc;
}
@@ -55710,7 +56909,7 @@ static int clearDatabasePage(
return SQLITE_CORRUPT_BKPT;
}
- rc = getAndInitPage(pBt, pgno, &pPage);
+ rc = getAndInitPage(pBt, pgno, &pPage, 0);
if( rc ) return rc;
for(i=0; inCell; i++){
pCell = findCell(pPage, i);
@@ -55812,7 +57011,7 @@ static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){
return SQLITE_LOCKED_SHAREDCACHE;
}
- rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0);
+ rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0, 0);
if( rc ) return rc;
rc = sqlite3BtreeClearTable(p, iTable, 0);
if( rc ){
@@ -55847,7 +57046,7 @@ static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){
*/
MemPage *pMove;
releasePage(pPage);
- rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0);
+ rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0, 0);
if( rc!=SQLITE_OK ){
return rc;
}
@@ -55857,7 +57056,7 @@ static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){
return rc;
}
pMove = 0;
- rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0);
+ rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0, 0);
freePage(pMove, &rc);
releasePage(pMove);
if( rc!=SQLITE_OK ){
@@ -56269,7 +57468,7 @@ static int checkTreePage(
usableSize = pBt->usableSize;
if( iPage==0 ) return 0;
if( checkRef(pCheck, iPage, zParentContext) ) return 0;
- if( (rc = btreeGetPage(pBt, (Pgno)iPage, &pPage, 0))!=0 ){
+ if( (rc = btreeGetPage(pBt, (Pgno)iPage, &pPage, 0, 0))!=0 ){
checkAppendMsg(pCheck, zContext,
"unable to get the page. error code=%d", rc);
return 0;
@@ -56741,6 +57940,17 @@ SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void
return SQLITE_ABORT;
}
+ /* Save the positions of all other cursors open on this table. This is
+ ** required in case any of them are holding references to an xFetch
+ ** version of the b-tree page modified by the accessPayload call below.
+ **
+ ** Note that pCsr must be open on a BTREE_INTKEY table and saveCursorPosition()
+ ** and hence saveAllCursors() cannot fail on a BTREE_INTKEY table, hence
+ ** saveAllCursors can only return SQLITE_OK.
+ */
+ VVA_ONLY(rc =) saveAllCursors(pCsr->pBt, pCsr->pgnoRoot, pCsr);
+ assert( rc==SQLITE_OK );
+
/* Check some assumptions:
** (a) the cursor is open for writing,
** (b) there is a read/write transaction open,
@@ -57222,7 +58432,8 @@ SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){
const Pgno iSrcPg = p->iNext; /* Source page number */
if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){
DbPage *pSrcPg; /* Source page object */
- rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg);
+ rc = sqlite3PagerAcquire(pSrcPager, iSrcPg, &pSrcPg,
+ PAGER_ACQUIRE_READONLY);
if( rc==SQLITE_OK ){
rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg), 0);
sqlite3PagerUnref(pSrcPg);
@@ -62445,14 +63656,6 @@ end_of_step:
return (rc&db->errMask);
}
-/*
-** The maximum number of times that a statement will try to reparse
-** itself before giving up and returning SQLITE_SCHEMA.
-*/
-#ifndef SQLITE_MAX_SCHEMA_RETRY
-# define SQLITE_MAX_SCHEMA_RETRY 5
-#endif
-
/*
** This is the top-level implementation of sqlite3_step(). Call
** sqlite3Step() to do most of the work. If a schema error occurs,
@@ -63356,6 +64559,11 @@ static int findNextHostParameter(const char *zSql, int *pnToken){
** then the returned string holds a copy of zRawSql with "-- " prepended
** to each line of text.
**
+** If the SQLITE_TRACE_SIZE_LIMIT macro is defined to an integer, then
+** then long strings and blobs are truncated to that many bytes. This
+** can be used to prevent unreasonably large trace strings when dealing
+** with large (multi-megabyte) strings and blobs.
+**
** The calling function is responsible for making sure the memory returned
** is eventually freed.
**
@@ -63426,30 +64634,49 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql(
}else if( pVar->flags & MEM_Real ){
sqlite3XPrintf(&out, "%!.15g", pVar->r);
}else if( pVar->flags & MEM_Str ){
+ int nOut; /* Number of bytes of the string text to include in output */
#ifndef SQLITE_OMIT_UTF16
u8 enc = ENC(db);
+ Mem utf8;
if( enc!=SQLITE_UTF8 ){
- Mem utf8;
memset(&utf8, 0, sizeof(utf8));
utf8.db = db;
sqlite3VdbeMemSetStr(&utf8, pVar->z, pVar->n, enc, SQLITE_STATIC);
sqlite3VdbeChangeEncoding(&utf8, SQLITE_UTF8);
- sqlite3XPrintf(&out, "'%.*q'", utf8.n, utf8.z);
- sqlite3VdbeMemRelease(&utf8);
- }else
-#endif
- {
- sqlite3XPrintf(&out, "'%.*q'", pVar->n, pVar->z);
+ pVar = &utf8;
}
+#endif
+ nOut = pVar->n;
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOut>SQLITE_TRACE_SIZE_LIMIT ){
+ nOut = SQLITE_TRACE_SIZE_LIMIT;
+ while( nOutn && (pVar->z[nOut]&0xc0)==0x80 ){ nOut++; }
+ }
+#endif
+ sqlite3XPrintf(&out, "'%.*q'", nOut, pVar->z);
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOutn ) sqlite3XPrintf(&out, "/*+%d bytes*/", pVar->n-nOut);
+#endif
+#ifndef SQLITE_OMIT_UTF16
+ if( enc!=SQLITE_UTF8 ) sqlite3VdbeMemRelease(&utf8);
+#endif
}else if( pVar->flags & MEM_Zero ){
sqlite3XPrintf(&out, "zeroblob(%d)", pVar->u.nZero);
}else{
+ int nOut; /* Number of bytes of the blob to include in output */
assert( pVar->flags & MEM_Blob );
sqlite3StrAccumAppend(&out, "x'", 2);
- for(i=0; in; i++){
+ nOut = pVar->n;
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOut>SQLITE_TRACE_SIZE_LIMIT ) nOut = SQLITE_TRACE_SIZE_LIMIT;
+#endif
+ for(i=0; iz[i]&0xff);
}
sqlite3StrAccumAppend(&out, "'", 1);
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+ if( nOutn ) sqlite3XPrintf(&out, "/*+%d bytes*/", pVar->n-nOut);
+#endif
}
}
}
@@ -67666,7 +68893,7 @@ case OP_SeekGt: { /* jump, in3 */
** u.bc.r.flags = 0;
** }
*/
- u.bc.r.flags = (u16)(UNPACKED_INCRKEY * (1 & (u.bc.oc - OP_SeekLt)));
+ u.bc.r.flags = (u8)(UNPACKED_INCRKEY * (1 & (u.bc.oc - OP_SeekLt)));
assert( u.bc.oc!=OP_SeekGt || u.bc.r.flags==UNPACKED_INCRKEY );
assert( u.bc.oc!=OP_SeekLe || u.bc.r.flags==UNPACKED_INCRKEY );
assert( u.bc.oc!=OP_SeekGe || u.bc.r.flags==0 );
@@ -70791,7 +72018,7 @@ SQLITE_API int sqlite3_blob_open(
}
sqlite3_bind_int64(pBlob->pStmt, 1, iRow);
rc = blobSeekToRow(pBlob, iRow, &zErr);
- } while( (++nAttempt)<5 && rc==SQLITE_SCHEMA );
+ } while( (++nAttempt)mallocFailed==0 ){
@@ -72476,7 +73703,9 @@ static const struct sqlite3_io_methods MemJournalMethods = {
0, /* xShmMap */
0, /* xShmLock */
0, /* xShmBarrier */
- 0 /* xShmUnlock */
+ 0, /* xShmUnmap */
+ 0, /* xFetch */
+ 0 /* xUnfetch */
};
/*
@@ -72620,7 +73849,9 @@ SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){
/*
** Call sqlite3WalkExpr() for every expression in Select statement p.
** Invoke sqlite3WalkSelect() for subqueries in the FROM clause and
-** on the compound select chain, p->pPrior.
+** on the compound select chain, p->pPrior. Invoke the xSelectCallback()
+** either before or after the walk of expressions and FROM clause, depending
+** on whether pWalker->bSelectDepthFirst is false or true, respectively.
**
** Return WRC_Continue under normal conditions. Return WRC_Abort if
** there is an abort request.
@@ -72634,14 +73865,23 @@ SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){
rc = WRC_Continue;
pWalker->walkerDepth++;
while( p ){
- rc = pWalker->xSelectCallback(pWalker, p);
- if( rc ) break;
+ if( !pWalker->bSelectDepthFirst ){
+ rc = pWalker->xSelectCallback(pWalker, p);
+ if( rc ) break;
+ }
if( sqlite3WalkSelectExpr(pWalker, p)
|| sqlite3WalkSelectFrom(pWalker, p)
){
pWalker->walkerDepth--;
return WRC_Abort;
}
+ if( pWalker->bSelectDepthFirst ){
+ rc = pWalker->xSelectCallback(pWalker, p);
+ /* Depth-first search is currently only used for
+ ** selectAddSubqueryTypeInfo() and that routine always returns
+ ** WRC_Continue (0). So the following branch is never taken. */
+ if( NEVER(rc) ) break;
+ }
p = p->pPrior;
}
pWalker->walkerDepth--;
@@ -73039,7 +74279,10 @@ static int lookupName(
** Note that the expression in the result set should have already been
** resolved by the time the WHERE clause is resolved.
*/
- if( cnt==0 && (pEList = pNC->pEList)!=0 && zTab==0 ){
+ if( (pEList = pNC->pEList)!=0
+ && zTab==0
+ && ((pNC->ncFlags & NC_AsMaybe)==0 || cnt==0)
+ ){
for(j=0; jnExpr; j++){
char *zAs = pEList->a[j].zName;
if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){
@@ -73130,7 +74373,9 @@ static int lookupName(
lookupname_end:
if( cnt==1 ){
assert( pNC!=0 );
- sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList);
+ if( pExpr->op!=TK_AS ){
+ sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList);
+ }
/* Increment the nRef value on all name contexts from TopNC up to
** the point where the name matched. */
for(;;){
@@ -73805,11 +75050,10 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
** re-evaluated for each reference to it.
*/
sNC.pEList = p->pEList;
- if( sqlite3ResolveExprNames(&sNC, p->pWhere) ||
- sqlite3ResolveExprNames(&sNC, p->pHaving)
- ){
- return WRC_Abort;
- }
+ sNC.ncFlags |= NC_AsMaybe;
+ if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort;
+ if( sqlite3ResolveExprNames(&sNC, p->pWhere) ) return WRC_Abort;
+ sNC.ncFlags &= ~NC_AsMaybe;
/* The ORDER BY and GROUP BY clauses may not refer to terms in
** outer queries
@@ -73930,6 +75174,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames(
#endif
savedHasAgg = pNC->ncFlags & NC_HasAgg;
pNC->ncFlags &= ~NC_HasAgg;
+ memset(&w, 0, sizeof(w));
w.xExprCallback = resolveExprStep;
w.xSelectCallback = resolveSelectStep;
w.pParse = pNC->pParse;
@@ -73970,6 +75215,7 @@ SQLITE_PRIVATE void sqlite3ResolveSelectNames(
Walker w;
assert( p!=0 );
+ memset(&w, 0, sizeof(w));
w.xExprCallback = resolveExprStep;
w.xSelectCallback = resolveSelectStep;
w.pParse = pParse;
@@ -74096,12 +75342,7 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){
}
assert( op!=TK_REGISTER || p->op2!=TK_COLLATE );
if( op==TK_COLLATE ){
- if( db->init.busy ){
- /* Do not report errors when parsing while the schema */
- pColl = sqlite3FindCollSeq(db, ENC(db), p->u.zToken, 0);
- }else{
- pColl = sqlite3GetCollSeq(pParse, ENC(db), 0, p->u.zToken);
- }
+ pColl = sqlite3GetCollSeq(pParse, ENC(db), 0, p->u.zToken);
break;
}
if( p->pTab!=0
@@ -75194,6 +76435,7 @@ static int selectNodeIsConstant(Walker *pWalker, Select *NotUsed){
}
static int exprIsConst(Expr *p, int initFlag){
Walker w;
+ memset(&w, 0, sizeof(w));
w.u.i = initFlag;
w.xExprCallback = exprNodeIsConstant;
w.xSelectCallback = selectNodeIsConstant;
@@ -77408,8 +78650,8 @@ SQLITE_PRIVATE void sqlite3ExprCodeConstants(Parse *pParse, Expr *pExpr){
Walker w;
if( pParse->cookieGoto ) return;
if( OptimizationDisabled(pParse->db, SQLITE_FactorOutConst) ) return;
+ memset(&w, 0, sizeof(w));
w.xExprCallback = evalConstExpr;
- w.xSelectCallback = 0;
w.pParse = pParse;
sqlite3WalkExpr(&w, pExpr);
}
@@ -83601,10 +84843,8 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
for(i=0; inExpr; i++){
Expr *pExpr = pList->a[i].pExpr;
if( pExpr ){
- CollSeq *pColl = sqlite3ExprCollSeq(pParse, pExpr);
- if( pColl ){
- nExtra += (1 + sqlite3Strlen30(pColl->zName));
- }
+ assert( pExpr->op==TK_COLLATE );
+ nExtra += (1 + sqlite3Strlen30(pExpr->u.zToken));
}
}
@@ -83665,7 +84905,6 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
const char *zColName = pListItem->zName;
Column *pTabCol;
int requestedSortOrder;
- CollSeq *pColl; /* Collating sequence */
char *zColl; /* Collation sequence name */
for(j=0, pTabCol=pTab->aCol; jnCol; j++, pTabCol++){
@@ -83678,11 +84917,10 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
goto exit_create_index;
}
pIndex->aiColumn[i] = j;
- if( pListItem->pExpr
- && (pColl = sqlite3ExprCollSeq(pParse, pListItem->pExpr))!=0
- ){
+ if( pListItem->pExpr ){
int nColl;
- zColl = pColl->zName;
+ assert( pListItem->pExpr->op==TK_COLLATE );
+ zColl = pListItem->pExpr->u.zToken;
nColl = sqlite3Strlen30(zColl) + 1;
assert( nExtra>=nColl );
memcpy(zExtra, zColl, nColl);
@@ -83691,9 +84929,7 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex(
nExtra -= nColl;
}else{
zColl = pTab->aCol[j].zColl;
- if( !zColl ){
- zColl = "BINARY";
- }
+ if( !zColl ) zColl = "BINARY";
}
if( !db->init.busy && !sqlite3LocateCollSeq(pParse, zColl) ){
goto exit_create_index;
@@ -86612,6 +87848,13 @@ static int patternCompare(
return *zString==0;
}
+/*
+** The sqlite3_strglob() interface.
+*/
+SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){
+ return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, 0)==0;
+}
+
/*
** Count the number of times that the LIKE operator (or GLOB which is
** just a variation of LIKE) gets called. This is used for testing
@@ -90812,7 +92055,6 @@ SQLITE_API int sqlite3_exec(
const char *zLeftover; /* Tail of unprocessed SQL */
sqlite3_stmt *pStmt = 0; /* The current SQL statement */
char **azCols = 0; /* Names of result columns */
- int nRetry = 0; /* Number of retry attempts */
int callbackIsInit; /* True if callback data is initialized */
if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
@@ -90820,12 +92062,12 @@ SQLITE_API int sqlite3_exec(
sqlite3_mutex_enter(db->mutex);
sqlite3Error(db, SQLITE_OK, 0);
- while( (rc==SQLITE_OK || (rc==SQLITE_SCHEMA && (++nRetry)<2)) && zSql[0] ){
+ while( rc==SQLITE_OK && zSql[0] ){
int nCol;
char **azVals = 0;
pStmt = 0;
- rc = sqlite3_prepare(db, zSql, -1, &pStmt, &zLeftover);
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zLeftover);
assert( rc==SQLITE_OK || pStmt==0 );
if( rc!=SQLITE_OK ){
continue;
@@ -90882,11 +92124,8 @@ SQLITE_API int sqlite3_exec(
if( rc!=SQLITE_ROW ){
rc = sqlite3VdbeFinalize((Vdbe *)pStmt);
pStmt = 0;
- if( rc!=SQLITE_SCHEMA ){
- nRetry = 0;
- zSql = zLeftover;
- while( sqlite3Isspace(zSql[0]) ) zSql++;
- }
+ zSql = zLeftover;
+ while( sqlite3Isspace(zSql[0]) ) zSql++;
break;
}
}
@@ -91410,8 +92649,17 @@ struct sqlite3_api_routines {
#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2
#endif /* SQLITE_CORE */
-#define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api = 0;
-#define SQLITE_EXTENSION_INIT2(v) sqlite3_api = v;
+#ifndef SQLITE_CORE
+ /* This case when the file really is being compiled as a loadable
+ ** extension */
+# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0;
+# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v;
+#else
+ /* This case when the file is being statically linked into the
+ ** application */
+# define SQLITE_EXTENSION_INIT1 /*no-op*/
+# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */
+#endif
#endif /* _SQLITE3EXT_H_ */
@@ -91814,8 +93062,23 @@ static int sqlite3LoadExtension(
void *handle;
int (*xInit)(sqlite3*,char**,const sqlite3_api_routines*);
char *zErrmsg = 0;
+ const char *zEntry;
+ char *zAltEntry = 0;
void **aHandle;
int nMsg = 300 + sqlite3Strlen30(zFile);
+ int ii;
+
+ /* Shared library endings to try if zFile cannot be loaded as written */
+ static const char *azEndings[] = {
+#if SQLITE_OS_WIN
+ "dll"
+#elif defined(__APPLE__)
+ "dylib"
+#else
+ "so"
+#endif
+ };
+
if( pzErrMsg ) *pzErrMsg = 0;
@@ -91832,11 +93095,17 @@ static int sqlite3LoadExtension(
return SQLITE_ERROR;
}
- if( zProc==0 ){
- zProc = "sqlite3_extension_init";
- }
+ zEntry = zProc ? zProc : "sqlite3_extension_init";
handle = sqlite3OsDlOpen(pVfs, zFile);
+#if SQLITE_OS_UNIX || SQLITE_OS_WIN
+ for(ii=0; ii sqlite3_example_init
+ ** C:/lib/mathfuncs.dll ==> sqlite3_mathfuncs_init
+ */
+ if( xInit==0 && zProc==0 ){
+ int iFile, iEntry, c;
+ int ncFile = sqlite3Strlen30(zFile);
+ zAltEntry = sqlite3_malloc(ncFile+30);
+ if( zAltEntry==0 ){
+ sqlite3OsDlClose(pVfs, handle);
+ return SQLITE_NOMEM;
+ }
+ memcpy(zAltEntry, "sqlite3_", 8);
+ for(iFile=ncFile-1; iFile>=0 && zFile[iFile]!='/'; iFile--){}
+ iFile++;
+ if( sqlite3_strnicmp(zFile+iFile, "lib", 3)==0 ) iFile += 3;
+ for(iEntry=8; (c = zFile[iFile])!=0 && c!='.'; iFile++){
+ if( sqlite3Isalpha(c) ){
+ zAltEntry[iEntry++] = (char)sqlite3UpperToLower[(unsigned)c];
+ }
+ }
+ memcpy(zAltEntry+iEntry, "_init", 6);
+ zEntry = zAltEntry;
+ xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*))
+ sqlite3OsDlSym(pVfs, handle, zEntry);
+ }
if( xInit==0 ){
if( pzErrMsg ){
- nMsg += sqlite3Strlen30(zProc);
+ nMsg += sqlite3Strlen30(zEntry);
*pzErrMsg = zErrmsg = sqlite3_malloc(nMsg);
if( zErrmsg ){
sqlite3_snprintf(nMsg, zErrmsg,
- "no entry point [%s] in shared library [%s]", zProc,zFile);
+ "no entry point [%s] in shared library [%s]", zEntry, zFile);
sqlite3OsDlError(pVfs, nMsg-1, zErrmsg);
}
- sqlite3OsDlClose(pVfs, handle);
}
+ sqlite3OsDlClose(pVfs, handle);
+ sqlite3_free(zAltEntry);
return SQLITE_ERROR;
- }else if( xInit(db, &zErrmsg, &sqlite3Apis) ){
+ }
+ sqlite3_free(zAltEntry);
+ if( xInit(db, &zErrmsg, &sqlite3Apis) ){
if( pzErrMsg ){
*pzErrMsg = sqlite3_mprintf("error during initialization: %s", zErrmsg);
}
@@ -92391,7 +93697,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
int rc; /* return value form SQLITE_FCNTL_PRAGMA */
sqlite3 *db = pParse->db; /* The database connection */
Db *pDb; /* The specific database being pragmaed */
- Vdbe *v = pParse->pVdbe = sqlite3VdbeCreate(db); /* Prepared statement */
+ Vdbe *v = sqlite3GetVdbe(pParse); /* Prepared statement */
if( v==0 ) return;
sqlite3VdbeRunOnlyOnce(v);
@@ -92474,11 +93780,12 @@ SQLITE_PRIVATE void sqlite3Pragma(
static const VdbeOpList getCacheSize[] = {
{ OP_Transaction, 0, 0, 0}, /* 0 */
{ OP_ReadCookie, 0, 1, BTREE_DEFAULT_CACHE_SIZE}, /* 1 */
- { OP_IfPos, 1, 7, 0},
+ { OP_IfPos, 1, 8, 0},
{ OP_Integer, 0, 2, 0},
{ OP_Subtract, 1, 2, 1},
- { OP_IfPos, 1, 7, 0},
+ { OP_IfPos, 1, 8, 0},
{ OP_Integer, 0, 1, 0}, /* 6 */
+ { OP_Noop, 0, 0, 0},
{ OP_ResultRow, 1, 1, 0},
};
int addr;
@@ -92816,6 +94123,43 @@ SQLITE_PRIVATE void sqlite3Pragma(
}
}else
+ /*
+ ** PRAGMA [database.]mmap_size(N)
+ **
+ ** Used to set mapping size limit. The mapping size limit is
+ ** used to limit the aggregate size of all memory mapped regions of the
+ ** database file. If this parameter is set to zero, then memory mapping
+ ** is not used at all. If N is negative, then the default memory map
+ ** limit determined by sqlite3_config(SQLITE_CONFIG_MMAP_SIZE) is set.
+ ** The parameter N is measured in bytes.
+ **
+ ** This value is advisory. The underlying VFS is free to memory map
+ ** as little or as much as it wants. Except, if N is set to 0 then the
+ ** upper layers will never invoke the xFetch interfaces to the VFS.
+ */
+ if( sqlite3StrICmp(zLeft,"mmap_size")==0 ){
+ sqlite3_int64 sz;
+ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ if( zRight ){
+ int ii;
+ sqlite3Atoi64(zRight, &sz, 1000, SQLITE_UTF8);
+ if( sz<0 ) sz = sqlite3GlobalConfig.szMmap;
+ if( pId2->n==0 ) db->szMmap = sz;
+ for(ii=db->nDb-1; ii>=0; ii--){
+ if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){
+ sqlite3BtreeSetMmapLimit(db->aDb[ii].pBt, sz);
+ }
+ }
+ }
+ sz = -1;
+ if( sqlite3_file_control(db,zDb,SQLITE_FCNTL_MMAP_SIZE,&sz)==SQLITE_OK ){
+#if SQLITE_MAX_MMAP_SIZE==0
+ sz = 0;
+#endif
+ returnSingleInt(pParse, "mmap_size", sz);
+ }
+ }else
+
/*
** PRAGMA temp_store
** PRAGMA temp_store = "default"|"memory"|"file"
@@ -93601,6 +94945,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
** PRAGMA [database.]user_version
** PRAGMA [database.]user_version =
**
+ ** PRAGMA [database.]freelist_count =
+ **
+ ** PRAGMA [database.]application_id
+ ** PRAGMA [database.]application_id =
+ **
** The pragma's schema_version and user_version are used to set or get
** the value of the schema-version and user-version, respectively. Both
** the schema-version and the user-version are 32-bit signed integers
@@ -93622,10 +94971,14 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( sqlite3StrICmp(zLeft, "schema_version")==0
|| sqlite3StrICmp(zLeft, "user_version")==0
|| sqlite3StrICmp(zLeft, "freelist_count")==0
+ || sqlite3StrICmp(zLeft, "application_id")==0
){
int iCookie; /* Cookie index. 1 for schema-cookie, 6 for user-cookie. */
sqlite3VdbeUsesBtree(v, iDb);
switch( zLeft[0] ){
+ case 'a': case 'A':
+ iCookie = BTREE_APPLICATION_ID;
+ break;
case 'f': case 'F':
iCookie = BTREE_FREE_PAGE_COUNT;
break;
@@ -94506,7 +95859,6 @@ static int sqlite3Prepare(
}
#endif
- assert( db->init.busy==0 || saveSqlFlag==0 );
if( db->init.busy==0 ){
Vdbe *pVdbe = pParse->pVdbe;
sqlite3VdbeSetSql(pVdbe, zSql, (int)(pParse->zTail-zSql), saveSqlFlag);
@@ -97982,6 +99334,69 @@ SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pF
}
return SQLITE_OK;
}
+/*
+** Detect compound SELECT statements that use an ORDER BY clause with
+** an alternative collating sequence.
+**
+** SELECT ... FROM t1 EXCEPT SELECT ... FROM t2 ORDER BY .. COLLATE ...
+**
+** These are rewritten as a subquery:
+**
+** SELECT * FROM (SELECT ... FROM t1 EXCEPT SELECT ... FROM t2)
+** ORDER BY ... COLLATE ...
+**
+** This transformation is necessary because the multiSelectOrderBy() routine
+** above that generates the code for a compound SELECT with an ORDER BY clause
+** uses a merge algorithm that requires the same collating sequence on the
+** result columns as on the ORDER BY clause. See ticket
+** http://www.sqlite.org/src/info/6709574d2a
+**
+** This transformation is only needed for EXCEPT, INTERSECT, and UNION.
+** The UNION ALL operator works fine with multiSelectOrderBy() even when
+** there are COLLATE terms in the ORDER BY.
+*/
+static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){
+ int i;
+ Select *pNew;
+ Select *pX;
+ sqlite3 *db;
+ struct ExprList_item *a;
+ SrcList *pNewSrc;
+ Parse *pParse;
+ Token dummy;
+
+ if( p->pPrior==0 ) return WRC_Continue;
+ if( p->pOrderBy==0 ) return WRC_Continue;
+ for(pX=p; pX && (pX->op==TK_ALL || pX->op==TK_SELECT); pX=pX->pPrior){}
+ if( pX==0 ) return WRC_Continue;
+ a = p->pOrderBy->a;
+ for(i=p->pOrderBy->nExpr-1; i>=0; i--){
+ if( a[i].pExpr->flags & EP_Collate ) break;
+ }
+ if( i<0 ) return WRC_Continue;
+
+ /* If we reach this point, that means the transformation is required. */
+
+ pParse = pWalker->pParse;
+ db = pParse->db;
+ pNew = sqlite3DbMallocZero(db, sizeof(*pNew) );
+ if( pNew==0 ) return WRC_Abort;
+ memset(&dummy, 0, sizeof(dummy));
+ pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0,0);
+ if( pNewSrc==0 ) return WRC_Abort;
+ *pNew = *p;
+ p->pSrc = pNewSrc;
+ p->pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ALL, 0));
+ p->op = TK_SELECT;
+ p->pWhere = 0;
+ pNew->pGroupBy = 0;
+ pNew->pHaving = 0;
+ pNew->pOrderBy = 0;
+ p->pPrior = 0;
+ pNew->pLimit = 0;
+ pNew->pOffset = 0;
+ return WRC_Continue;
+}
/*
** This routine is a Walker callback for "expanding" a SELECT statement.
@@ -98298,10 +99713,13 @@ static int exprWalkNoop(Walker *NotUsed, Expr *NotUsed2){
*/
static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){
Walker w;
- w.xSelectCallback = selectExpander;
+ memset(&w, 0, sizeof(w));
+ w.xSelectCallback = convertCompoundSelectToSubquery;
w.xExprCallback = exprWalkNoop;
w.pParse = pParse;
sqlite3WalkSelect(&w, pSelect);
+ w.xSelectCallback = selectExpander;
+ sqlite3WalkSelect(&w, pSelect);
}
@@ -98356,9 +99774,11 @@ static int selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){
static void sqlite3SelectAddTypeInfo(Parse *pParse, Select *pSelect){
#ifndef SQLITE_OMIT_SUBQUERY
Walker w;
+ memset(&w, 0, sizeof(w));
w.xSelectCallback = selectAddSubqueryTypeInfo;
w.xExprCallback = exprWalkNoop;
w.pParse = pParse;
+ w.bSelectDepthFirst = 1;
sqlite3WalkSelect(&w, pSelect);
#endif
}
@@ -98769,7 +100189,7 @@ SQLITE_PRIVATE int sqlite3Select(
pItem->addrFillSub = topAddr+1;
VdbeNoopComment((v, "materialize %s", pItem->pTab->zName));
if( pItem->isCorrelated==0 ){
- /* If the subquery is no correlated and if we are not inside of
+ /* If the subquery is not correlated and if we are not inside of
** a trigger, then we only need to compute the value of the subquery
** once. */
onceAddr = sqlite3CodeOnce(pParse);
@@ -101035,6 +102455,7 @@ SQLITE_PRIVATE void sqlite3Update(
}
if( j>=pTab->nCol ){
if( sqlite3IsRowid(pChanges->a[i].zName) ){
+ j = -1;
chngRowid = 1;
pRowidExpr = pChanges->a[i].pExpr;
}else{
@@ -101047,7 +102468,8 @@ SQLITE_PRIVATE void sqlite3Update(
{
int rc;
rc = sqlite3AuthCheck(pParse, SQLITE_UPDATE, pTab->zName,
- pTab->aCol[j].zName, db->aDb[iDb].zName);
+ j<0 ? "ROWID" : pTab->aCol[j].zName,
+ db->aDb[iDb].zName);
if( rc==SQLITE_DENY ){
goto update_cleanup;
}else if( rc==SQLITE_IGNORE ){
@@ -101790,6 +103212,7 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
BTREE_DEFAULT_CACHE_SIZE, 0, /* Preserve the default page cache size */
BTREE_TEXT_ENCODING, 0, /* Preserve the text encoding */
BTREE_USER_VERSION, 0, /* Preserve the user version */
+ BTREE_APPLICATION_ID, 0, /* Preserve the application id */
};
assert( 1==sqlite3BtreeIsInTrans(pTemp) );
@@ -103657,7 +105080,7 @@ static WhereTerm *findTerm(
continue;
}
}
- if( pTerm->prereqRight==0 ){
+ if( pTerm->prereqRight==0 && (pTerm->eOperator&WO_EQ)!=0 ){
pResult = pTerm;
goto findTerm_success;
}else if( pResult==0 ){
@@ -105227,9 +106650,8 @@ static void bestVirtualIndex(WhereBestIdx *p){
struct sqlite3_index_constraint *pIdxCons;
struct sqlite3_index_constraint_usage *pUsage;
WhereTerm *pTerm;
- int i, j, k;
+ int i, j;
int nOrderBy;
- int sortOrder; /* Sort order for IN clauses */
int bAllowIN; /* Allow IN optimizations */
double rCost;
@@ -105328,7 +106750,6 @@ static void bestVirtualIndex(WhereBestIdx *p){
return;
}
- sortOrder = SQLITE_SO_ASC;
pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint;
for(i=0; inConstraint; i++, pIdxCons++){
if( pUsage[i].argvIndex>0 ){
@@ -105343,17 +106764,28 @@ static void bestVirtualIndex(WhereBestIdx *p){
** repeated in the output. */
break;
}
- for(k=0; knOrderBy; k++){
- if( pIdxInfo->aOrderBy[k].iColumn==pIdxCons->iColumn ){
- sortOrder = pIdxInfo->aOrderBy[k].desc;
- break;
- }
- }
+ /* A virtual table that is constrained by an IN clause may not
+ ** consume the ORDER BY clause because (1) the order of IN terms
+ ** is not necessarily related to the order of output terms and
+ ** (2) Multiple outputs from a single IN value will not merge
+ ** together. */
+ pIdxInfo->orderByConsumed = 0;
}
}
}
if( i>=pIdxInfo->nConstraint ) break;
}
+
+ /* The orderByConsumed signal is only valid if all outer loops collectively
+ ** generate just a single row of output.
+ */
+ if( pIdxInfo->orderByConsumed ){
+ for(i=0; ii; i++){
+ if( (p->aLevel[i].plan.wsFlags & WHERE_UNIQUE)==0 ){
+ pIdxInfo->orderByConsumed = 0;
+ }
+ }
+ }
/* If there is an ORDER BY clause, and the selected virtual table index
** does not satisfy it, increase the cost of the scan accordingly. This
@@ -105378,8 +106810,7 @@ static void bestVirtualIndex(WhereBestIdx *p){
}
p->cost.plan.u.pVtabIdx = pIdxInfo;
if( pIdxInfo->orderByConsumed ){
- assert( sortOrder==0 || sortOrder==1 );
- p->cost.plan.wsFlags |= WHERE_ORDERED + sortOrder*WHERE_REVERSE;
+ p->cost.plan.wsFlags |= WHERE_ORDERED;
p->cost.plan.nOBSat = nOrderBy;
}else{
p->cost.plan.nOBSat = p->i ? p->aLevel[p->i-1].plan.nOBSat : 0;
@@ -107116,6 +108547,7 @@ static Bitmask codeOneLoopStart(
int addrCont; /* Jump here to continue with next cycle */
int iRowidReg = 0; /* Rowid is stored in this register, if not zero */
int iReleaseReg = 0; /* Temp register to free before returning */
+ Bitmask newNotReady; /* Return value */
pParse = pWInfo->pParse;
v = pParse->pVdbe;
@@ -107126,6 +108558,7 @@ static Bitmask codeOneLoopStart(
bRev = (pLevel->plan.wsFlags & WHERE_REVERSE)!=0;
omitTable = (pLevel->plan.wsFlags & WHERE_IDX_ONLY)!=0
&& (wctrlFlags & WHERE_FORCE_TABLE)==0;
+ VdbeNoopComment((v, "Begin Join Loop %d", iLevel));
/* Create labels for the "break" and "continue" instructions
** for the current loop. Jump to addrBrk to break out of a loop.
@@ -107668,6 +109101,10 @@ static Bitmask codeOneLoopStart(
** the "interesting" terms of z - terms that did not originate in the
** ON or USING clause of a LEFT JOIN, and terms that are usable as
** indices.
+ **
+ ** This optimization also only applies if the (x1 OR x2 OR ...) term
+ ** is not contained in the ON clause of a LEFT JOIN.
+ ** See ticket http://www.sqlite.org/src/info/f2369304e4
*/
if( pWC->nTerm>1 ){
int iTerm;
@@ -107689,7 +109126,7 @@ static Bitmask codeOneLoopStart(
if( pOrTerm->leftCursor==iCur || (pOrTerm->eOperator & WO_AND)!=0 ){
WhereInfo *pSubWInfo; /* Info for single OR-term scan */
Expr *pOrExpr = pOrTerm->pExpr;
- if( pAndExpr ){
+ if( pAndExpr && !ExprHasProperty(pOrExpr, EP_FromJoin) ){
pAndExpr->pLeft = pOrExpr;
pOrExpr = pAndExpr;
}
@@ -107776,7 +109213,7 @@ static Bitmask codeOneLoopStart(
pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrBrk);
pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP;
}
- notReady &= ~getMask(pWC->pMaskSet, iCur);
+ newNotReady = notReady & ~getMask(pWC->pMaskSet, iCur);
/* Insert code to test every subexpression that can be completely
** computed using the current set of tables.
@@ -107790,7 +109227,7 @@ static Bitmask codeOneLoopStart(
testcase( pTerm->wtFlags & TERM_VIRTUAL ); /* IMP: R-30575-11662 */
testcase( pTerm->wtFlags & TERM_CODED );
if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
- if( (pTerm->prereqAll & notReady)!=0 ){
+ if( (pTerm->prereqAll & newNotReady)!=0 ){
testcase( pWInfo->untestedTerms==0
&& (pWInfo->wctrlFlags & WHERE_ONETABLE_ONLY)!=0 );
pWInfo->untestedTerms = 1;
@@ -107805,6 +109242,33 @@ static Bitmask codeOneLoopStart(
pTerm->wtFlags |= TERM_CODED;
}
+ /* Insert code to test for implied constraints based on transitivity
+ ** of the "==" operator.
+ **
+ ** Example: If the WHERE clause contains "t1.a=t2.b" and "t2.b=123"
+ ** and we are coding the t1 loop and the t2 loop has not yet coded,
+ ** then we cannot use the "t1.a=t2.b" constraint, but we can code
+ ** the implied "t1.a=123" constraint.
+ */
+ for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){
+ Expr *pE;
+ WhereTerm *pAlt;
+ Expr sEq;
+ if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
+ if( pTerm->eOperator!=(WO_EQUIV|WO_EQ) ) continue;
+ if( pTerm->leftCursor!=iCur ) continue;
+ pE = pTerm->pExpr;
+ assert( !ExprHasProperty(pE, EP_FromJoin) );
+ assert( (pTerm->prereqRight & newNotReady)!=0 );
+ pAlt = findTerm(pWC, iCur, pTerm->u.leftColumn, notReady, WO_EQ|WO_IN, 0);
+ if( pAlt==0 ) continue;
+ if( pAlt->wtFlags & (TERM_CODED) ) continue;
+ VdbeNoopComment((v, "begin transitive constraint"));
+ sEq = *pAlt->pExpr;
+ sEq.pLeft = pE->pLeft;
+ sqlite3ExprIfFalse(pParse, &sEq, addrCont, SQLITE_JUMPIFNULL);
+ }
+
/* For a LEFT OUTER JOIN, generate code that will record the fact that
** at least one row of the right table has matched the left table.
*/
@@ -107817,7 +109281,7 @@ static Bitmask codeOneLoopStart(
testcase( pTerm->wtFlags & TERM_VIRTUAL ); /* IMP: R-30575-11662 */
testcase( pTerm->wtFlags & TERM_CODED );
if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
- if( (pTerm->prereqAll & notReady)!=0 ){
+ if( (pTerm->prereqAll & newNotReady)!=0 ){
assert( pWInfo->untestedTerms );
continue;
}
@@ -107828,7 +109292,7 @@ static Bitmask codeOneLoopStart(
}
sqlite3ReleaseTempReg(pParse, iReleaseReg);
- return notReady;
+ return newNotReady;
}
#if defined(SQLITE_TEST)
@@ -111146,7 +112610,9 @@ static void yy_reduce(
struct SrcList_item *pOld = yymsp[-4].minor.yy347->a;
pNew->zName = pOld->zName;
pNew->zDatabase = pOld->zDatabase;
+ pNew->pSelect = pOld->pSelect;
pOld->zName = pOld->zDatabase = 0;
+ pOld->pSelect = 0;
}
sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy347);
}else{
@@ -113814,6 +115280,19 @@ SQLITE_API int sqlite3_config(int op, ...){
}
#endif
+ case SQLITE_CONFIG_MMAP_SIZE: {
+ sqlite3_int64 szMmap = va_arg(ap, sqlite3_int64);
+ sqlite3_int64 mxMmap = va_arg(ap, sqlite3_int64);
+ if( mxMmap<0 || mxMmap>SQLITE_MAX_MMAP_SIZE ){
+ mxMmap = SQLITE_MAX_MMAP_SIZE;
+ }
+ sqlite3GlobalConfig.mxMmap = mxMmap;
+ if( szMmap<0 ) szMmap = SQLITE_DEFAULT_MMAP_SIZE;
+ if( szMmap>mxMmap) szMmap = mxMmap;
+ sqlite3GlobalConfig.szMmap = szMmap;
+ break;
+ }
+
default: {
rc = SQLITE_ERROR;
break;
@@ -114207,6 +115686,12 @@ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){
** go ahead and free all resources.
*/
+ /* If a transaction is open, roll it back. This also ensures that if
+ ** any database schemas have been modified by an uncommitted transaction
+ ** they are reset. And that the required b-tree mutex is held to make
+ ** the pager rollback and schema reset an atomic operation. */
+ sqlite3RollbackAll(db, SQLITE_OK);
+
/* Free any outstanding Savepoint structures. */
sqlite3CloseSavepoints(db);
@@ -114307,6 +115792,15 @@ SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){
int inTrans = 0;
assert( sqlite3_mutex_held(db->mutex) );
sqlite3BeginBenignMalloc();
+
+ /* Obtain all b-tree mutexes before making any calls to BtreeRollback().
+ ** This is important in case the transaction being rolled back has
+ ** modified the database schema. If the b-tree mutexes are not taken
+ ** here, then another shared-cache connection might sneak in between
+ ** the database rollback and schema reset, which can cause false
+ ** corruption reports in some cases. */
+ sqlite3BtreeEnterAll(db);
+
for(i=0; inDb; i++){
Btree *p = db->aDb[i].pBt;
if( p ){
@@ -114324,6 +115818,7 @@ SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){
sqlite3ExpirePreparedStatements(db);
sqlite3ResetAllSchemasOfConnection(db);
}
+ sqlite3BtreeLeaveAll(db);
/* Any deferred constraint violations have now been resolved. */
db->nDeferredCons = 0;
@@ -114334,6 +115829,110 @@ SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){
}
}
+/*
+** Return a static string containing the name corresponding to the error code
+** specified in the argument.
+*/
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) || \
+ defined(SQLITE_DEBUG_OS_TRACE)
+SQLITE_PRIVATE const char *sqlite3ErrName(int rc){
+ const char *zName = 0;
+ int i, origRc = rc;
+ for(i=0; i<2 && zName==0; i++, rc &= 0xff){
+ switch( rc ){
+ case SQLITE_OK: zName = "SQLITE_OK"; break;
+ case SQLITE_ERROR: zName = "SQLITE_ERROR"; break;
+ case SQLITE_INTERNAL: zName = "SQLITE_INTERNAL"; break;
+ case SQLITE_PERM: zName = "SQLITE_PERM"; break;
+ case SQLITE_ABORT: zName = "SQLITE_ABORT"; break;
+ case SQLITE_ABORT_ROLLBACK: zName = "SQLITE_ABORT_ROLLBACK"; break;
+ case SQLITE_BUSY: zName = "SQLITE_BUSY"; break;
+ case SQLITE_BUSY_RECOVERY: zName = "SQLITE_BUSY_RECOVERY"; break;
+ case SQLITE_LOCKED: zName = "SQLITE_LOCKED"; break;
+ case SQLITE_LOCKED_SHAREDCACHE: zName = "SQLITE_LOCKED_SHAREDCACHE";break;
+ case SQLITE_NOMEM: zName = "SQLITE_NOMEM"; break;
+ case SQLITE_READONLY: zName = "SQLITE_READONLY"; break;
+ case SQLITE_READONLY_RECOVERY: zName = "SQLITE_READONLY_RECOVERY"; break;
+ case SQLITE_READONLY_CANTLOCK: zName = "SQLITE_READONLY_CANTLOCK"; break;
+ case SQLITE_READONLY_ROLLBACK: zName = "SQLITE_READONLY_ROLLBACK"; break;
+ case SQLITE_INTERRUPT: zName = "SQLITE_INTERRUPT"; break;
+ case SQLITE_IOERR: zName = "SQLITE_IOERR"; break;
+ case SQLITE_IOERR_READ: zName = "SQLITE_IOERR_READ"; break;
+ case SQLITE_IOERR_SHORT_READ: zName = "SQLITE_IOERR_SHORT_READ"; break;
+ case SQLITE_IOERR_WRITE: zName = "SQLITE_IOERR_WRITE"; break;
+ case SQLITE_IOERR_FSYNC: zName = "SQLITE_IOERR_FSYNC"; break;
+ case SQLITE_IOERR_DIR_FSYNC: zName = "SQLITE_IOERR_DIR_FSYNC"; break;
+ case SQLITE_IOERR_TRUNCATE: zName = "SQLITE_IOERR_TRUNCATE"; break;
+ case SQLITE_IOERR_FSTAT: zName = "SQLITE_IOERR_FSTAT"; break;
+ case SQLITE_IOERR_UNLOCK: zName = "SQLITE_IOERR_UNLOCK"; break;
+ case SQLITE_IOERR_RDLOCK: zName = "SQLITE_IOERR_RDLOCK"; break;
+ case SQLITE_IOERR_DELETE: zName = "SQLITE_IOERR_DELETE"; break;
+ case SQLITE_IOERR_BLOCKED: zName = "SQLITE_IOERR_BLOCKED"; break;
+ case SQLITE_IOERR_NOMEM: zName = "SQLITE_IOERR_NOMEM"; break;
+ case SQLITE_IOERR_ACCESS: zName = "SQLITE_IOERR_ACCESS"; break;
+ case SQLITE_IOERR_CHECKRESERVEDLOCK:
+ zName = "SQLITE_IOERR_CHECKRESERVEDLOCK"; break;
+ case SQLITE_IOERR_LOCK: zName = "SQLITE_IOERR_LOCK"; break;
+ case SQLITE_IOERR_CLOSE: zName = "SQLITE_IOERR_CLOSE"; break;
+ case SQLITE_IOERR_DIR_CLOSE: zName = "SQLITE_IOERR_DIR_CLOSE"; break;
+ case SQLITE_IOERR_SHMOPEN: zName = "SQLITE_IOERR_SHMOPEN"; break;
+ case SQLITE_IOERR_SHMSIZE: zName = "SQLITE_IOERR_SHMSIZE"; break;
+ case SQLITE_IOERR_SHMLOCK: zName = "SQLITE_IOERR_SHMLOCK"; break;
+ case SQLITE_IOERR_SHMMAP: zName = "SQLITE_IOERR_SHMMAP"; break;
+ case SQLITE_IOERR_SEEK: zName = "SQLITE_IOERR_SEEK"; break;
+ case SQLITE_IOERR_DELETE_NOENT: zName = "SQLITE_IOERR_DELETE_NOENT";break;
+ case SQLITE_IOERR_MMAP: zName = "SQLITE_IOERR_MMAP"; break;
+ case SQLITE_CORRUPT: zName = "SQLITE_CORRUPT"; break;
+ case SQLITE_CORRUPT_VTAB: zName = "SQLITE_CORRUPT_VTAB"; break;
+ case SQLITE_NOTFOUND: zName = "SQLITE_NOTFOUND"; break;
+ case SQLITE_FULL: zName = "SQLITE_FULL"; break;
+ case SQLITE_CANTOPEN: zName = "SQLITE_CANTOPEN"; break;
+ case SQLITE_CANTOPEN_NOTEMPDIR: zName = "SQLITE_CANTOPEN_NOTEMPDIR";break;
+ case SQLITE_CANTOPEN_ISDIR: zName = "SQLITE_CANTOPEN_ISDIR"; break;
+ case SQLITE_CANTOPEN_FULLPATH: zName = "SQLITE_CANTOPEN_FULLPATH"; break;
+ case SQLITE_PROTOCOL: zName = "SQLITE_PROTOCOL"; break;
+ case SQLITE_EMPTY: zName = "SQLITE_EMPTY"; break;
+ case SQLITE_SCHEMA: zName = "SQLITE_SCHEMA"; break;
+ case SQLITE_TOOBIG: zName = "SQLITE_TOOBIG"; break;
+ case SQLITE_CONSTRAINT: zName = "SQLITE_CONSTRAINT"; break;
+ case SQLITE_CONSTRAINT_UNIQUE: zName = "SQLITE_CONSTRAINT_UNIQUE"; break;
+ case SQLITE_CONSTRAINT_TRIGGER: zName = "SQLITE_CONSTRAINT_TRIGGER";break;
+ case SQLITE_CONSTRAINT_FOREIGNKEY:
+ zName = "SQLITE_CONSTRAINT_FOREIGNKEY"; break;
+ case SQLITE_CONSTRAINT_CHECK: zName = "SQLITE_CONSTRAINT_CHECK"; break;
+ case SQLITE_CONSTRAINT_PRIMARYKEY:
+ zName = "SQLITE_CONSTRAINT_PRIMARYKEY"; break;
+ case SQLITE_CONSTRAINT_NOTNULL: zName = "SQLITE_CONSTRAINT_NOTNULL";break;
+ case SQLITE_CONSTRAINT_COMMITHOOK:
+ zName = "SQLITE_CONSTRAINT_COMMITHOOK"; break;
+ case SQLITE_CONSTRAINT_VTAB: zName = "SQLITE_CONSTRAINT_VTAB"; break;
+ case SQLITE_CONSTRAINT_FUNCTION:
+ zName = "SQLITE_CONSTRAINT_FUNCTION"; break;
+ case SQLITE_MISMATCH: zName = "SQLITE_MISMATCH"; break;
+ case SQLITE_MISUSE: zName = "SQLITE_MISUSE"; break;
+ case SQLITE_NOLFS: zName = "SQLITE_NOLFS"; break;
+ case SQLITE_AUTH: zName = "SQLITE_AUTH"; break;
+ case SQLITE_FORMAT: zName = "SQLITE_FORMAT"; break;
+ case SQLITE_RANGE: zName = "SQLITE_RANGE"; break;
+ case SQLITE_NOTADB: zName = "SQLITE_NOTADB"; break;
+ case SQLITE_ROW: zName = "SQLITE_ROW"; break;
+ case SQLITE_NOTICE: zName = "SQLITE_NOTICE"; break;
+ case SQLITE_NOTICE_RECOVER_WAL: zName = "SQLITE_NOTICE_RECOVER_WAL";break;
+ case SQLITE_NOTICE_RECOVER_ROLLBACK:
+ zName = "SQLITE_NOTICE_RECOVER_ROLLBACK"; break;
+ case SQLITE_WARNING: zName = "SQLITE_WARNING"; break;
+ case SQLITE_DONE: zName = "SQLITE_DONE"; break;
+ }
+ }
+ if( zName==0 ){
+ static char zBuf[50];
+ sqlite3_snprintf(sizeof(zBuf), zBuf, "SQLITE_UNKNOWN(%d)", origRc);
+ zName = zBuf;
+ }
+ return zName;
+}
+#endif
+
/*
** Return a static string that describes the kind of error specified in the
** argument.
@@ -115634,6 +117233,7 @@ static int openDatabase(
memcpy(db->aLimit, aHardLimit, sizeof(db->aLimit));
db->autoCommit = 1;
db->nextAutovac = -1;
+ db->szMmap = sqlite3GlobalConfig.szMmap;
db->nextPagesize = 0;
db->flags |= SQLITE_ShortColNames | SQLITE_AutoIndex | SQLITE_EnableTrigger
#if SQLITE_DEFAULT_FILE_FORMAT<4
@@ -117950,7 +119550,7 @@ SQLITE_PRIVATE void sqlite3Fts3Matchinfo(sqlite3_context *, Fts3Cursor *, const
/* fts3_expr.c */
SQLITE_PRIVATE int sqlite3Fts3ExprParse(sqlite3_tokenizer *, int,
- char **, int, int, int, const char *, int, Fts3Expr **
+ char **, int, int, int, const char *, int, Fts3Expr **, char **
);
SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *);
#ifdef SQLITE_TEST
@@ -117975,6 +119575,9 @@ SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(Fts3Cursor *, Fts3Expr *, int iC
SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(Fts3Cursor *, Fts3MultiSegReader *, int *);
SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr);
+/* fts3_tokenize_vtab.c */
+SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *);
+
/* fts3_unicode2.c (functions generated by parsing unicode text files) */
#ifdef SQLITE_ENABLE_FTS4_UNICODE61
SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int, int);
@@ -120671,14 +122274,12 @@ static int fts3FilterMethod(
pCsr->iLangid = 0;
if( nVal==2 ) pCsr->iLangid = sqlite3_value_int(apVal[1]);
+ assert( p->base.zErrMsg==0 );
rc = sqlite3Fts3ExprParse(p->pTokenizer, pCsr->iLangid,
- p->azColumn, p->bFts4, p->nColumn, iCol, zQuery, -1, &pCsr->pExpr
+ p->azColumn, p->bFts4, p->nColumn, iCol, zQuery, -1, &pCsr->pExpr,
+ &p->base.zErrMsg
);
if( rc!=SQLITE_OK ){
- if( rc==SQLITE_ERROR ){
- static const char *zErr = "malformed MATCH expression: [%s]";
- p->base.zErrMsg = sqlite3_mprintf(zErr, zQuery);
- }
return rc;
}
@@ -121342,9 +122943,13 @@ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){
db, "fts4", &fts3Module, (void *)pHash, 0
);
}
+ if( rc==SQLITE_OK ){
+ rc = sqlite3Fts3InitTok(db, (void *)pHash);
+ }
return rc;
}
+
/* An error has occurred. Delete the hash table and return the error code. */
assert( rc!=SQLITE_OK );
if( pHash ){
@@ -123118,17 +124723,26 @@ static int fts3auxConnectMethod(
UNUSED_PARAMETER(pUnused);
- /* The user should specify a single argument - the name of an fts3 table. */
- if( argc!=4 ){
- *pzErr = sqlite3_mprintf(
- "wrong number of arguments to fts4aux constructor"
- );
- return SQLITE_ERROR;
- }
+ /* The user should invoke this in one of two forms:
+ **
+ ** CREATE VIRTUAL TABLE xxx USING fts4aux(fts4-table);
+ ** CREATE VIRTUAL TABLE xxx USING fts4aux(fts4-table-db, fts4-table);
+ */
+ if( argc!=4 && argc!=5 ) goto bad_args;
zDb = argv[1];
nDb = (int)strlen(zDb);
- zFts3 = argv[3];
+ if( argc==5 ){
+ if( nDb==4 && 0==sqlite3_strnicmp("temp", zDb, 4) ){
+ zDb = argv[3];
+ nDb = (int)strlen(zDb);
+ zFts3 = argv[4];
+ }else{
+ goto bad_args;
+ }
+ }else{
+ zFts3 = argv[3];
+ }
nFts3 = (int)strlen(zFts3);
rc = sqlite3_declare_vtab(db, FTS3_TERMS_SCHEMA);
@@ -123151,6 +124765,10 @@ static int fts3auxConnectMethod(
*ppVtab = (sqlite3_vtab *)p;
return SQLITE_OK;
+
+ bad_args:
+ *pzErr = sqlite3_mprintf("invalid arguments to fts4aux constructor");
+ return SQLITE_ERROR;
}
/*
@@ -124164,8 +125782,10 @@ static int fts3ExprParse(
}
pNot->eType = FTSQUERY_NOT;
pNot->pRight = p;
+ p->pParent = pNot;
if( pNotBranch ){
pNot->pLeft = pNotBranch;
+ pNotBranch->pParent = pNot;
}
pNotBranch = pNot;
p = pPrev;
@@ -124253,6 +125873,7 @@ static int fts3ExprParse(
pIter = pIter->pLeft;
}
pIter->pLeft = pRet;
+ pRet->pParent = pIter;
pRet = pNotBranch;
}
}
@@ -124269,6 +125890,223 @@ exprparse_out:
return rc;
}
+/*
+** Return SQLITE_ERROR if the maximum depth of the expression tree passed
+** as the only argument is more than nMaxDepth.
+*/
+static int fts3ExprCheckDepth(Fts3Expr *p, int nMaxDepth){
+ int rc = SQLITE_OK;
+ if( p ){
+ if( nMaxDepth<0 ){
+ rc = SQLITE_TOOBIG;
+ }else{
+ rc = fts3ExprCheckDepth(p->pLeft, nMaxDepth-1);
+ if( rc==SQLITE_OK ){
+ rc = fts3ExprCheckDepth(p->pRight, nMaxDepth-1);
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** This function attempts to transform the expression tree at (*pp) to
+** an equivalent but more balanced form. The tree is modified in place.
+** If successful, SQLITE_OK is returned and (*pp) set to point to the
+** new root expression node.
+**
+** nMaxDepth is the maximum allowable depth of the balanced sub-tree.
+**
+** Otherwise, if an error occurs, an SQLite error code is returned and
+** expression (*pp) freed.
+*/
+static int fts3ExprBalance(Fts3Expr **pp, int nMaxDepth){
+ int rc = SQLITE_OK; /* Return code */
+ Fts3Expr *pRoot = *pp; /* Initial root node */
+ Fts3Expr *pFree = 0; /* List of free nodes. Linked by pParent. */
+ int eType = pRoot->eType; /* Type of node in this tree */
+
+ if( nMaxDepth==0 ){
+ rc = SQLITE_ERROR;
+ }
+
+ if( rc==SQLITE_OK && (eType==FTSQUERY_AND || eType==FTSQUERY_OR) ){
+ Fts3Expr **apLeaf;
+ apLeaf = (Fts3Expr **)sqlite3_malloc(sizeof(Fts3Expr *) * nMaxDepth);
+ if( 0==apLeaf ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(apLeaf, 0, sizeof(Fts3Expr *) * nMaxDepth);
+ }
+
+ if( rc==SQLITE_OK ){
+ int i;
+ Fts3Expr *p;
+
+ /* Set $p to point to the left-most leaf in the tree of eType nodes. */
+ for(p=pRoot; p->eType==eType; p=p->pLeft){
+ assert( p->pParent==0 || p->pParent->pLeft==p );
+ assert( p->pLeft && p->pRight );
+ }
+
+ /* This loop runs once for each leaf in the tree of eType nodes. */
+ while( 1 ){
+ int iLvl;
+ Fts3Expr *pParent = p->pParent; /* Current parent of p */
+
+ assert( pParent==0 || pParent->pLeft==p );
+ p->pParent = 0;
+ if( pParent ){
+ pParent->pLeft = 0;
+ }else{
+ pRoot = 0;
+ }
+ rc = fts3ExprBalance(&p, nMaxDepth-1);
+ if( rc!=SQLITE_OK ) break;
+
+ for(iLvl=0; p && iLvlpLeft = apLeaf[iLvl];
+ pFree->pRight = p;
+ pFree->pLeft->pParent = pFree;
+ pFree->pRight->pParent = pFree;
+
+ p = pFree;
+ pFree = pFree->pParent;
+ p->pParent = 0;
+ apLeaf[iLvl] = 0;
+ }
+ }
+ if( p ){
+ sqlite3Fts3ExprFree(p);
+ rc = SQLITE_TOOBIG;
+ break;
+ }
+
+ /* If that was the last leaf node, break out of the loop */
+ if( pParent==0 ) break;
+
+ /* Set $p to point to the next leaf in the tree of eType nodes */
+ for(p=pParent->pRight; p->eType==eType; p=p->pLeft);
+
+ /* Remove pParent from the original tree. */
+ assert( pParent->pParent==0 || pParent->pParent->pLeft==pParent );
+ pParent->pRight->pParent = pParent->pParent;
+ if( pParent->pParent ){
+ pParent->pParent->pLeft = pParent->pRight;
+ }else{
+ assert( pParent==pRoot );
+ pRoot = pParent->pRight;
+ }
+
+ /* Link pParent into the free node list. It will be used as an
+ ** internal node of the new tree. */
+ pParent->pParent = pFree;
+ pFree = pParent;
+ }
+
+ if( rc==SQLITE_OK ){
+ p = 0;
+ for(i=0; ipParent = 0;
+ }else{
+ assert( pFree!=0 );
+ pFree->pRight = p;
+ pFree->pLeft = apLeaf[i];
+ pFree->pLeft->pParent = pFree;
+ pFree->pRight->pParent = pFree;
+
+ p = pFree;
+ pFree = pFree->pParent;
+ p->pParent = 0;
+ }
+ }
+ }
+ pRoot = p;
+ }else{
+ /* An error occurred. Delete the contents of the apLeaf[] array
+ ** and pFree list. Everything else is cleaned up by the call to
+ ** sqlite3Fts3ExprFree(pRoot) below. */
+ Fts3Expr *pDel;
+ for(i=0; ipParent;
+ sqlite3_free(pDel);
+ }
+ }
+
+ assert( pFree==0 );
+ sqlite3_free( apLeaf );
+ }
+ }
+
+ if( rc!=SQLITE_OK ){
+ sqlite3Fts3ExprFree(pRoot);
+ pRoot = 0;
+ }
+ *pp = pRoot;
+ return rc;
+}
+
+/*
+** This function is similar to sqlite3Fts3ExprParse(), with the following
+** differences:
+**
+** 1. It does not do expression rebalancing.
+** 2. It does not check that the expression does not exceed the
+** maximum allowable depth.
+** 3. Even if it fails, *ppExpr may still be set to point to an
+** expression tree. It should be deleted using sqlite3Fts3ExprFree()
+** in this case.
+*/
+static int fts3ExprParseUnbalanced(
+ sqlite3_tokenizer *pTokenizer, /* Tokenizer module */
+ int iLangid, /* Language id for tokenizer */
+ char **azCol, /* Array of column names for fts3 table */
+ int bFts4, /* True to allow FTS4-only syntax */
+ int nCol, /* Number of entries in azCol[] */
+ int iDefaultCol, /* Default column to query */
+ const char *z, int n, /* Text of MATCH query */
+ Fts3Expr **ppExpr /* OUT: Parsed query structure */
+){
+ int nParsed;
+ int rc;
+ ParseContext sParse;
+
+ memset(&sParse, 0, sizeof(ParseContext));
+ sParse.pTokenizer = pTokenizer;
+ sParse.iLangid = iLangid;
+ sParse.azCol = (const char **)azCol;
+ sParse.nCol = nCol;
+ sParse.iDefaultCol = iDefaultCol;
+ sParse.bFts4 = bFts4;
+ if( z==0 ){
+ *ppExpr = 0;
+ return SQLITE_OK;
+ }
+ if( n<0 ){
+ n = (int)strlen(z);
+ }
+ rc = fts3ExprParse(&sParse, z, n, ppExpr, &nParsed);
+ assert( rc==SQLITE_OK || *ppExpr==0 );
+
+ /* Check for mismatched parenthesis */
+ if( rc==SQLITE_OK && sParse.nNest ){
+ rc = SQLITE_ERROR;
+ }
+
+ return rc;
+}
+
/*
** Parameters z and n contain a pointer to and length of a buffer containing
** an fts3 query expression, respectively. This function attempts to parse the
@@ -124301,49 +126139,74 @@ SQLITE_PRIVATE int sqlite3Fts3ExprParse(
int nCol, /* Number of entries in azCol[] */
int iDefaultCol, /* Default column to query */
const char *z, int n, /* Text of MATCH query */
- Fts3Expr **ppExpr /* OUT: Parsed query structure */
+ Fts3Expr **ppExpr, /* OUT: Parsed query structure */
+ char **pzErr /* OUT: Error message (sqlite3_malloc) */
){
- int nParsed;
- int rc;
- ParseContext sParse;
-
- memset(&sParse, 0, sizeof(ParseContext));
- sParse.pTokenizer = pTokenizer;
- sParse.iLangid = iLangid;
- sParse.azCol = (const char **)azCol;
- sParse.nCol = nCol;
- sParse.iDefaultCol = iDefaultCol;
- sParse.bFts4 = bFts4;
- if( z==0 ){
- *ppExpr = 0;
- return SQLITE_OK;
+ static const int MAX_EXPR_DEPTH = 12;
+ int rc = fts3ExprParseUnbalanced(
+ pTokenizer, iLangid, azCol, bFts4, nCol, iDefaultCol, z, n, ppExpr
+ );
+
+ /* Rebalance the expression. And check that its depth does not exceed
+ ** MAX_EXPR_DEPTH. */
+ if( rc==SQLITE_OK && *ppExpr ){
+ rc = fts3ExprBalance(ppExpr, MAX_EXPR_DEPTH);
+ if( rc==SQLITE_OK ){
+ rc = fts3ExprCheckDepth(*ppExpr, MAX_EXPR_DEPTH);
+ }
}
- if( n<0 ){
- n = (int)strlen(z);
- }
- rc = fts3ExprParse(&sParse, z, n, ppExpr, &nParsed);
- /* Check for mismatched parenthesis */
- if( rc==SQLITE_OK && sParse.nNest ){
- rc = SQLITE_ERROR;
+ if( rc!=SQLITE_OK ){
sqlite3Fts3ExprFree(*ppExpr);
*ppExpr = 0;
+ if( rc==SQLITE_TOOBIG ){
+ *pzErr = sqlite3_mprintf(
+ "FTS expression tree is too large (maximum depth %d)", MAX_EXPR_DEPTH
+ );
+ rc = SQLITE_ERROR;
+ }else if( rc==SQLITE_ERROR ){
+ *pzErr = sqlite3_mprintf("malformed MATCH expression: [%s]", z);
+ }
}
return rc;
}
/*
-** Free a parsed fts3 query expression allocated by sqlite3Fts3ExprParse().
+** Free a single node of an expression tree.
*/
-SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *p){
- if( p ){
- assert( p->eType==FTSQUERY_PHRASE || p->pPhrase==0 );
- sqlite3Fts3ExprFree(p->pLeft);
- sqlite3Fts3ExprFree(p->pRight);
- sqlite3Fts3EvalPhraseCleanup(p->pPhrase);
- sqlite3_free(p->aMI);
- sqlite3_free(p);
+static void fts3FreeExprNode(Fts3Expr *p){
+ assert( p->eType==FTSQUERY_PHRASE || p->pPhrase==0 );
+ sqlite3Fts3EvalPhraseCleanup(p->pPhrase);
+ sqlite3_free(p->aMI);
+ sqlite3_free(p);
+}
+
+/*
+** Free a parsed fts3 query expression allocated by sqlite3Fts3ExprParse().
+**
+** This function would be simpler if it recursively called itself. But
+** that would mean passing a sufficiently large expression to ExprParse()
+** could cause a stack overflow.
+*/
+SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *pDel){
+ Fts3Expr *p;
+ assert( pDel==0 || pDel->pParent==0 );
+ for(p=pDel; p && (p->pLeft||p->pRight); p=(p->pLeft ? p->pLeft : p->pRight)){
+ assert( p->pParent==0 || p==p->pParent->pRight || p==p->pParent->pLeft );
+ }
+ while( p ){
+ Fts3Expr *pParent = p->pParent;
+ fts3FreeExprNode(p);
+ if( pParent && p==pParent->pLeft && pParent->pRight ){
+ p = pParent->pRight;
+ while( p && (p->pLeft || p->pRight) ){
+ assert( p==p->pParent->pRight || p==p->pParent->pLeft );
+ p = (p->pLeft ? p->pLeft : p->pRight);
+ }
+ }else{
+ p = pParent;
+ }
}
}
@@ -124395,6 +126258,9 @@ static int queryTestTokenizer(
** the returned expression text and then freed using sqlite3_free().
*/
static char *exprToString(Fts3Expr *pExpr, char *zBuf){
+ if( pExpr==0 ){
+ return sqlite3_mprintf("");
+ }
switch( pExpr->eType ){
case FTSQUERY_PHRASE: {
Fts3Phrase *pPhrase = pExpr->pPhrase;
@@ -124502,10 +126368,21 @@ static void fts3ExprTest(
azCol[ii] = (char *)sqlite3_value_text(argv[ii+2]);
}
- rc = sqlite3Fts3ExprParse(
- pTokenizer, 0, azCol, 0, nCol, nCol, zExpr, nExpr, &pExpr
- );
+ if( sqlite3_user_data(context) ){
+ char *zDummy = 0;
+ rc = sqlite3Fts3ExprParse(
+ pTokenizer, 0, azCol, 0, nCol, nCol, zExpr, nExpr, &pExpr, &zDummy
+ );
+ assert( rc==SQLITE_OK || pExpr==0 );
+ sqlite3_free(zDummy);
+ }else{
+ rc = fts3ExprParseUnbalanced(
+ pTokenizer, 0, azCol, 0, nCol, nCol, zExpr, nExpr, &pExpr
+ );
+ }
+
if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM ){
+ sqlite3Fts3ExprFree(pExpr);
sqlite3_result_error(context, "Error parsing expression", -1);
}else if( rc==SQLITE_NOMEM || !(zBuf = exprToString(pExpr, 0)) ){
sqlite3_result_error_nomem(context);
@@ -124528,9 +126405,15 @@ exprtest_out:
** with database connection db.
*/
SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3* db){
- return sqlite3_create_function(
+ int rc = sqlite3_create_function(
db, "fts3_exprtest", -1, SQLITE_UTF8, 0, fts3ExprTest, 0, 0
);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3_create_function(db, "fts3_exprtest_rebalance",
+ -1, SQLITE_UTF8, (void *)1, fts3ExprTest, 0, 0
+ );
+ }
+ return rc;
}
#endif
@@ -126293,6 +128176,462 @@ SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(
#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
/************** End of fts3_tokenizer1.c *************************************/
+/************** Begin file fts3_tokenize_vtab.c ******************************/
+/*
+** 2013 Apr 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains code for the "fts3tokenize" virtual table module.
+** An fts3tokenize virtual table is created as follows:
+**
+** CREATE VIRTUAL TABLE USING fts3tokenize(
+** , , ...
+** );
+**
+** The table created has the following schema:
+**
+** CREATE TABLE (input, token, start, end, position)
+**
+** When queried, the query must include a WHERE clause of type:
+**
+** input =
+**
+** The virtual table module tokenizes this , using the FTS3
+** tokenizer specified by the arguments to the CREATE VIRTUAL TABLE
+** statement and returns one row for each token in the result. With
+** fields set as follows:
+**
+** input: Always set to a copy of
+** token: A token from the input.
+** start: Byte offset of the token within the input .
+** end: Byte offset of the byte immediately following the end of the
+** token within the input string.
+** pos: Token offset of token within input.
+**
+*/
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include */
+/* #include */
+
+typedef struct Fts3tokTable Fts3tokTable;
+typedef struct Fts3tokCursor Fts3tokCursor;
+
+/*
+** Virtual table structure.
+*/
+struct Fts3tokTable {
+ sqlite3_vtab base; /* Base class used by SQLite core */
+ const sqlite3_tokenizer_module *pMod;
+ sqlite3_tokenizer *pTok;
+};
+
+/*
+** Virtual table cursor structure.
+*/
+struct Fts3tokCursor {
+ sqlite3_vtab_cursor base; /* Base class used by SQLite core */
+ char *zInput; /* Input string */
+ sqlite3_tokenizer_cursor *pCsr; /* Cursor to iterate through zInput */
+ int iRowid; /* Current 'rowid' value */
+ const char *zToken; /* Current 'token' value */
+ int nToken; /* Size of zToken in bytes */
+ int iStart; /* Current 'start' value */
+ int iEnd; /* Current 'end' value */
+ int iPos; /* Current 'pos' value */
+};
+
+/*
+** Query FTS for the tokenizer implementation named zName.
+*/
+static int fts3tokQueryTokenizer(
+ Fts3Hash *pHash,
+ const char *zName,
+ const sqlite3_tokenizer_module **pp,
+ char **pzErr
+){
+ sqlite3_tokenizer_module *p;
+ int nName = (int)strlen(zName);
+
+ p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1);
+ if( !p ){
+ *pzErr = sqlite3_mprintf("unknown tokenizer: %s", zName);
+ return SQLITE_ERROR;
+ }
+
+ *pp = p;
+ return SQLITE_OK;
+}
+
+/*
+** The second argument, argv[], is an array of pointers to nul-terminated
+** strings. This function makes a copy of the array and strings into a
+** single block of memory. It then dequotes any of the strings that appear
+** to be quoted.
+**
+** If successful, output parameter *pazDequote is set to point at the
+** array of dequoted strings and SQLITE_OK is returned. The caller is
+** responsible for eventually calling sqlite3_free() to free the array
+** in this case. Or, if an error occurs, an SQLite error code is returned.
+** The final value of *pazDequote is undefined in this case.
+*/
+static int fts3tokDequoteArray(
+ int argc, /* Number of elements in argv[] */
+ const char * const *argv, /* Input array */
+ char ***pazDequote /* Output array */
+){
+ int rc = SQLITE_OK; /* Return code */
+ if( argc==0 ){
+ *pazDequote = 0;
+ }else{
+ int i;
+ int nByte = 0;
+ char **azDequote;
+
+ for(i=0; ixCreate((nDequote>1 ? nDequote-1 : 0), azArg, &pTok);
+ }
+
+ if( rc==SQLITE_OK ){
+ pTab = (Fts3tokTable *)sqlite3_malloc(sizeof(Fts3tokTable));
+ if( pTab==0 ){
+ rc = SQLITE_NOMEM;
+ }
+ }
+
+ if( rc==SQLITE_OK ){
+ memset(pTab, 0, sizeof(Fts3tokTable));
+ pTab->pMod = pMod;
+ pTab->pTok = pTok;
+ *ppVtab = &pTab->base;
+ }else{
+ if( pTok ){
+ pMod->xDestroy(pTok);
+ }
+ }
+
+ sqlite3_free(azDequote);
+ return rc;
+}
+
+/*
+** This function does the work for both the xDisconnect and xDestroy methods.
+** These tables have no persistent representation of their own, so xDisconnect
+** and xDestroy are identical operations.
+*/
+static int fts3tokDisconnectMethod(sqlite3_vtab *pVtab){
+ Fts3tokTable *pTab = (Fts3tokTable *)pVtab;
+
+ pTab->pMod->xDestroy(pTab->pTok);
+ sqlite3_free(pTab);
+ return SQLITE_OK;
+}
+
+/*
+** xBestIndex - Analyze a WHERE and ORDER BY clause.
+*/
+static int fts3tokBestIndexMethod(
+ sqlite3_vtab *pVTab,
+ sqlite3_index_info *pInfo
+){
+ int i;
+ UNUSED_PARAMETER(pVTab);
+
+ for(i=0; inConstraint; i++){
+ if( pInfo->aConstraint[i].usable
+ && pInfo->aConstraint[i].iColumn==0
+ && pInfo->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_EQ
+ ){
+ pInfo->idxNum = 1;
+ pInfo->aConstraintUsage[i].argvIndex = 1;
+ pInfo->aConstraintUsage[i].omit = 1;
+ pInfo->estimatedCost = 1;
+ return SQLITE_OK;
+ }
+ }
+
+ pInfo->idxNum = 0;
+ assert( pInfo->estimatedCost>1000000.0 );
+
+ return SQLITE_OK;
+}
+
+/*
+** xOpen - Open a cursor.
+*/
+static int fts3tokOpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){
+ Fts3tokCursor *pCsr;
+ UNUSED_PARAMETER(pVTab);
+
+ pCsr = (Fts3tokCursor *)sqlite3_malloc(sizeof(Fts3tokCursor));
+ if( pCsr==0 ){
+ return SQLITE_NOMEM;
+ }
+ memset(pCsr, 0, sizeof(Fts3tokCursor));
+
+ *ppCsr = (sqlite3_vtab_cursor *)pCsr;
+ return SQLITE_OK;
+}
+
+/*
+** Reset the tokenizer cursor passed as the only argument. As if it had
+** just been returned by fts3tokOpenMethod().
+*/
+static void fts3tokResetCursor(Fts3tokCursor *pCsr){
+ if( pCsr->pCsr ){
+ Fts3tokTable *pTab = (Fts3tokTable *)(pCsr->base.pVtab);
+ pTab->pMod->xClose(pCsr->pCsr);
+ pCsr->pCsr = 0;
+ }
+ sqlite3_free(pCsr->zInput);
+ pCsr->zInput = 0;
+ pCsr->zToken = 0;
+ pCsr->nToken = 0;
+ pCsr->iStart = 0;
+ pCsr->iEnd = 0;
+ pCsr->iPos = 0;
+ pCsr->iRowid = 0;
+}
+
+/*
+** xClose - Close a cursor.
+*/
+static int fts3tokCloseMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+
+ fts3tokResetCursor(pCsr);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+/*
+** xNext - Advance the cursor to the next row, if any.
+*/
+static int fts3tokNextMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+ Fts3tokTable *pTab = (Fts3tokTable *)(pCursor->pVtab);
+ int rc; /* Return code */
+
+ pCsr->iRowid++;
+ rc = pTab->pMod->xNext(pCsr->pCsr,
+ &pCsr->zToken, &pCsr->nToken,
+ &pCsr->iStart, &pCsr->iEnd, &pCsr->iPos
+ );
+
+ if( rc!=SQLITE_OK ){
+ fts3tokResetCursor(pCsr);
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ }
+
+ return rc;
+}
+
+/*
+** xFilter - Initialize a cursor to point at the start of its data.
+*/
+static int fts3tokFilterMethod(
+ sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */
+ int idxNum, /* Strategy index */
+ const char *idxStr, /* Unused */
+ int nVal, /* Number of elements in apVal */
+ sqlite3_value **apVal /* Arguments for the indexing scheme */
+){
+ int rc = SQLITE_ERROR;
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+ Fts3tokTable *pTab = (Fts3tokTable *)(pCursor->pVtab);
+ UNUSED_PARAMETER(idxStr);
+ UNUSED_PARAMETER(nVal);
+
+ fts3tokResetCursor(pCsr);
+ if( idxNum==1 ){
+ const char *zByte = (const char *)sqlite3_value_text(apVal[0]);
+ int nByte = sqlite3_value_bytes(apVal[0]);
+ pCsr->zInput = sqlite3_malloc(nByte+1);
+ if( pCsr->zInput==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memcpy(pCsr->zInput, zByte, nByte);
+ pCsr->zInput[nByte] = 0;
+ rc = pTab->pMod->xOpen(pTab->pTok, pCsr->zInput, nByte, &pCsr->pCsr);
+ if( rc==SQLITE_OK ){
+ pCsr->pCsr->pTokenizer = pTab->pTok;
+ }
+ }
+ }
+
+ if( rc!=SQLITE_OK ) return rc;
+ return fts3tokNextMethod(pCursor);
+}
+
+/*
+** xEof - Return true if the cursor is at EOF, or false otherwise.
+*/
+static int fts3tokEofMethod(sqlite3_vtab_cursor *pCursor){
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+ return (pCsr->zToken==0);
+}
+
+/*
+** xColumn - Return a column value.
+*/
+static int fts3tokColumnMethod(
+ sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
+ sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */
+ int iCol /* Index of column to read value from */
+){
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+
+ /* CREATE TABLE x(input, token, start, end, position) */
+ switch( iCol ){
+ case 0:
+ sqlite3_result_text(pCtx, pCsr->zInput, -1, SQLITE_TRANSIENT);
+ break;
+ case 1:
+ sqlite3_result_text(pCtx, pCsr->zToken, pCsr->nToken, SQLITE_TRANSIENT);
+ break;
+ case 2:
+ sqlite3_result_int(pCtx, pCsr->iStart);
+ break;
+ case 3:
+ sqlite3_result_int(pCtx, pCsr->iEnd);
+ break;
+ default:
+ assert( iCol==4 );
+ sqlite3_result_int(pCtx, pCsr->iPos);
+ break;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** xRowid - Return the current rowid for the cursor.
+*/
+static int fts3tokRowidMethod(
+ sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */
+ sqlite_int64 *pRowid /* OUT: Rowid value */
+){
+ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+ *pRowid = (sqlite3_int64)pCsr->iRowid;
+ return SQLITE_OK;
+}
+
+/*
+** Register the fts3tok module with database connection db. Return SQLITE_OK
+** if successful or an error code if sqlite3_create_module() fails.
+*/
+SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash){
+ static const sqlite3_module fts3tok_module = {
+ 0, /* iVersion */
+ fts3tokConnectMethod, /* xCreate */
+ fts3tokConnectMethod, /* xConnect */
+ fts3tokBestIndexMethod, /* xBestIndex */
+ fts3tokDisconnectMethod, /* xDisconnect */
+ fts3tokDisconnectMethod, /* xDestroy */
+ fts3tokOpenMethod, /* xOpen */
+ fts3tokCloseMethod, /* xClose */
+ fts3tokFilterMethod, /* xFilter */
+ fts3tokNextMethod, /* xNext */
+ fts3tokEofMethod, /* xEof */
+ fts3tokColumnMethod, /* xColumn */
+ fts3tokRowidMethod, /* xRowid */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindFunction */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0 /* xRollbackTo */
+ };
+ int rc; /* Return code */
+
+ rc = sqlite3_create_module(db, "fts3tokenize", &fts3tok_module, (void*)pHash);
+ return rc;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_tokenize_vtab.c **********************************/
/************** Begin file fts3_write.c **************************************/
/*
** 2009 Oct 23
diff --git a/src/3rdparty/sqlite3.h b/src/3rdparty/sqlite3.h
index 69b4586a3f..e398838287 100644
--- a/src/3rdparty/sqlite3.h
+++ b/src/3rdparty/sqlite3.h
@@ -107,9 +107,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.7.16.2"
-#define SQLITE_VERSION_NUMBER 3007016
-#define SQLITE_SOURCE_ID "2013-04-12 11:52:43 cbea02d93865ce0e06789db95fd9168ebac970c7"
+#define SQLITE_VERSION "3.7.17"
+#define SQLITE_VERSION_NUMBER 3007017
+#define SQLITE_SOURCE_ID "2013-05-20 00:56:22 118a3b35693b134d56ebd780123b7fd6f1497668"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -425,6 +425,8 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_FORMAT 24 /* Auxiliary database format error */
#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */
#define SQLITE_NOTADB 26 /* File opened that is not a database file */
+#define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */
+#define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */
#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */
#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */
/* end-of-error-codes */
@@ -475,6 +477,7 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8))
#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8))
#define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8))
+#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8))
#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8))
@@ -494,6 +497,8 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8))
#define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8))
#define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8))
+#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8))
+#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8))
/*
** CAPI3REF: Flags For File Open Operations
@@ -733,6 +738,9 @@ struct sqlite3_io_methods {
void (*xShmBarrier)(sqlite3_file*);
int (*xShmUnmap)(sqlite3_file*, int deleteFlag);
/* Methods above are valid for version 2 */
+ int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp);
+ int (*xUnfetch)(sqlite3_file*, sqlite3_int64 iOfst, void *p);
+ /* Methods above are valid for version 3 */
/* Additional methods may be added in future releases */
};
@@ -869,7 +877,8 @@ struct sqlite3_io_methods {
** it is able to override built-in [PRAGMA] statements.
**
**
[[SQLITE_FCNTL_BUSYHANDLER]]
-** ^This file-control may be invoked by SQLite on the database file handle
+** ^The [SQLITE_FCNTL_BUSYHANDLER]
+** file-control may be invoked by SQLite on the database file handle
** shortly after it is opened in order to provide a custom VFS with access
** to the connections busy-handler callback. The argument is of type (void **)
** - an array of two (void *) values. The first (void *) actually points
@@ -880,13 +889,24 @@ struct sqlite3_io_methods {
** current operation.
**
**
[[SQLITE_FCNTL_TEMPFILENAME]]
-** ^Application can invoke this file-control to have SQLite generate a
+** ^Application can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control
+** to have SQLite generate a
** temporary filename using the same algorithm that is followed to generate
** temporary filenames for TEMP tables and other internal uses. The
** argument should be a char** which will be filled with the filename
** written into memory obtained from [sqlite3_malloc()]. The caller should
** invoke [sqlite3_free()] on the result to avoid a memory leak.
**
+**
[[SQLITE_FCNTL_MMAP_SIZE]]
+** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the
+** maximum number of bytes that will be used for memory-mapped I/O.
+** The argument is a pointer to a value of type sqlite3_int64 that
+** is an advisory maximum number of bytes in the file to memory map. The
+** pointer is overwritten with the old value. The limit is not changed if
+** the value originally pointed to is negative, and so the current limit
+** can be queried by passing in a pointer to a negative number. This
+** file-control is used internally to implement [PRAGMA mmap_size].
+**
**
*/
#define SQLITE_FCNTL_LOCKSTATE 1
@@ -905,6 +925,7 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_PRAGMA 14
#define SQLITE_FCNTL_BUSYHANDLER 15
#define SQLITE_FCNTL_TEMPFILENAME 16
+#define SQLITE_FCNTL_MMAP_SIZE 18
/*
** CAPI3REF: Mutex Handle
@@ -1571,7 +1592,9 @@ struct sqlite3_mem_methods {
** page cache implementation into that object.)^
**
** [[SQLITE_CONFIG_LOG]]
SQLITE_CONFIG_LOG
-**
^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a
+**
The SQLITE_CONFIG_LOG option is used to configure the SQLite
+** global [error log].
+** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a
** function with a call signature of void(*)(void*,int,const char*),
** and a pointer to void. ^If the function pointer is not NULL, it is
** invoked by [sqlite3_log()] to process each logging event. ^If the
@@ -1617,12 +1640,12 @@ struct sqlite3_mem_methods {
**
SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE
**
These options are obsolete and should not be used by new code.
** They are retained for backwards compatibility but are now no-ops.
-**
+**
**
** [[SQLITE_CONFIG_SQLLOG]]
**
SQLITE_CONFIG_SQLLOG
**
This option is only available if sqlite is compiled with the
-** SQLITE_ENABLE_SQLLOG pre-processor macro defined. The first argument should
+** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should
** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int).
** The second should be of type (void*). The callback is invoked by the library
** in three separate circumstances, identified by the value passed as the
@@ -1632,7 +1655,23 @@ struct sqlite3_mem_methods {
** fourth parameter is 1, then the SQL statement that the third parameter
** points to has just been executed. Or, if the fourth parameter is 2, then
** the connection being passed as the second parameter is being closed. The
-** third parameter is passed NULL In this case.
+** third parameter is passed NULL In this case. An example of using this
+** configuration option can be seen in the "test_sqllog.c" source file in
+** the canonical SQLite source tree.
+**
+** [[SQLITE_CONFIG_MMAP_SIZE]]
+**
SQLITE_CONFIG_MMAP_SIZE
+**
SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values
+** that are the default mmap size limit (the default setting for
+** [PRAGMA mmap_size]) and the maximum allowed mmap size limit.
+** The default setting can be overridden by each database connection using
+** either the [PRAGMA mmap_size] command, or by using the
+** [SQLITE_FCNTL_MMAP_SIZE] file control. The maximum allowed mmap size
+** cannot be changed at run-time. Nor may the maximum allowed mmap size
+** exceed the compile-time maximum mmap size set by the
+** [SQLITE_MAX_MMAP_SIZE] compile-time option.
+** If either argument to this option is negative, then that argument is
+** changed to its compile-time default.
**
*/
#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
@@ -1656,6 +1695,7 @@ struct sqlite3_mem_methods {
#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */
#define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */
#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */
+#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */
/*
** CAPI3REF: Database Connection Configuration Options
@@ -2489,6 +2529,9 @@ SQLITE_API int sqlite3_set_authorizer(
** as each triggered subprogram is entered. The callbacks for triggers
** contain a UTF-8 SQL comment that identifies the trigger.)^
**
+** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit
+** the length of [bound parameter] expansion in the output of sqlite3_trace().
+**
** ^The callback function registered by sqlite3_profile() is invoked
** as each SQL statement finishes. ^The profile callback contains
** the original statement text and an estimate of wall-clock time
@@ -3027,7 +3070,8 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
**
** ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it
** always used to do, [sqlite3_step()] will automatically recompile the SQL
-** statement and try to run it again.
+** statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY]
+** retries will occur before sqlite3_step() gives up and returns an error.
**
**
**
@@ -3231,6 +3275,9 @@ typedef struct sqlite3_context sqlite3_context;
** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 999).
**
** ^The third argument is the value to bind to the parameter.
+** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16()
+** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter
+** is ignored and the end result is the same as sqlite3_bind_null().
**
** ^(In those routines that have a fourth argument, its value is the
** number of bytes in the parameter. To be clear: the value is the
@@ -4187,7 +4234,7 @@ SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(voi
** the content before returning.
**
** The typedef is necessary to work around problems in certain
-** C++ compilers. See ticket #2191.
+** C++ compilers.
*/
typedef void (*sqlite3_destructor_type)(void*);
#define SQLITE_STATIC ((sqlite3_destructor_type)0)
@@ -4986,11 +5033,20 @@ SQLITE_API int sqlite3_table_column_metadata(
** ^This interface loads an SQLite extension library from the named file.
**
** ^The sqlite3_load_extension() interface attempts to load an
-** SQLite extension library contained in the file zFile.
+** [SQLite extension] library contained in the file zFile. If
+** the file cannot be loaded directly, attempts are made to load
+** with various operating-system specific extensions added.
+** So for example, if "samplelib" cannot be loaded, then names like
+** "samplelib.so" or "samplelib.dylib" or "samplelib.dll" might
+** be tried also.
**
** ^The entry point is zProc.
-** ^zProc may be 0, in which case the name of the entry point
-** defaults to "sqlite3_extension_init".
+** ^(zProc may be 0, in which case SQLite will try to come up with an
+** entry point name on its own. It first tries "sqlite3_extension_init".
+** If that does not work, it constructs a name "sqlite3_X_init" where the
+** X is consists of the lower-case equivalent of all ASCII alphabetic
+** characters in the filename from the last "/" to the first following
+** "." and omitting any initial "lib".)^
** ^The sqlite3_load_extension() interface returns
** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong.
** ^If an error occurs and pzErrMsg is not 0, then the
@@ -5016,11 +5072,11 @@ SQLITE_API int sqlite3_load_extension(
** CAPI3REF: Enable Or Disable Extension Loading
**
** ^So as not to open security holes in older applications that are
-** unprepared to deal with extension loading, and as a means of disabling
-** extension loading while evaluating user-entered SQL, the following API
+** unprepared to deal with [extension loading], and as a means of disabling
+** [extension loading] while evaluating user-entered SQL, the following API
** is provided to turn the [sqlite3_load_extension()] mechanism on and off.
**
-** ^Extension loading is off by default. See ticket #1863.
+** ^Extension loading is off by default.
** ^Call the sqlite3_enable_load_extension() routine with onoff==1
** to turn extension loading on and call it with onoff==0 to turn
** it back off again.
@@ -5032,7 +5088,7 @@ SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
**
** ^This interface causes the xEntryPoint() function to be invoked for
** each new [database connection] that is created. The idea here is that
-** xEntryPoint() is the entry point for a statically linked SQLite extension
+** xEntryPoint() is the entry point for a statically linked [SQLite extension]
** that is to be automatically loaded into all new database connections.
**
** ^(Even though the function prototype shows that xEntryPoint() takes
@@ -6812,10 +6868,25 @@ SQLITE_API int sqlite3_unlock_notify(
SQLITE_API int sqlite3_stricmp(const char *, const char *);
SQLITE_API int sqlite3_strnicmp(const char *, const char *, int);
+/*
+** CAPI3REF: String Globbing
+*
+** ^The [sqlite3_strglob(P,X)] interface returns zero if string X matches
+** the glob pattern P, and it returns non-zero if string X does not match
+** the glob pattern P. ^The definition of glob pattern matching used in
+** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the
+** SQL dialect used by SQLite. ^The sqlite3_strglob(P,X) function is case
+** sensitive.
+**
+** Note that this routine returns zero on a match and non-zero if the strings
+** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
+*/
+SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr);
+
/*
** CAPI3REF: Error Logging Interface
**
-** ^The [sqlite3_log()] interface writes a message into the error log
+** ^The [sqlite3_log()] interface writes a message into the [error log]
** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()].
** ^If logging is enabled, the zFormat string and subsequent arguments are
** used with [sqlite3_snprintf()] to generate the final output string.
diff --git a/src/BroDoc.cc b/src/BroDoc.cc
index 984bdc90a4..c04cd92eca 100644
--- a/src/BroDoc.cc
+++ b/src/BroDoc.cc
@@ -8,6 +8,9 @@
#include "BroDoc.h"
#include "BroDocObj.h"
#include "util.h"
+#include "plugin/Manager.h"
+#include "analyzer/Manager.h"
+#include "analyzer/Component.h"
BroDoc::BroDoc(const std::string& rel, const std::string& abs)
{
@@ -164,84 +167,77 @@ void BroDoc::SetPacketFilter(const std::string& s)
packet_filter.clear();
}
-void BroDoc::AddPortAnalysis(const std::string& analyzer,
- const std::string& ports)
- {
- std::string reST_string = analyzer + "::\n" + ports + "\n\n";
- port_analysis.push_back(reST_string);
- }
-
void BroDoc::WriteDocFile() const
{
- WriteToDoc(".. Automatically generated. Do not edit.\n\n");
+ WriteToDoc(reST_file, ".. Automatically generated. Do not edit.\n\n");
- WriteToDoc(":tocdepth: 3\n\n");
+ WriteToDoc(reST_file, ":tocdepth: 3\n\n");
- WriteSectionHeading(doc_title.c_str(), '=');
+ WriteSectionHeading(reST_file, doc_title.c_str(), '=');
- WriteStringList(".. bro:namespace:: %s\n", modules);
+ WriteStringList(reST_file, ".. bro:namespace:: %s\n", modules);
- WriteToDoc("\n");
+ WriteToDoc(reST_file, "\n");
- // WriteSectionHeading("Overview", '-');
- WriteStringList("%s\n", summary);
+ // WriteSectionHeading(reST_file, "Overview", '-');
+ WriteStringList(reST_file, "%s\n", summary);
- WriteToDoc("\n");
+ WriteToDoc(reST_file, "\n");
if ( ! modules.empty() )
{
- WriteToDoc(":Namespace%s: ", (modules.size() > 1 ? "s" : ""));
- // WriteStringList(":bro:namespace:`%s`", modules);
- WriteStringList("``%s``, ", "``%s``", modules);
- WriteToDoc("\n");
+ WriteToDoc(reST_file, ":Namespace%s: ", (modules.size() > 1 ? "s" : ""));
+ // WriteStringList(reST_file, ":bro:namespace:`%s`", modules);
+ WriteStringList(reST_file, "``%s``, ", "``%s``", modules);
+ WriteToDoc(reST_file, "\n");
}
if ( ! imports.empty() )
{
- WriteToDoc(":Imports: ");
+ WriteToDoc(reST_file, ":Imports: ");
std::list::const_iterator it;
for ( it = imports.begin(); it != imports.end(); ++it )
{
if ( it != imports.begin() )
- WriteToDoc(", ");
+ WriteToDoc(reST_file, ", ");
string pretty(*it);
size_t pos = pretty.find("/index");
if ( pos != std::string::npos && pos + 6 == pretty.size() )
pretty = pretty.substr(0, pos);
- WriteToDoc(":doc:`%s `", pretty.c_str(), it->c_str());
+ WriteToDoc(reST_file, ":doc:`%s `", pretty.c_str(), it->c_str());
}
- WriteToDoc("\n");
+ WriteToDoc(reST_file, "\n");
}
- WriteToDoc(":Source File: :download:`%s`\n",
+ WriteToDoc(reST_file, ":Source File: :download:`%s`\n",
downloadable_filename.c_str());
- WriteToDoc("\n");
+ WriteToDoc(reST_file, "\n");
WriteInterface("Summary", '~', '#', true, true);
if ( ! notices.empty() )
- WriteBroDocObjList(notices, "Notices", '#');
+ WriteBroDocObjList(reST_file, notices, "Notices", '#');
if ( port_analysis.size() || packet_filter.size() )
- WriteSectionHeading("Configuration Changes", '#');
+ WriteSectionHeading(reST_file, "Configuration Changes", '#');
if ( ! port_analysis.empty() )
{
- WriteSectionHeading("Port Analysis", '^');
- WriteToDoc("Loading this script makes the following changes to "
+ WriteSectionHeading(reST_file, "Port Analysis", '^');
+ WriteToDoc(reST_file, "Loading this script makes the following changes to "
":bro:see:`dpd_config`.\n\n");
- WriteStringList("%s, ", "%s", port_analysis);
+ WriteStringList(reST_file, "%s, ", "%s", port_analysis);
}
if ( ! packet_filter.empty() )
{
- WriteSectionHeading("Packet Filter", '^');
- WriteToDoc("Loading this script makes the following changes to "
+ WriteSectionHeading(reST_file, "Packet Filter", '^');
+ WriteToDoc(reST_file, "Loading this script makes the following changes to "
":bro:see:`capture_filters`.\n\n");
- WriteToDoc("Filters added::\n\n");
- WriteToDoc("%s\n", packet_filter.c_str());
+ WriteToDoc(reST_file, "Filters added::\n\n");
+ WriteToDoc(reST_file, "%s\n", packet_filter.c_str());
}
WriteInterface("Detailed Interface", '~', '#', true, false);
@@ -267,23 +263,23 @@ void BroDoc::WriteDocFile() const
void BroDoc::WriteInterface(const char* heading, char underline,
char sub, bool isPublic, bool isShort) const
{
- WriteSectionHeading(heading, underline);
- WriteBroDocObjList(options, isPublic, "Options", sub, isShort);
- WriteBroDocObjList(constants, isPublic, "Constants", sub, isShort);
- WriteBroDocObjList(state_vars, isPublic, "State Variables", sub, isShort);
- WriteBroDocObjList(types, isPublic, "Types", sub, isShort);
- WriteBroDocObjList(events, isPublic, "Events", sub, isShort);
- WriteBroDocObjList(hooks, isPublic, "Hooks", sub, isShort);
- WriteBroDocObjList(functions, isPublic, "Functions", sub, isShort);
- WriteBroDocObjList(redefs, isPublic, "Redefinitions", sub, isShort);
+ WriteSectionHeading(reST_file, heading, underline);
+ WriteBroDocObjList(reST_file, options, isPublic, "Options", sub, isShort);
+ WriteBroDocObjList(reST_file, constants, isPublic, "Constants", sub, isShort);
+ WriteBroDocObjList(reST_file, state_vars, isPublic, "State Variables", sub, isShort);
+ WriteBroDocObjList(reST_file, types, isPublic, "Types", sub, isShort);
+ WriteBroDocObjList(reST_file, events, isPublic, "Events", sub, isShort);
+ WriteBroDocObjList(reST_file, hooks, isPublic, "Hooks", sub, isShort);
+ WriteBroDocObjList(reST_file, functions, isPublic, "Functions", sub, isShort);
+ WriteBroDocObjList(reST_file, redefs, isPublic, "Redefinitions", sub, isShort);
}
-void BroDoc::WriteStringList(const char* format, const char* last_format,
- const std::list& l) const
+void BroDoc::WriteStringList(FILE* f, const char* format, const char* last_format,
+ const std::list& l)
{
if ( l.empty() )
{
- WriteToDoc("\n");
+ WriteToDoc(f, "\n");
return;
}
@@ -292,12 +288,12 @@ void BroDoc::WriteStringList(const char* format, const char* last_format,
last--;
for ( it = l.begin(); it != last; ++it )
- WriteToDoc(format, it->c_str());
+ WriteToDoc(f, format, it->c_str());
- WriteToDoc(last_format, last->c_str());
+ WriteToDoc(f, last_format, last->c_str());
}
-void BroDoc::WriteBroDocObjTable(const BroDocObjList& l) const
+void BroDoc::WriteBroDocObjTable(FILE* f, const BroDocObjList& l)
{
int max_id_col = 0;
int max_com_col = 0;
@@ -317,38 +313,38 @@ void BroDoc::WriteBroDocObjTable(const BroDocObjList& l) const
}
// Start table.
- WriteRepeatedChar('=', max_id_col);
- WriteToDoc(" ");
+ WriteRepeatedChar(f, '=', max_id_col);
+ WriteToDoc(f, " ");
if ( max_com_col == 0 )
- WriteToDoc("=");
+ WriteToDoc(f, "=");
else
- WriteRepeatedChar('=', max_com_col);
+ WriteRepeatedChar(f, '=', max_com_col);
- WriteToDoc("\n");
+ WriteToDoc(f, "\n");
for ( it = l.begin(); it != l.end(); ++it )
{
if ( it != l.begin() )
- WriteToDoc("\n\n");
- (*it)->WriteReSTCompact(reST_file, max_id_col);
+ WriteToDoc(f, "\n\n");
+ (*it)->WriteReSTCompact(f, max_id_col);
}
// End table.
- WriteToDoc("\n");
- WriteRepeatedChar('=', max_id_col);
- WriteToDoc(" ");
+ WriteToDoc(f, "\n");
+ WriteRepeatedChar(f, '=', max_id_col);
+ WriteToDoc(f, " ");
if ( max_com_col == 0 )
- WriteToDoc("=");
+ WriteToDoc(f, "=");
else
- WriteRepeatedChar('=', max_com_col);
+ WriteRepeatedChar(f, '=', max_com_col);
- WriteToDoc("\n\n");
+ WriteToDoc(f, "\n\n");
}
-void BroDoc::WriteBroDocObjList(const BroDocObjList& l, bool wantPublic,
- const char* heading, char underline, bool isShort) const
+void BroDoc::WriteBroDocObjList(FILE* f, const BroDocObjList& l, bool wantPublic,
+ const char* heading, char underline, bool isShort)
{
if ( l.empty() )
return;
@@ -366,7 +362,7 @@ void BroDoc::WriteBroDocObjList(const BroDocObjList& l, bool wantPublic,
if ( it == l.end() )
return;
- WriteSectionHeading(heading, underline);
+ WriteSectionHeading(f, heading, underline);
BroDocObjList filtered_list;
@@ -377,13 +373,13 @@ void BroDoc::WriteBroDocObjList(const BroDocObjList& l, bool wantPublic,
}
if ( isShort )
- WriteBroDocObjTable(filtered_list);
+ WriteBroDocObjTable(f, filtered_list);
else
- WriteBroDocObjList(filtered_list);
+ WriteBroDocObjList(f, filtered_list);
}
-void BroDoc::WriteBroDocObjList(const BroDocObjMap& m, bool wantPublic,
- const char* heading, char underline, bool isShort) const
+void BroDoc::WriteBroDocObjList(FILE* f, const BroDocObjMap& m, bool wantPublic,
+ const char* heading, char underline, bool isShort)
{
BroDocObjMap::const_iterator it;
BroDocObjList l;
@@ -391,24 +387,24 @@ void BroDoc::WriteBroDocObjList(const BroDocObjMap& m, bool wantPublic,
for ( it = m.begin(); it != m.end(); ++it )
l.push_back(it->second);
- WriteBroDocObjList(l, wantPublic, heading, underline, isShort);
+ WriteBroDocObjList(f, l, wantPublic, heading, underline, isShort);
}
-void BroDoc::WriteBroDocObjList(const BroDocObjList& l, const char* heading,
- char underline) const
+void BroDoc::WriteBroDocObjList(FILE* f, const BroDocObjList& l, const char* heading,
+ char underline)
{
- WriteSectionHeading(heading, underline);
- WriteBroDocObjList(l);
+ WriteSectionHeading(f, heading, underline);
+ WriteBroDocObjList(f, l);
}
-void BroDoc::WriteBroDocObjList(const BroDocObjList& l) const
+void BroDoc::WriteBroDocObjList(FILE* f, const BroDocObjList& l)
{
for ( BroDocObjList::const_iterator it = l.begin(); it != l.end(); ++it )
- (*it)->WriteReST(reST_file);
+ (*it)->WriteReST(f);
}
-void BroDoc::WriteBroDocObjList(const BroDocObjMap& m, const char* heading,
- char underline) const
+void BroDoc::WriteBroDocObjList(FILE* f, const BroDocObjMap& m, const char* heading,
+ char underline)
{
BroDocObjMap::const_iterator it;
BroDocObjList l;
@@ -416,28 +412,28 @@ void BroDoc::WriteBroDocObjList(const BroDocObjMap& m, const char* heading,
for ( it = m.begin(); it != m.end(); ++it )
l.push_back(it->second);
- WriteBroDocObjList(l, heading, underline);
+ WriteBroDocObjList(f, l, heading, underline);
}
-void BroDoc::WriteToDoc(const char* format, ...) const
+void BroDoc::WriteToDoc(FILE* f, const char* format, ...)
{
va_list argp;
va_start(argp, format);
- vfprintf(reST_file, format, argp);
+ vfprintf(f, format, argp);
va_end(argp);
}
-void BroDoc::WriteSectionHeading(const char* heading, char underline) const
+void BroDoc::WriteSectionHeading(FILE* f, const char* heading, char underline)
{
- WriteToDoc("%s\n", heading);
- WriteRepeatedChar(underline, strlen(heading));
- WriteToDoc("\n");
+ WriteToDoc(f, "%s\n", heading);
+ WriteRepeatedChar(f, underline, strlen(heading));
+ WriteToDoc(f, "\n");
}
-void BroDoc::WriteRepeatedChar(char c, size_t n) const
+void BroDoc::WriteRepeatedChar(FILE* f, char c, size_t n)
{
for ( size_t i = 0; i < n; ++i )
- WriteToDoc("%c", c);
+ WriteToDoc(f, "%c", c);
}
void BroDoc::FreeBroDocObjPtrList(BroDocObjList& l)
@@ -459,3 +455,143 @@ void BroDoc::AddFunction(BroDocObj* o)
else
functions[o->Name()]->Combine(o);
}
+
+static void WritePluginSectionHeading(FILE* f, const plugin::Plugin* p)
+ {
+ string name = p->Name();
+
+ fprintf(f, "%s\n", name.c_str());
+ for ( size_t i = 0; i < name.size(); ++i )
+ fprintf(f, "-");
+ fprintf(f, "\n\n");
+
+ fprintf(f, "%s\n\n", p->Description());
+ }
+
+static void WriteAnalyzerComponent(FILE* f, const analyzer::Component* c)
+ {
+ EnumType* atag = analyzer_mgr->GetTagEnumType();
+ string tag = fmt("ANALYZER_%s", c->CanonicalName());
+
+ if ( atag->Lookup("Analyzer", tag.c_str()) < 0 )
+ reporter->InternalError("missing analyzer tag for %s", tag.c_str());
+
+ fprintf(f, ":bro:enum:`Analyzer::%s`\n\n", tag.c_str());
+ }
+
+static void WritePluginComponents(FILE* f, const plugin::Plugin* p)
+ {
+ plugin::Plugin::component_list components = p->Components();
+ plugin::Plugin::component_list::const_iterator it;
+
+ fprintf(f, "Components\n");
+ fprintf(f, "++++++++++\n\n");
+
+ for ( it = components.begin(); it != components.end(); ++it )
+ {
+ switch ( (*it)->Type() ) {
+ case plugin::component::ANALYZER:
+ WriteAnalyzerComponent(f,
+ dynamic_cast(*it));
+ break;
+ case plugin::component::READER:
+ reporter->InternalError("docs for READER component unimplemented");
+ case plugin::component::WRITER:
+ reporter->InternalError("docs for WRITER component unimplemented");
+ default:
+ reporter->InternalError("docs for unknown component unimplemented");
+ }
+ }
+ }
+
+static void WritePluginBifItems(FILE* f, const plugin::Plugin* p,
+ plugin::BifItem::Type t, const string& heading)
+ {
+ plugin::Plugin::bif_item_list bifitems = p->BifItems();
+ plugin::Plugin::bif_item_list::iterator it = bifitems.begin();
+
+ while ( it != bifitems.end() )
+ {
+ if ( it->GetType() != t )
+ it = bifitems.erase(it);
+ else
+ ++it;
+ }
+
+ if ( bifitems.empty() )
+ return;
+
+ fprintf(f, "%s\n", heading.c_str());
+ for ( size_t i = 0; i < heading.size(); ++i )
+ fprintf(f, "+");
+ fprintf(f, "\n\n");
+
+ for ( it = bifitems.begin(); it != bifitems.end(); ++it )
+ {
+ BroDocObj* o = doc_ids[it->GetID()];
+
+ if ( o )
+ o->WriteReST(f);
+ else
+ reporter->Warning("No docs for ID: %s\n", it->GetID());
+ }
+ }
+
+static void WriteAnalyzerTagDefn(FILE* f, EnumType* e)
+ {
+ e = new CommentedEnumType(e);
+ e->SetTypeID(copy_string("Analyzer::Tag"));
+
+ ID* dummy_id = new ID(copy_string("Analyzer::Tag"), SCOPE_GLOBAL, true);
+ dummy_id->SetType(e);
+ dummy_id->MakeType();
+
+ list* r = new list();
+ r->push_back("Unique identifiers for protocol analyzers.");
+
+ BroDocObj bdo(dummy_id, r, true);
+
+ bdo.WriteReST(f);
+ }
+
+static bool IsAnalyzerPlugin(const plugin::Plugin* p)
+ {
+ plugin::Plugin::component_list components = p->Components();
+ plugin::Plugin::component_list::const_iterator it;
+
+ for ( it = components.begin(); it != components.end(); ++it )
+ if ( (*it)->Type() != plugin::component::ANALYZER )
+ return false;
+
+ return true;
+ }
+
+void CreateProtoAnalyzerDoc(const char* filename)
+ {
+ FILE* f = fopen(filename, "w");
+
+ fprintf(f, "Protocol Analyzer Reference\n");
+ fprintf(f, "===========================\n\n");
+
+ WriteAnalyzerTagDefn(f, analyzer_mgr->GetTagEnumType());
+
+ plugin::Manager::plugin_list plugins = plugin_mgr->Plugins();
+ plugin::Manager::plugin_list::const_iterator it;
+
+ for ( it = plugins.begin(); it != plugins.end(); ++it )
+ {
+ if ( ! IsAnalyzerPlugin(*it) )
+ continue;
+
+ WritePluginSectionHeading(f, *it);
+ WritePluginComponents(f, *it);
+ WritePluginBifItems(f, *it, plugin::BifItem::CONSTANT,
+ "Options/Constants");
+ WritePluginBifItems(f, *it, plugin::BifItem::GLOBAL, "Globals");
+ WritePluginBifItems(f, *it, plugin::BifItem::TYPE, "Types");
+ WritePluginBifItems(f, *it, plugin::BifItem::EVENT, "Events");
+ WritePluginBifItems(f, *it, plugin::BifItem::FUNCTION, "Functions");
+ }
+
+ fclose(f);
+ }
diff --git a/src/BroDoc.h b/src/BroDoc.h
index 79f02b7110..9f92f821f8 100644
--- a/src/BroDoc.h
+++ b/src/BroDoc.h
@@ -81,15 +81,6 @@ public:
*/
void SetPacketFilter(const std::string& s);
- /**
- * Schedules documentation of a given set of ports being associated
- * with a particular analyzer as a result of the current script
- * being loaded -- the way the "dpd_config" table is changed.
- * @param analyzer An analyzer that changed the "dpd_config" table.
- * @param ports The set of ports assigned to the analyzer in table.
- */
- void AddPortAnalysis(const std::string& analyzer, const std::string& ports);
-
/**
* Schedules documentation of a script option. An option is
* defined as any variable in the script that is declared 'const'
@@ -242,7 +233,115 @@ public:
return reST_filename.c_str();
}
-protected:
+ typedef std::list BroDocObjList;
+ typedef std::map BroDocObjMap;
+
+ /**
+ * Writes out a table of BroDocObj's to the reST document
+ * @param f The file to write to.
+ * @param l A list of BroDocObj pointers
+ */
+ static void WriteBroDocObjTable(FILE* f, const BroDocObjList& l);
+
+ /**
+ * Writes out given number of characters to reST document
+ * @param f The file to write to.
+ * @param c the character to write
+ * @param n the number of characters to write
+ */
+ static void WriteRepeatedChar(FILE* f, char c, size_t n);
+
+ /**
+ * A wrapper to fprintf() that always uses the reST document
+ * for the FILE* argument.
+ * @param f The file to write to.
+ * @param format A printf style format string.
+ */
+ static void WriteToDoc(FILE* f, const char* format, ...);
+
+ /**
+ * Writes out a list of strings to the reST document.
+ * If the list is empty, prints a newline character.
+ * @param f The file to write to.
+ * @param format A printf style format string for elements of the list
+ * except for the last one in the list
+ * @param last_format A printf style format string to use for the last
+ * element of the list
+ * @param l A reference to a list of strings
+ */
+ static void WriteStringList(FILE* f, const char* format, const char* last_format,
+ const std::list& l);
+
+ /**
+ * @see WriteStringList(FILE* f, const char*, const char*,
+ * const std::list&>)
+ */
+ static void WriteStringList(FILE* f, const char* format,
+ const std::list& l){
+ WriteStringList(f, format, format, l);
+ }
+
+ /**
+ * Writes out a list of BroDocObj objects to the reST document
+ * @param f The file to write to.
+ * @param l A list of BroDocObj pointers
+ * @param wantPublic If true, filter out objects that are not declared
+ * in the global scope. If false, filter out those that are in
+ * the global scope.
+ * @param heading The title of the section to create in the reST doc.
+ * @param underline The character to use to underline the reST
+ * section heading.
+ * @param isShort Whether to write the full documentation or a "short"
+ * version (a single sentence)
+ */
+ static void WriteBroDocObjList(FILE* f, const BroDocObjList& l, bool wantPublic,
+ const char* heading, char underline,
+ bool isShort);
+
+ /**
+ * Wraps the BroDocObjMap into a BroDocObjList and the writes that list
+ * to the reST document
+ * @see WriteBroDocObjList(FILE* f, const BroDocObjList&, bool, const char*, char,
+ bool)
+ */
+ static void WriteBroDocObjList(FILE* f, const BroDocObjMap& m, bool wantPublic,
+ const char* heading, char underline,
+ bool isShort);
+
+ /**
+ * Writes out a list of BroDocObj objects to the reST document
+ * @param l A list of BroDocObj pointers
+ * @param heading The title of the section to create in the reST doc.
+ * @param underline The character to use to underline the reST
+ * section heading.
+ */
+ static void WriteBroDocObjList(FILE* f, const BroDocObjList& l, const char* heading,
+ char underline);
+
+ /**
+ * Writes out a list of BroDocObj objects to the reST document
+ * @param l A list of BroDocObj pointers
+ */
+ static void WriteBroDocObjList(FILE* f, const BroDocObjList& l);
+
+ /**
+ * Wraps the BroDocObjMap into a BroDocObjList and the writes that list
+ * to the reST document
+ * @see WriteBroDocObjList(FILE* f, const BroDocObjList&, const char*, char)
+ */
+ static void WriteBroDocObjList(FILE* f, const BroDocObjMap& m, const char* heading,
+ char underline);
+
+ /**
+ * Writes out a reST section heading
+ * @param f The file to write to.
+ * @param heading The title of the heading to create
+ * @param underline The character to use to underline the section title
+ * within the reST document
+ */
+ static void WriteSectionHeading(FILE* f, const char* heading, char underline);
+
+private:
FILE* reST_file;
std::string reST_filename;
std::string source_filename; // points to the basename of source file
@@ -255,9 +354,6 @@ protected:
std::list imports;
std::list port_analysis;
- typedef std::list BroDocObjList;
- typedef std::map BroDocObjMap;
-
BroDocObjList options;
BroDocObjList constants;
BroDocObjList state_vars;
@@ -272,107 +368,6 @@ protected:
BroDocObjList all;
- /**
- * Writes out a list of strings to the reST document.
- * If the list is empty, prints a newline character.
- * @param format A printf style format string for elements of the list
- * except for the last one in the list
- * @param last_format A printf style format string to use for the last
- * element of the list
- * @param l A reference to a list of strings
- */
- void WriteStringList(const char* format, const char* last_format,
- const std::list& l) const;
-
- /**
- * @see WriteStringList(const char*, const char*,
- * const std::list&>)
- */
- void WriteStringList(const char* format,
- const std::list& l) const
- {
- WriteStringList(format, format, l);
- }
-
-
- /**
- * Writes out a table of BroDocObj's to the reST document
- * @param l A list of BroDocObj pointers
- */
- void WriteBroDocObjTable(const BroDocObjList& l) const;
-
- /**
- * Writes out a list of BroDocObj objects to the reST document
- * @param l A list of BroDocObj pointers
- * @param wantPublic If true, filter out objects that are not declared
- * in the global scope. If false, filter out those that are in
- * the global scope.
- * @param heading The title of the section to create in the reST doc.
- * @param underline The character to use to underline the reST
- * section heading.
- * @param isShort Whether to write the full documentation or a "short"
- * version (a single sentence)
- */
- void WriteBroDocObjList(const BroDocObjList& l, bool wantPublic,
- const char* heading, char underline,
- bool isShort) const;
-
- /**
- * Wraps the BroDocObjMap into a BroDocObjList and the writes that list
- * to the reST document
- * @see WriteBroDocObjList(const BroDocObjList&, bool, const char*, char,
- bool)
- */
- void WriteBroDocObjList(const BroDocObjMap& m, bool wantPublic,
- const char* heading, char underline,
- bool isShort) const;
-
- /**
- * Writes out a list of BroDocObj objects to the reST document
- * @param l A list of BroDocObj pointers
- * @param heading The title of the section to create in the reST doc.
- * @param underline The character to use to underline the reST
- * section heading.
- */
- void WriteBroDocObjList(const BroDocObjList& l, const char* heading,
- char underline) const;
-
- /**
- * Writes out a list of BroDocObj objects to the reST document
- * @param l A list of BroDocObj pointers
- */
- void WriteBroDocObjList(const BroDocObjList& l) const;
-
- /**
- * Wraps the BroDocObjMap into a BroDocObjList and the writes that list
- * to the reST document
- * @see WriteBroDocObjList(const BroDocObjList&, const char*, char)
- */
- void WriteBroDocObjList(const BroDocObjMap& m, const char* heading,
- char underline) const;
-
- /**
- * A wrapper to fprintf() that always uses the reST document
- * for the FILE* argument.
- * @param format A printf style format string.
- */
- void WriteToDoc(const char* format, ...) const;
-
- /**
- * Writes out a reST section heading
- * @param heading The title of the heading to create
- * @param underline The character to use to underline the section title
- * within the reST document
- */
- void WriteSectionHeading(const char* heading, char underline) const;
-
- /**
- * Writes out given number of characters to reST document
- * @param c the character to write
- * @param n the number of characters to write
- */
- void WriteRepeatedChar(char c, size_t n) const;
-
/**
* Writes out the reST for either the script's public or private interface
* @param heading The title of the interfaces section heading
@@ -387,7 +382,6 @@ protected:
*/
void WriteInterface(const char* heading, char underline, char subunderline,
bool isPublic, bool isShort) const;
-private:
/**
* Frees memory allocated to BroDocObj's objects in a given list.
@@ -413,4 +407,10 @@ private:
};
};
+/**
+ * Writes out plugin index documentation for all analyzer plugins.
+ * @param filename the name of the file to write.
+ */
+void CreateProtoAnalyzerDoc(const char* filename);
+
#endif
diff --git a/src/BroDocObj.cc b/src/BroDocObj.cc
index 12753ea15d..4316b3113a 100644
--- a/src/BroDocObj.cc
+++ b/src/BroDocObj.cc
@@ -4,6 +4,8 @@
#include "ID.h"
#include "BroDocObj.h"
+map doc_ids = map();
+
BroDocObj* BroDocObj::last = 0;
BroDocObj::BroDocObj(const ID* id, std::list*& reST,
@@ -16,6 +18,7 @@ BroDocObj::BroDocObj(const ID* id, std::list*& reST,
is_fake_id = is_fake;
use_role = 0;
FormulateShortDesc();
+ doc_ids[id->Name()] = this;
}
BroDocObj::~BroDocObj()
diff --git a/src/BroDocObj.h b/src/BroDocObj.h
index cb512f8cda..ab42dc3c94 100644
--- a/src/BroDocObj.h
+++ b/src/BroDocObj.h
@@ -4,6 +4,7 @@
#include
#include
#include
+#include