Page MenuHomePhabricator (Chris)

No OneTemporary

Size
4 MB
Referenced Files
None
Subscribers
None
This file is larger than 256 KB, so syntax highlighting was skipped.
diff --git a/mongodb-1.3.4/scripts/presets/standalone-24.json b/mongodb-1.3.4/scripts/presets/standalone-24.json
deleted file mode 100644
index 4c247864..00000000
--- a/mongodb-1.3.4/scripts/presets/standalone-24.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "name": "mongod",
- "id" : "STANDALONE_24",
- "procParams": {
- "dbpath": "/tmp/standalone-24/",
- "ipv6": true,
- "logappend": true,
- "logpath": "/tmp/standalone-24/mongod.log",
- "journal": true,
- "port": 2500,
- "setParameter": {"enableTestCommands": 1}
- },
- "version": "24-release"
-}
-
diff --git a/mongodb-1.3.4/scripts/presets/standalone-26.json b/mongodb-1.3.4/scripts/presets/standalone-26.json
deleted file mode 100644
index 62ead849..00000000
--- a/mongodb-1.3.4/scripts/presets/standalone-26.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "name": "mongod",
- "id" : "STANDALONE_26",
- "procParams": {
- "dbpath": "/tmp/standalone-26/",
- "ipv6": true,
- "logappend": true,
- "logpath": "/tmp/standalone-26/mongod.log",
- "journal": true,
- "port": 2600,
- "setParameter": {"enableTestCommands": 1}
- },
- "version": "26-release"
-}
-
diff --git a/mongodb-1.3.4/scripts/ubuntu/mongo-orchestration.sh b/mongodb-1.3.4/scripts/ubuntu/mongo-orchestration.sh
deleted file mode 100644
index 72d014af..00000000
--- a/mongodb-1.3.4/scripts/ubuntu/mongo-orchestration.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-# Enable MongoDB Enterprise repo
-apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
-# 3.2 key
-apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
-echo 'deb http://repo.mongodb.com/apt/ubuntu precise/mongodb-enterprise/3.2 multiverse' | sudo tee /etc/apt/sources.list.d/mongodb-enterprise-3.2.list
-echo 'deb http://repo.mongodb.com/apt/ubuntu precise/mongodb-enterprise/3.0 multiverse' | sudo tee /etc/apt/sources.list.d/mongodb-enterprise-3.0.list
-echo 'deb http://repo.mongodb.com/apt/ubuntu precise/mongodb-enterprise/2.6 multiverse' | sudo tee /etc/apt/sources.list.d/mongodb-enterprise-2.6.list
-echo 'deb http://repo.mongodb.com/apt/ubuntu precise/mongodb-enterprise/2.4 multiverse' | sudo tee /etc/apt/sources.list.d/mongodb-enterprise-2.4.list
-apt-get update
-
-apt-get install -y libsnmp15 libgsasl7
-
-sudo apt-get download mongodb-enterprise-server=3.2.0
-sudo apt-get download mongodb-enterprise-mongos=3.2.0
-sudo apt-get download mongodb-enterprise-server=3.0.3
-sudo apt-get download mongodb-enterprise-server=2.6.9
-sudo apt-get download mongodb-10gen-enterprise=2.4.13
-dpkg -x mongodb-10gen-enterprise_2.4.13_amd64.deb 2.4.13
-dpkg -x mongodb-enterprise-server_2.6.9_amd64.deb 2.6.9
-dpkg -x mongodb-enterprise-server_3.0.3_amd64.deb 3.0.3
-dpkg -x mongodb-enterprise-server_3.2.0_amd64.deb 3.2.0
-dpkg -x mongodb-enterprise-mongos_3.2.0_amd64.deb 3.2.0
-
-
-
-# Python stuff for mongo-orchestration
-apt-get install -y python python-dev python-pip
-
-pip install --upgrade 'git+https://github.com/10gen/mongo-orchestration.git#egg=mongo_orchestration'
-
-# Launch mongo-orchestration
-mongo-orchestration -f mongo-orchestration-config.json -b 192.168.112.10 --enable-majority-read-concern start
-
diff --git a/mongodb-1.3.4/src/libbson/VERSION_CURRENT b/mongodb-1.3.4/src/libbson/VERSION_CURRENT
deleted file mode 100644
index 0bfbd573..00000000
--- a/mongodb-1.3.4/src/libbson/VERSION_CURRENT
+++ /dev/null
@@ -1 +0,0 @@
-1.8.2
\ No newline at end of file
diff --git a/mongodb-1.3.4/src/libbson/VERSION_RELEASED b/mongodb-1.3.4/src/libbson/VERSION_RELEASED
deleted file mode 100644
index 0bfbd573..00000000
--- a/mongodb-1.3.4/src/libbson/VERSION_RELEASED
+++ /dev/null
@@ -1 +0,0 @@
-1.8.2
\ No newline at end of file
diff --git a/mongodb-1.3.4/src/libmongoc/VERSION_CURRENT b/mongodb-1.3.4/src/libmongoc/VERSION_CURRENT
deleted file mode 100644
index 0bfbd573..00000000
--- a/mongodb-1.3.4/src/libmongoc/VERSION_CURRENT
+++ /dev/null
@@ -1 +0,0 @@
-1.8.2
\ No newline at end of file
diff --git a/mongodb-1.3.4/src/libmongoc/VERSION_RELEASED b/mongodb-1.3.4/src/libmongoc/VERSION_RELEASED
deleted file mode 100644
index 0bfbd573..00000000
--- a/mongodb-1.3.4/src/libmongoc/VERSION_RELEASED
+++ /dev/null
@@ -1 +0,0 @@
-1.8.2
\ No newline at end of file
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cmd.c b/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cmd.c
deleted file mode 100644
index 8e06982a..00000000
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cmd.c
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Copyright 2017 MongoDB, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include "mongoc-cmd-private.h"
-#include "mongoc-read-prefs-private.h"
-#include "mongoc-trace-private.h"
-#include "mongoc-client-private.h"
-#include "mongoc-write-concern-private.h"
-/* For strcasecmp on Windows */
-#include "mongoc-util-private.h"
-
-
-void
-mongoc_cmd_parts_init (mongoc_cmd_parts_t *parts,
- const char *db_name,
- mongoc_query_flags_t user_query_flags,
- const bson_t *command_body)
-{
- parts->body = command_body;
- parts->user_query_flags = user_query_flags;
- parts->read_prefs = NULL;
- parts->is_write_command = false;
- bson_init (&parts->extra);
- bson_init (&parts->assembled_body);
-
- parts->assembled.db_name = db_name;
- parts->assembled.command = NULL;
- parts->assembled.query_flags = MONGOC_QUERY_NONE;
-}
-
-
-/*
- *--------------------------------------------------------------------------
- *
- * mongoc_cmd_parts_append_opts --
- *
- * Take an iterator over user-supplied options document and append the
- * options to @parts->command_extra, taking the selected server's max
- * wire version into account.
- *
- * Return:
- * True if the options were successfully applied. If any options are
- * invalid, returns false and fills out @error. In that case @parts is
- * invalid and must not be used.
- *
- * Side effects:
- * May partly apply options before returning an error.
- *
- *--------------------------------------------------------------------------
- */
-
-bool
-mongoc_cmd_parts_append_opts (mongoc_cmd_parts_t *parts,
- bson_iter_t *iter,
- int max_wire_version,
- bson_error_t *error)
-{
- bool is_fam;
-
- ENTRY;
-
- /* not yet assembled */
- BSON_ASSERT (!parts->assembled.command);
-
- is_fam =
- !strcasecmp (_mongoc_get_command_name (parts->body), "findandmodify");
-
- while (bson_iter_next (iter)) {
- if (BSON_ITER_IS_KEY (iter, "collation")) {
- if (max_wire_version < WIRE_VERSION_COLLATION) {
- bson_set_error (error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
- "The selected server does not support collation");
- RETURN (false);
- }
-
- } else if (BSON_ITER_IS_KEY (iter, "writeConcern")) {
- if (!_mongoc_write_concern_iter_is_valid (iter)) {
- bson_set_error (error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_COMMAND_INVALID_ARG,
- "Invalid writeConcern");
- RETURN (false);
- }
-
- if ((is_fam && max_wire_version < WIRE_VERSION_FAM_WRITE_CONCERN) ||
- (!is_fam && max_wire_version < WIRE_VERSION_CMD_WRITE_CONCERN)) {
- continue;
- }
-
- } else if (BSON_ITER_IS_KEY (iter, "readConcern")) {
- if (max_wire_version < WIRE_VERSION_READ_CONCERN) {
- bson_set_error (error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
- "The selected server does not support readConcern");
- RETURN (false);
- }
- } else if (BSON_ITER_IS_KEY (iter, "serverId")) {
- continue;
- }
-
- bson_append_iter (&parts->extra, bson_iter_key (iter), -1, iter);
- }
-
- RETURN (true);
-}
-
-
-/* Update result with the read prefs, following Server Selection Spec.
- * The driver must have discovered the server is a mongos.
- */
-static void
-_cmd_parts_apply_read_preferences_mongos (mongoc_cmd_parts_t *parts)
-{
- mongoc_read_mode_t mode;
- const bson_t *tags = NULL;
- bson_t child;
- const char *mode_str;
- int64_t stale;
-
- mode = mongoc_read_prefs_get_mode (parts->read_prefs);
- if (parts->read_prefs) {
- tags = mongoc_read_prefs_get_tags (parts->read_prefs);
- }
-
- /* Server Selection Spec says:
- *
- * For mode 'primary', drivers MUST NOT set the slaveOK wire protocol flag
- * and MUST NOT use $readPreference
- *
- * For mode 'secondary', drivers MUST set the slaveOK wire protocol flag and
- * MUST also use $readPreference
- *
- * For mode 'primaryPreferred', drivers MUST set the slaveOK wire protocol
- * flag and MUST also use $readPreference
- *
- * For mode 'secondaryPreferred', drivers MUST set the slaveOK wire protocol
- * flag. If the read preference contains a non-empty tag_sets parameter,
- * drivers MUST use $readPreference; otherwise, drivers MUST NOT use
- * $readPreference
- *
- * For mode 'nearest', drivers MUST set the slaveOK wire protocol flag and
- * MUST also use $readPreference
- */
- if (mode == MONGOC_READ_SECONDARY_PREFERRED && bson_empty0 (tags)) {
- parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK;
- } else if (mode != MONGOC_READ_PRIMARY) {
- parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK;
-
- /* Server Selection Spec: "When any $ modifier is used, including the
- * $readPreference modifier, the query MUST be provided using the $query
- * modifier".
- *
- * This applies to commands, too.
- */
-
- if (bson_has_field (parts->body, "$query")) {
- bson_concat (&parts->assembled_body, parts->body);
- } else {
- bson_append_document (
- &parts->assembled_body, "$query", 6, parts->body);
- }
-
- bson_append_document_begin (
- &parts->assembled_body, "$readPreference", 15, &child);
-
- mode_str = _mongoc_read_mode_as_str (mode);
- bson_append_utf8 (&child, "mode", 4, mode_str, -1);
- if (!bson_empty0 (tags)) {
- bson_append_array (&child, "tags", 4, tags);
- }
-
- stale = mongoc_read_prefs_get_max_staleness_seconds (parts->read_prefs);
- if (stale != MONGOC_NO_MAX_STALENESS) {
- bson_append_int64 (&child, "maxStalenessSeconds", 19, stale);
- }
-
- bson_append_document_end (&parts->assembled_body, &child);
- parts->assembled.command = &parts->assembled_body;
- }
-}
-
-
-/*
- *--------------------------------------------------------------------------
- *
- * mongoc_cmd_parts_assemble --
- *
- * Assemble the command body, options, and read preference into one
- * command.
- *
- * Side effects:
- * Sets @parts->command_ptr and @parts->query_flags. Concatenates
- * @parts->body and @parts->command_extra into @parts->assembled if
- * needed.
- *
- *--------------------------------------------------------------------------
- */
-
-void
-mongoc_cmd_parts_assemble (mongoc_cmd_parts_t *parts,
- const mongoc_server_stream_t *server_stream)
-{
- mongoc_server_description_type_t server_type;
-
- ENTRY;
-
- BSON_ASSERT (parts);
- BSON_ASSERT (server_stream);
-
- server_type = server_stream->sd->type;
-
- /* must not be assembled already */
- BSON_ASSERT (!parts->assembled.command);
- BSON_ASSERT (bson_empty (&parts->assembled_body));
-
- /* begin with raw flags/cmd as assembled flags/cmd, might change below */
- parts->assembled.command = parts->body;
- parts->assembled.query_flags = parts->user_query_flags;
- parts->assembled.server_id = server_stream->sd->id;
-
- if (!parts->is_write_command) {
- switch (server_stream->topology_type) {
- case MONGOC_TOPOLOGY_SINGLE:
- if (server_type == MONGOC_SERVER_MONGOS) {
- _cmd_parts_apply_read_preferences_mongos (parts);
- } else {
- /* Server Selection Spec: for topology type single and server types
- * besides mongos, "clients MUST always set the slaveOK wire
- * protocol flag on reads to ensure that any server type can handle
- * the request."
- */
- parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK;
- }
-
- break;
-
- case MONGOC_TOPOLOGY_RS_NO_PRIMARY:
- case MONGOC_TOPOLOGY_RS_WITH_PRIMARY:
- /* Server Selection Spec: for RS topology types, "For all read
- * preferences modes except primary, clients MUST set the slaveOK wire
- * protocol flag to ensure that any suitable server can handle the
- * request. Clients MUST NOT set the slaveOK wire protocol flag if the
- * read preference mode is primary.
- */
- if (parts->read_prefs &&
- parts->read_prefs->mode != MONGOC_READ_PRIMARY) {
- parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK;
- }
-
- break;
-
- case MONGOC_TOPOLOGY_SHARDED:
- _cmd_parts_apply_read_preferences_mongos (parts);
- break;
-
- case MONGOC_TOPOLOGY_UNKNOWN:
- case MONGOC_TOPOLOGY_DESCRIPTION_TYPES:
- default:
- /* must not call mongoc_cmd_parts_assemble w/ unknown topology type */
- BSON_ASSERT (false);
- }
- } /* if (!parts->is_write_command) */
-
- if (!bson_empty (&parts->extra)) {
- /* Did we already copy the command body? */
- if (parts->assembled.command == parts->body) {
- bson_concat (&parts->assembled_body, parts->body);
- bson_concat (&parts->assembled_body, &parts->extra);
- parts->assembled.command = &parts->assembled_body;
- }
- }
-
- EXIT;
-}
-
-/*
- *--------------------------------------------------------------------------
- *
- * mongoc_cmd_parts_assemble_simple --
- *
- * Sets @parts->assembled.command and @parts->query_flags, without
- * applying any server-specific logic.
- *
- *--------------------------------------------------------------------------
- */
-
-void
-mongoc_cmd_parts_assemble_simple (mongoc_cmd_parts_t *parts, uint32_t server_id)
-{
- /* must not be assembled already, must have no options set */
- BSON_ASSERT (!parts->assembled.command);
- BSON_ASSERT (bson_empty (&parts->assembled_body));
- BSON_ASSERT (bson_empty (&parts->extra));
-
- parts->assembled.query_flags = parts->user_query_flags;
- parts->assembled.command = parts->body;
- parts->assembled.server_id = server_id;
-}
-
-
-/*
- *--------------------------------------------------------------------------
- *
- * mongoc_cmd_parts_cleanup --
- *
- * Free memory associated with a stack-allocated mongoc_cmd_parts_t.
- *
- * Side effects:
- * None.
- *
- *--------------------------------------------------------------------------
- */
-
-void
-mongoc_cmd_parts_cleanup (mongoc_cmd_parts_t *parts)
-{
- bson_destroy (&parts->extra);
- bson_destroy (&parts->assembled_body);
-}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-command.c b/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-command.c
deleted file mode 100644
index d8e0056d..00000000
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-command.c
+++ /dev/null
@@ -1,2027 +0,0 @@
-/*
- * Copyright 2014 MongoDB, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <bson.h>
-
-#include "mongoc-client-private.h"
-#include "mongoc-error.h"
-#include "mongoc-trace-private.h"
-#include "mongoc-write-command-private.h"
-#include "mongoc-write-concern-private.h"
-#include "mongoc-util-private.h"
-
-
-/*
- * TODO:
- *
- * - Remove error parameter to ops, favor result->error.
- */
-
-#define WRITE_CONCERN_DOC(wc) \
- (wc) ? (_mongoc_write_concern_get_bson ((mongoc_write_concern_t *) (wc))) \
- : (&gEmptyWriteConcern)
-
-typedef void (*mongoc_write_op_t) (mongoc_write_command_t *command,
- mongoc_client_t *client,
- mongoc_server_stream_t *server_stream,
- const char *database,
- const char *collection,
- const mongoc_write_concern_t *write_concern,
- uint32_t offset,
- mongoc_write_result_t *result,
- bson_error_t *error);
-
-
-static bson_t gEmptyWriteConcern = BSON_INITIALIZER;
-
-/* indexed by MONGOC_WRITE_COMMAND_DELETE, INSERT, UPDATE */
-static const char *gCommandNames[] = {"delete", "insert", "update"};
-static const char *gCommandFields[] = {"deletes", "documents", "updates"};
-static const uint32_t gCommandFieldLens[] = {7, 9, 7};
-
-static int32_t
-_mongoc_write_result_merge_arrays (uint32_t offset,
- mongoc_write_result_t *result,
- bson_t *dest,
- bson_iter_t *iter);
-
-static bool
-_is_duplicate_key_error (int32_t code)
-{
- return code == 11000 || code == 16460 || /* see SERVER-11493 */
- code == 11001 || /* duplicate key for updates before 2.6 */
- code == 12582; /* mongos before 2.6 */
-}
-
-
-void
-_mongoc_write_command_insert_append (mongoc_write_command_t *command,
- const bson_t *document)
-{
- const char *key;
- bson_iter_t iter;
- bson_oid_t oid;
- bson_t tmp;
- char keydata[16];
-
- ENTRY;
-
- BSON_ASSERT (command);
- BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);
- BSON_ASSERT (document);
- BSON_ASSERT (document->len >= 5);
-
- key = NULL;
- bson_uint32_to_string (command->n_documents, &key, keydata, sizeof keydata);
-
- BSON_ASSERT (key);
-
- /*
- * If the document does not contain an "_id" field, we need to generate
- * a new oid for "_id".
- */
- if (!bson_iter_init_find (&iter, document, "_id")) {
- bson_init (&tmp);
- bson_oid_init (&oid, NULL);
- BSON_APPEND_OID (&tmp, "_id", &oid);
- bson_concat (&tmp, document);
- BSON_APPEND_DOCUMENT (command->documents, key, &tmp);
- bson_destroy (&tmp);
- } else {
- BSON_APPEND_DOCUMENT (command->documents, key, document);
- }
-
- command->n_documents++;
-
- EXIT;
-}
-
-void
-_mongoc_write_command_update_append (mongoc_write_command_t *command,
- const bson_t *selector,
- const bson_t *update,
- const bson_t *opts)
-{
- const char *key;
- char keydata[16];
- bson_t doc;
-
- ENTRY;
-
- BSON_ASSERT (command);
- BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_UPDATE);
- BSON_ASSERT (selector && update);
-
- bson_init (&doc);
- BSON_APPEND_DOCUMENT (&doc, "q", selector);
- BSON_APPEND_DOCUMENT (&doc, "u", update);
- if (opts) {
- bson_concat (&doc, opts);
- command->flags.has_collation |= bson_has_field (opts, "collation");
- }
-
- key = NULL;
- bson_uint32_to_string (command->n_documents, &key, keydata, sizeof keydata);
- BSON_ASSERT (key);
- BSON_APPEND_DOCUMENT (command->documents, key, &doc);
- command->n_documents++;
-
- bson_destroy (&doc);
-
- EXIT;
-}
-
-void
-_mongoc_write_command_delete_append (mongoc_write_command_t *command,
- const bson_t *selector,
- const bson_t *opts)
-{
- const char *key;
- char keydata[16];
- bson_t doc;
-
- ENTRY;
-
- BSON_ASSERT (command);
- BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_DELETE);
- BSON_ASSERT (selector);
-
- BSON_ASSERT (selector->len >= 5);
-
- bson_init (&doc);
- BSON_APPEND_DOCUMENT (&doc, "q", selector);
- if (opts) {
- bson_concat (&doc, opts);
- command->flags.has_collation |= bson_has_field (opts, "collation");
- }
-
- key = NULL;
- bson_uint32_to_string (command->n_documents, &key, keydata, sizeof keydata);
- BSON_ASSERT (key);
- BSON_APPEND_DOCUMENT (command->documents, key, &doc);
- command->n_documents++;
-
- bson_destroy (&doc);
-
- EXIT;
-}
-
-void
-_mongoc_write_command_init_insert (mongoc_write_command_t *command, /* IN */
- const bson_t *document, /* IN */
- mongoc_bulk_write_flags_t flags, /* IN */
- int64_t operation_id, /* IN */
- bool allow_bulk_op_insert) /* IN */
-{
- ENTRY;
-
- BSON_ASSERT (command);
-
- command->type = MONGOC_WRITE_COMMAND_INSERT;
- command->documents = bson_new ();
- command->n_documents = 0;
- command->flags = flags;
- command->u.insert.allow_bulk_op_insert = (uint8_t) allow_bulk_op_insert;
- command->operation_id = operation_id;
-
- /* must handle NULL document from mongoc_collection_insert_bulk */
- if (document) {
- _mongoc_write_command_insert_append (command, document);
- }
-
- EXIT;
-}
-
-
-void
-_mongoc_write_command_init_delete (mongoc_write_command_t *command, /* IN */
- const bson_t *selector, /* IN */
- const bson_t *opts, /* IN */
- mongoc_bulk_write_flags_t flags, /* IN */
- int64_t operation_id) /* IN */
-{
- ENTRY;
-
- BSON_ASSERT (command);
- BSON_ASSERT (selector);
-
- command->type = MONGOC_WRITE_COMMAND_DELETE;
- command->documents = bson_new ();
- command->n_documents = 0;
- command->flags = flags;
- command->operation_id = operation_id;
-
- _mongoc_write_command_delete_append (command, selector, opts);
-
- EXIT;
-}
-
-
-void
-_mongoc_write_command_init_update (mongoc_write_command_t *command, /* IN */
- const bson_t *selector, /* IN */
- const bson_t *update, /* IN */
- const bson_t *opts, /* IN */
- mongoc_bulk_write_flags_t flags, /* IN */
- int64_t operation_id) /* IN */
-{
- ENTRY;
-
- BSON_ASSERT (command);
- BSON_ASSERT (selector);
- BSON_ASSERT (update);
-
- command->type = MONGOC_WRITE_COMMAND_UPDATE;
- command->documents = bson_new ();
- command->n_documents = 0;
- command->flags = flags;
- command->operation_id = operation_id;
-
- _mongoc_write_command_update_append (command, selector, update, opts);
-
- EXIT;
-}
-
-
-/* takes initialized bson_t *doc and begins formatting a write command */
-static void
-_mongoc_write_command_init (bson_t *doc,
- mongoc_write_command_t *command,
- const char *collection,
- const mongoc_write_concern_t *write_concern)
-{
- bson_iter_t iter;
-
- ENTRY;
-
- if (!command->n_documents || !bson_iter_init (&iter, command->documents) ||
- !bson_iter_next (&iter)) {
- EXIT;
- }
-
- BSON_APPEND_UTF8 (doc, gCommandNames[command->type], collection);
- BSON_APPEND_DOCUMENT (
- doc, "writeConcern", WRITE_CONCERN_DOC (write_concern));
- BSON_APPEND_BOOL (doc, "ordered", command->flags.ordered);
-
- if (command->flags.bypass_document_validation !=
- MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
- BSON_APPEND_BOOL (doc,
- "bypassDocumentValidation",
- !!command->flags.bypass_document_validation);
- }
-
- EXIT;
-}
-
-
-static void
-_mongoc_monitor_legacy_write (mongoc_client_t *client,
- mongoc_write_command_t *command,
- const char *db,
- const char *collection,
- const mongoc_write_concern_t *write_concern,
- mongoc_server_stream_t *stream,
- int64_t request_id)
-{
- bson_t doc;
- mongoc_apm_command_started_t event;
-
- ENTRY;
-
- if (!client->apm_callbacks.started) {
- EXIT;
- }
-
- bson_init (&doc);
- _mongoc_write_command_init (&doc, command, collection, write_concern);
-
- /* copy the whole documents buffer as e.g. "updates": [...] */
- BSON_APPEND_ARRAY (&doc, gCommandFields[command->type], command->documents);
-
- mongoc_apm_command_started_init (&event,
- &doc,
- db,
- gCommandNames[command->type],
- request_id,
- command->operation_id,
- &stream->sd->host,
- stream->sd->id,
- client->apm_context);
-
- client->apm_callbacks.started (&event);
-
- mongoc_apm_command_started_cleanup (&event);
- bson_destroy (&doc);
-}
-
-
-static void
-append_write_err (bson_t *doc,
- uint32_t code,
- const char *errmsg,
- size_t errmsg_len,
- const bson_t *errinfo)
-{
- bson_t array = BSON_INITIALIZER;
- bson_t child;
-
- BSON_ASSERT (errmsg);
-
- /* writeErrors: [{index: 0, code: code, errmsg: errmsg, errInfo: {...}}] */
- bson_append_document_begin (&array, "0", 1, &child);
- bson_append_int32 (&child, "index", 5, 0);
- bson_append_int32 (&child, "code", 4, (int32_t) code);
- bson_append_utf8 (&child, "errmsg", 6, errmsg, (int) errmsg_len);
- if (errinfo) {
- bson_append_document (&child, "errInfo", 7, errinfo);
- }
-
- bson_append_document_end (&array, &child);
- bson_append_array (doc, "writeErrors", 11, &array);
-
- bson_destroy (&array);
-}
-
-
-static void
-append_write_concern_err (bson_t *doc, const char *errmsg, size_t errmsg_len)
-{
- bson_t array = BSON_INITIALIZER;
- bson_t child;
- bson_t errinfo;
-
- BSON_ASSERT (errmsg);
-
- /* writeConcernErrors: [{code: 64,
- * errmsg: errmsg,
- * errInfo: {wtimeout: true}}] */
- bson_append_document_begin (&array, "0", 1, &child);
- bson_append_int32 (&child, "code", 4, 64);
- bson_append_utf8 (&child, "errmsg", 6, errmsg, (int) errmsg_len);
- bson_append_document_begin (&child, "errInfo", 7, &errinfo);
- bson_append_bool (&errinfo, "wtimeout", 8, true);
- bson_append_document_end (&child, &errinfo);
- bson_append_document_end (&array, &child);
- bson_append_array (doc, "writeConcernErrors", 18, &array);
-
- bson_destroy (&array);
-}
-
-
-static bool
-get_upserted_id (const bson_t *update, bson_value_t *upserted_id)
-{
- bson_iter_t iter;
- bson_iter_t id_iter;
-
- /* Versions of MongoDB before 2.6 don't return the _id for an upsert if _id
- * is not an ObjectId, so find it in the update document's query "q" or
- * update "u". It must be in one or both: if it were in neither the _id
- * would be server-generated, therefore an ObjectId, therefore returned and
- * we wouldn't call this function. If _id is in both the update document
- * *and* the query spec the update document _id takes precedence.
- */
-
- bson_iter_init (&iter, update);
-
- if (bson_iter_find_descendant (&iter, "u._id", &id_iter)) {
- bson_value_copy (bson_iter_value (&id_iter), upserted_id);
- return true;
- } else {
- bson_iter_init (&iter, update);
-
- if (bson_iter_find_descendant (&iter, "q._id", &id_iter)) {
- bson_value_copy (bson_iter_value (&id_iter), upserted_id);
- return true;
- }
- }
-
- /* server bug? */
- return false;
-}
-
-
-static void
-append_upserted (bson_t *doc, const bson_value_t *upserted_id)
-{
- bson_t array = BSON_INITIALIZER;
- bson_t child;
-
- /* append upserted: [{index: 0, _id: upserted_id}]*/
- bson_append_document_begin (&array, "0", 1, &child);
- bson_append_int32 (&child, "index", 5, 0);
- bson_append_value (&child, "_id", 3, upserted_id);
- bson_append_document_end (&array, &child);
-
- bson_append_array (doc, "upserted", 8, &array);
-
- bson_destroy (&array);
-}
-
-
-/* fire command-succeeded event as if we'd used a modern write command.
- * note, cluster.request_id was incremented once for the write, again
- * for the getLastError, so cluster.request_id is no longer valid; used the
- * passed-in request_id instead.
- */
-static void
-_mongoc_monitor_legacy_write_succeeded (mongoc_client_t *client,
- int64_t duration,
- mongoc_write_command_t *command,
- const bson_t *gle,
- mongoc_server_stream_t *stream,
- int64_t request_id)
-{
- bson_iter_t iter;
- bson_t doc;
- int64_t ok = 1;
- int64_t n = 0;
- uint32_t code = 8;
- bool wtimeout = false;
-
- /* server error message */
- const char *errmsg = NULL;
- size_t errmsg_len = 0;
-
- /* server errInfo subdocument */
- bool has_errinfo = false;
- uint32_t len;
- const uint8_t *data;
- bson_t errinfo;
-
- /* server upsertedId value */
- bool has_upserted_id = false;
- bson_value_t upserted_id;
-
- /* server updatedExisting value */
- bool has_updated_existing = false;
- bool updated_existing = false;
-
- mongoc_apm_command_succeeded_t event;
-
- ENTRY;
-
- if (!client->apm_callbacks.succeeded) {
- EXIT;
- }
-
- /* first extract interesting fields from getlasterror response */
- if (gle) {
- bson_iter_init (&iter, gle);
- while (bson_iter_next (&iter)) {
- if (!strcmp (bson_iter_key (&iter), "ok")) {
- ok = bson_iter_as_int64 (&iter);
- } else if (!strcmp (bson_iter_key (&iter), "n")) {
- n = bson_iter_as_int64 (&iter);
- } else if (!strcmp (bson_iter_key (&iter), "code")) {
- code = (uint32_t) bson_iter_as_int64 (&iter);
- if (code == 0) {
- /* server sent non-numeric error code? */
- code = 8;
- }
- } else if (!strcmp (bson_iter_key (&iter), "upserted")) {
- has_upserted_id = true;
- bson_value_copy (bson_iter_value (&iter), &upserted_id);
- } else if (!strcmp (bson_iter_key (&iter), "updatedExisting")) {
- has_updated_existing = true;
- updated_existing = bson_iter_as_bool (&iter);
- } else if ((!strcmp (bson_iter_key (&iter), "err") ||
- !strcmp (bson_iter_key (&iter), "errmsg")) &&
- BSON_ITER_HOLDS_UTF8 (&iter)) {
- errmsg = bson_iter_utf8_unsafe (&iter, &errmsg_len);
- } else if (!strcmp (bson_iter_key (&iter), "errInfo") &&
- BSON_ITER_HOLDS_DOCUMENT (&iter)) {
- bson_iter_document (&iter, &len, &data);
- bson_init_static (&errinfo, data, len);
- has_errinfo = true;
- } else if (!strcmp (bson_iter_key (&iter), "wtimeout")) {
- wtimeout = true;
- }
- }
- }
-
- /* based on PyMongo's _convert_write_result() */
- bson_init (&doc);
- bson_append_int32 (&doc, "ok", 2, (int32_t) ok);
-
- if (errmsg && !wtimeout) {
- /* Failure, but pass to the success callback. Command Monitoring Spec:
- * "Commands that executed on the server and return a status of {ok: 1}
- * are considered successful commands and fire CommandSucceededEvent.
- * Commands that have write errors are included since the actual command
- * did succeed, only writes failed." */
- append_write_err (
- &doc, code, errmsg, errmsg_len, has_errinfo ? &errinfo : NULL);
- } else {
- /* Success, perhaps with a writeConcernError. */
- if (errmsg) {
- append_write_concern_err (&doc, errmsg, errmsg_len);
- }
-
- if (command->type == MONGOC_WRITE_COMMAND_INSERT) {
- /* GLE result for insert is always 0 in most MongoDB versions. */
- n = command->n_documents;
- } else if (command->type == MONGOC_WRITE_COMMAND_UPDATE) {
- if (has_upserted_id) {
- append_upserted (&doc, &upserted_id);
- } else if (has_updated_existing && !updated_existing && n == 1) {
- has_upserted_id =
- get_upserted_id (&command->documents[0], &upserted_id);
-
- if (has_upserted_id) {
- append_upserted (&doc, &upserted_id);
- }
- }
- }
- }
-
- bson_append_int32 (&doc, "n", 1, (int32_t) n);
-
- mongoc_apm_command_succeeded_init (&event,
- duration,
- &doc,
- gCommandNames[command->type],
- request_id,
- command->operation_id,
- &stream->sd->host,
- stream->sd->id,
- client->apm_context);
-
- client->apm_callbacks.succeeded (&event);
-
- mongoc_apm_command_succeeded_cleanup (&event);
- bson_destroy (&doc);
-
- if (has_upserted_id) {
- bson_value_destroy (&upserted_id);
- }
-
- EXIT;
-}
-
-
-/*
- *-------------------------------------------------------------------------
- *
- * too_large_error --
- *
- * Fill a bson_error_t and optional bson_t with error info after
- * receiving a document for bulk insert, update, or remove that is
- * larger than max_bson_size.
- *
- * "err_doc" should be NULL or an empty initialized bson_t.
- *
- * Returns:
- * None.
- *
- * Side effects:
- * "error" and optionally "err_doc" are filled out.
- *
- *-------------------------------------------------------------------------
- */
-
-static void
-too_large_error (bson_error_t *error,
- int32_t idx,
- int32_t len,
- int32_t max_bson_size,
- bson_t *err_doc)
-{
- bson_set_error (error,
- MONGOC_ERROR_BSON,
- MONGOC_ERROR_BSON_INVALID,
- "Document %u is too large for the cluster. "
- "Document is %u bytes, max is %d.",
- idx,
- len,
- max_bson_size);
-
- if (err_doc) {
- BSON_APPEND_INT32 (err_doc, "index", idx);
- BSON_APPEND_UTF8 (err_doc, "err", error->message);
- BSON_APPEND_INT32 (err_doc, "code", MONGOC_ERROR_BSON_INVALID);
- }
-}
-
-
-static void
-_mongoc_write_command_delete_legacy (
- mongoc_write_command_t *command,
- mongoc_client_t *client,
- mongoc_server_stream_t *server_stream,
- const char *database,
- const char *collection,
- const mongoc_write_concern_t *write_concern,
- uint32_t offset,
- mongoc_write_result_t *result,
- bson_error_t *error)
-{
- int64_t started;
- int32_t max_bson_obj_size;
- const uint8_t *data;
- mongoc_rpc_t rpc;
- uint32_t request_id;
- bson_iter_t iter;
- bson_iter_t q_iter;
- uint32_t len;
- int64_t limit = 0;
- bson_t *gle = NULL;
- char ns[MONGOC_NAMESPACE_MAX + 1];
- bool r;
-
- ENTRY;
-
- BSON_ASSERT (command);
- BSON_ASSERT (client);
- BSON_ASSERT (database);
- BSON_ASSERT (server_stream);
- BSON_ASSERT (collection);
-
- started = bson_get_monotonic_time ();
-
- max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
-
- r = bson_iter_init (&iter, command->documents);
- BSON_ASSERT (r);
- if (!command->n_documents || !bson_iter_next (&iter)) {
- bson_set_error (error,
- MONGOC_ERROR_COLLECTION,
- MONGOC_ERROR_COLLECTION_DELETE_FAILED,
- "Cannot do an empty delete.");
- result->failed = true;
- EXIT;
- }
-
- bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);
-
- do {
- /* the document is like { "q": { <selector> }, limit: <0 or 1> } */
- r = (bson_iter_recurse (&iter, &q_iter) &&
- bson_iter_find (&q_iter, "q") && BSON_ITER_HOLDS_DOCUMENT (&q_iter));
-
- BSON_ASSERT (r);
- bson_iter_document (&q_iter, &len, &data);
- BSON_ASSERT (data);
- BSON_ASSERT (len >= 5);
- if (len > max_bson_obj_size) {
- too_large_error (error, 0, len, max_bson_obj_size, NULL);
- result->failed = true;
- EXIT;
- }
-
- request_id = ++client->cluster.request_id;
-
- rpc.header.msg_len = 0;
- rpc.header.request_id = request_id;
- rpc.header.response_to = 0;
- rpc.header.opcode = MONGOC_OPCODE_DELETE;
- rpc.delete_.zero = 0;
- rpc.delete_.collection = ns;
-
- if (bson_iter_find (&q_iter, "limit") &&
- (BSON_ITER_HOLDS_INT (&q_iter))) {
- limit = bson_iter_as_int64 (&q_iter);
- }
-
- rpc.delete_.flags =
- limit ? MONGOC_DELETE_SINGLE_REMOVE : MONGOC_DELETE_NONE;
- rpc.delete_.selector = data;
-
- _mongoc_monitor_legacy_write (client,
- command,
- database,
- collection,
- write_concern,
- server_stream,
- request_id);
-
- if (!mongoc_cluster_sendv_to_server (
- &client->cluster, &rpc, server_stream, write_concern, error)) {
- result->failed = true;
- EXIT;
- }
-
- if (mongoc_write_concern_is_acknowledged (write_concern)) {
- if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
- result->failed = true;
- EXIT;
- }
-
- _mongoc_write_result_merge_legacy (
- result,
- command,
- gle,
- client->error_api_version,
- MONGOC_ERROR_COLLECTION_DELETE_FAILED,
- offset);
-
- offset++;
- }
-
- _mongoc_monitor_legacy_write_succeeded (client,
- bson_get_monotonic_time () -
- started,
- command,
- gle,
- server_stream,
- request_id);
-
- if (gle) {
- bson_destroy (gle);
- gle = NULL;
- }
-
- started = bson_get_monotonic_time ();
- } while (bson_iter_next (&iter));
-
- EXIT;
-}
-
-
-static void
-_mongoc_write_command_insert_legacy (
- mongoc_write_command_t *command,
- mongoc_client_t *client,
- mongoc_server_stream_t *server_stream,
- const char *database,
- const char *collection,
- const mongoc_write_concern_t *write_concern,
- uint32_t offset,
- mongoc_write_result_t *result,
- bson_error_t *error)
-{
- int64_t started;
- uint32_t current_offset;
- mongoc_iovec_t *iov;
- const uint8_t *data;
- mongoc_rpc_t rpc;
- bson_iter_t iter;
- uint32_t len;
- bson_t *gle = NULL;
- uint32_t size = 0;
- bool has_more;
- char ns[MONGOC_NAMESPACE_MAX + 1];
- bool r;
- uint32_t n_docs_in_batch;
- uint32_t request_id = 0;
- uint32_t idx = 0;
- int32_t max_msg_size;
- int32_t max_bson_obj_size;
- bool singly;
-
- ENTRY;
-
- BSON_ASSERT (command);
- BSON_ASSERT (client);
- BSON_ASSERT (database);
- BSON_ASSERT (server_stream);
- BSON_ASSERT (collection);
- BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);
-
- started = bson_get_monotonic_time ();
- current_offset = offset;
-
- max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
- max_msg_size = mongoc_server_stream_max_msg_size (server_stream);
-
- singly = !command->u.insert.allow_bulk_op_insert;
-
- r = bson_iter_init (&iter, command->documents);
- BSON_ASSERT (r);
-
- if (!command->n_documents || !bson_iter_next (&iter)) {
- bson_set_error (error,
- MONGOC_ERROR_COLLECTION,
- MONGOC_ERROR_COLLECTION_INSERT_FAILED,
- "Cannot do an empty insert.");
- result->failed = true;
- EXIT;
- }
-
- bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);
-
- iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);
-
-again:
- has_more = false;
- n_docs_in_batch = 0;
- size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
- strlen (collection) + 1);
-
- do {
- BSON_ASSERT (BSON_ITER_HOLDS_DOCUMENT (&iter));
- BSON_ASSERT (n_docs_in_batch <= idx);
- BSON_ASSERT (idx < command->n_documents);
-
- bson_iter_document (&iter, &len, &data);
-
- BSON_ASSERT (data);
- BSON_ASSERT (len >= 5);
-
- if (len > max_bson_obj_size) {
- /* document is too large */
- bson_t write_err_doc = BSON_INITIALIZER;
-
- too_large_error (error, idx, len, max_bson_obj_size, &write_err_doc);
-
- _mongoc_write_result_merge_legacy (
- result,
- command,
- &write_err_doc,
- client->error_api_version,
- MONGOC_ERROR_COLLECTION_INSERT_FAILED,
- offset + idx);
-
- bson_destroy (&write_err_doc);
-
- if (command->flags.ordered) {
- /* send the batch so far (if any) and return the error */
- break;
- }
- } else if ((n_docs_in_batch == 1 && singly) ||
- size > (max_msg_size - len)) {
- /* batch is full, send it and then start the next batch */
- has_more = true;
- break;
- } else {
- /* add document to batch and continue building the batch */
- iov[n_docs_in_batch].iov_base = (void *) data;
- iov[n_docs_in_batch].iov_len = len;
- size += len;
- n_docs_in_batch++;
- }
-
- idx++;
- } while (bson_iter_next (&iter));
-
- if (n_docs_in_batch) {
- request_id = ++client->cluster.request_id;
-
- rpc.header.msg_len = 0;
- rpc.header.request_id = request_id;
- rpc.header.response_to = 0;
- rpc.header.opcode = MONGOC_OPCODE_INSERT;
- rpc.insert.flags =
- ((command->flags.ordered) ? MONGOC_INSERT_NONE
- : MONGOC_INSERT_CONTINUE_ON_ERROR);
- rpc.insert.collection = ns;
- rpc.insert.documents = iov;
- rpc.insert.n_documents = n_docs_in_batch;
-
- _mongoc_monitor_legacy_write (client,
- command,
- database,
- collection,
- write_concern,
- server_stream,
- request_id);
-
- if (!mongoc_cluster_sendv_to_server (
- &client->cluster, &rpc, server_stream, write_concern, error)) {
- result->failed = true;
- GOTO (cleanup);
- }
-
- if (mongoc_write_concern_is_acknowledged (write_concern)) {
- bool err = false;
- bson_iter_t citer;
-
- if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
- result->failed = true;
- GOTO (cleanup);
- }
-
- err = (bson_iter_init_find (&citer, gle, "err") &&
- bson_iter_as_bool (&citer));
-
- /*
- * Overwrite the "n" field since it will be zero. Otherwise, our
- * merge_legacy code will not know how many we tried in this batch.
- */
- if (!err && bson_iter_init_find (&citer, gle, "n") &&
- BSON_ITER_HOLDS_INT32 (&citer) && !bson_iter_int32 (&citer)) {
- bson_iter_overwrite_int32 (&citer, n_docs_in_batch);
- }
- }
-
- _mongoc_monitor_legacy_write_succeeded (client,
- bson_get_monotonic_time () -
- started,
- command,
- gle,
- server_stream,
- request_id);
-
- started = bson_get_monotonic_time ();
- }
-
-cleanup:
-
- if (gle) {
- _mongoc_write_result_merge_legacy (result,
- command,
- gle,
- client->error_api_version,
- MONGOC_ERROR_COLLECTION_INSERT_FAILED,
- current_offset);
-
- current_offset = offset + idx;
- bson_destroy (gle);
- gle = NULL;
- }
-
- if (has_more) {
- GOTO (again);
- }
-
- bson_free (iov);
-
- EXIT;
-}
-
-
-void
-_empty_error (mongoc_write_command_t *command, bson_error_t *error)
-{
- static const uint32_t codes[] = {MONGOC_ERROR_COLLECTION_DELETE_FAILED,
- MONGOC_ERROR_COLLECTION_INSERT_FAILED,
- MONGOC_ERROR_COLLECTION_UPDATE_FAILED};
-
- bson_set_error (error,
- MONGOC_ERROR_COLLECTION,
- codes[command->type],
- "Cannot do an empty %s",
- gCommandNames[command->type]);
-}
-
-
-bool
-_mongoc_write_command_will_overflow (uint32_t len_so_far,
- uint32_t document_len,
- uint32_t n_documents_written,
- int32_t max_bson_size,
- int32_t max_write_batch_size)
-{
- /* max BSON object size + 16k bytes.
- * server guarantees there is enough room: SERVER-10643
- */
- int32_t max_cmd_size = max_bson_size + 16384;
-
- BSON_ASSERT (max_bson_size);
-
- if (len_so_far + document_len > max_cmd_size) {
- return true;
- } else if (max_write_batch_size > 0 &&
- n_documents_written >= max_write_batch_size) {
- return true;
- }
-
- return false;
-}
-
-
-static void
-_mongoc_write_command_update_legacy (
- mongoc_write_command_t *command,
- mongoc_client_t *client,
- mongoc_server_stream_t *server_stream,
- const char *database,
- const char *collection,
- const mongoc_write_concern_t *write_concern,
- uint32_t offset,
- mongoc_write_result_t *result,
- bson_error_t *error)
-{
- int64_t started;
- int32_t max_bson_obj_size;
- mongoc_rpc_t rpc;
- uint32_t request_id = 0;
- bson_iter_t iter, subiter, subsubiter;
- bson_t doc;
- bool has_update, has_selector, is_upsert;
- bson_t update, selector;
- bson_t *gle = NULL;
- const uint8_t *data = NULL;
- uint32_t len = 0;
- size_t err_offset;
- bool val = false;
- char ns[MONGOC_NAMESPACE_MAX + 1];
- int32_t affected = 0;
- int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
- BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
-
- ENTRY;
-
- BSON_ASSERT (command);
- BSON_ASSERT (client);
- BSON_ASSERT (database);
- BSON_ASSERT (server_stream);
- BSON_ASSERT (collection);
-
- started = bson_get_monotonic_time ();
-
- max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
-
- bson_iter_init (&iter, command->documents);
- while (bson_iter_next (&iter)) {
- if (bson_iter_recurse (&iter, &subiter) &&
- bson_iter_find (&subiter, "u") &&
- BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
- bson_iter_document (&subiter, &len, &data);
- bson_init_static (&doc, data, len);
-
- if (bson_iter_init (&subsubiter, &doc) &&
- bson_iter_next (&subsubiter) &&
- (bson_iter_key (&subsubiter)[0] != '$') &&
- !bson_validate (
- &doc, (bson_validate_flags_t) vflags, &err_offset)) {
- result->failed = true;
- bson_set_error (error,
- MONGOC_ERROR_BSON,
- MONGOC_ERROR_BSON_INVALID,
- "update document is corrupt or contains "
- "invalid keys including $ or .");
- EXIT;
- }
- } else {
- result->failed = true;
- bson_set_error (error,
- MONGOC_ERROR_BSON,
- MONGOC_ERROR_BSON_INVALID,
- "updates is malformed.");
- EXIT;
- }
- }
-
- bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);
-
- bson_iter_init (&iter, command->documents);
- while (bson_iter_next (&iter)) {
- request_id = ++client->cluster.request_id;
-
- rpc.header.msg_len = 0;
- rpc.header.request_id = request_id;
- rpc.header.response_to = 0;
- rpc.header.opcode = MONGOC_OPCODE_UPDATE;
- rpc.update.zero = 0;
- rpc.update.collection = ns;
- rpc.update.flags = MONGOC_UPDATE_NONE;
-
- has_update = false;
- has_selector = false;
- is_upsert = false;
-
- bson_iter_recurse (&iter, &subiter);
- while (bson_iter_next (&subiter)) {
- if (strcmp (bson_iter_key (&subiter), "u") == 0) {
- bson_iter_document (&subiter, &len, &data);
- if (len > max_bson_obj_size) {
- too_large_error (error, 0, len, max_bson_obj_size, NULL);
- result->failed = true;
- EXIT;
- }
-
- rpc.update.update = data;
- bson_init_static (&update, data, len);
- has_update = true;
- } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
- bson_iter_document (&subiter, &len, &data);
- if (len > max_bson_obj_size) {
- too_large_error (error, 0, len, max_bson_obj_size, NULL);
- result->failed = true;
- EXIT;
- }
-
- rpc.update.selector = data;
- bson_init_static (&selector, data, len);
- has_selector = true;
- } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
- val = bson_iter_bool (&subiter);
- if (val) {
- rpc.update.flags = (mongoc_update_flags_t) (
- rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
- }
- } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
- val = bson_iter_bool (&subiter);
- if (val) {
- rpc.update.flags = (mongoc_update_flags_t) (
- rpc.update.flags | MONGOC_UPDATE_UPSERT);
- }
- is_upsert = true;
- }
- }
-
- _mongoc_monitor_legacy_write (client,
- command,
- database,
- collection,
- write_concern,
- server_stream,
- request_id);
-
- if (!mongoc_cluster_sendv_to_server (
- &client->cluster, &rpc, server_stream, write_concern, error)) {
- result->failed = true;
- EXIT;
- }
-
- if (mongoc_write_concern_is_acknowledged (write_concern)) {
- if (!_mongoc_client_recv_gle (client, server_stream, &gle, error)) {
- result->failed = true;
- EXIT;
- }
-
- if (bson_iter_init_find (&subiter, gle, "n") &&
- BSON_ITER_HOLDS_INT32 (&subiter)) {
- affected = bson_iter_int32 (&subiter);
- }
-
- /*
- * CDRIVER-372:
- *
- * Versions of MongoDB before 2.6 don't return the _id for an
- * upsert if _id is not an ObjectId.
- */
- if (is_upsert && affected &&
- !bson_iter_init_find (&subiter, gle, "upserted") &&
- bson_iter_init_find (&subiter, gle, "updatedExisting") &&
- BSON_ITER_HOLDS_BOOL (&subiter) && !bson_iter_bool (&subiter)) {
- if (has_update && bson_iter_init_find (&subiter, &update, "_id")) {
- _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
- } else if (has_selector &&
- bson_iter_init_find (&subiter, &selector, "_id")) {
- _ignore_value (bson_append_iter (gle, "upserted", 8, &subiter));
- }
- }
-
- _mongoc_write_result_merge_legacy (
- result,
- command,
- gle,
- client->error_api_version,
- MONGOC_ERROR_COLLECTION_UPDATE_FAILED,
- offset);
-
- offset++;
- }
-
- _mongoc_monitor_legacy_write_succeeded (client,
- bson_get_monotonic_time () -
- started,
- command,
- gle,
- server_stream,
- request_id);
-
- if (gle) {
- bson_destroy (gle);
- gle = NULL;
- }
-
- started = bson_get_monotonic_time ();
- }
-}
-
-
-static mongoc_write_op_t gLegacyWriteOps[3] = {
- _mongoc_write_command_delete_legacy,
- _mongoc_write_command_insert_legacy,
- _mongoc_write_command_update_legacy};
-
-
-static void
-_mongoc_write_command (mongoc_write_command_t *command,
- mongoc_client_t *client,
- mongoc_server_stream_t *server_stream,
- const char *database,
- const char *collection,
- const mongoc_write_concern_t *write_concern,
- uint32_t offset,
- mongoc_write_result_t *result,
- bson_error_t *error)
-{
- mongoc_cmd_parts_t parts;
- const uint8_t *data;
- bson_iter_t iter;
- const char *key;
- uint32_t len = 0;
- bson_t tmp;
- bson_t ar;
- bson_t cmd;
- bson_t reply;
- char str[16];
- bool has_more;
- bool ret = false;
- uint32_t i;
- int32_t max_bson_obj_size;
- int32_t max_write_batch_size;
- int32_t min_wire_version;
- uint32_t overhead;
- uint32_t key_len;
-
- ENTRY;
-
- BSON_ASSERT (command);
- BSON_ASSERT (client);
- BSON_ASSERT (database);
- BSON_ASSERT (server_stream);
- BSON_ASSERT (collection);
-
- bson_init (&cmd);
- max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
- max_write_batch_size =
- mongoc_server_stream_max_write_batch_size (server_stream);
-
- /*
- * If we have an unacknowledged write and the server supports the legacy
- * opcodes, then submit the legacy opcode so we don't need to wait for
- * a response from the server.
- */
-
- min_wire_version = server_stream->sd->min_wire_version;
- if ((min_wire_version == 0) &&
- !mongoc_write_concern_is_acknowledged (write_concern)) {
- if (command->flags.bypass_document_validation !=
- MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
- bson_set_error (
- error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_COMMAND_INVALID_ARG,
- "Cannot set bypassDocumentValidation for unacknowledged writes");
- EXIT;
- }
- if (command->flags.has_collation) {
- bson_set_error (error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_COMMAND_INVALID_ARG,
- "Cannot set collation for unacknowledged writes");
- EXIT;
- }
- gLegacyWriteOps[command->type](command,
- client,
- server_stream,
- database,
- collection,
- write_concern,
- offset,
- result,
- error);
- EXIT;
- }
- if (command->flags.has_collation &&
- server_stream->sd->max_wire_version < WIRE_VERSION_COLLATION) {
- bson_set_error (error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
- "Collation is not supported by the selected server");
- EXIT;
- }
-
- if (!command->n_documents || !bson_iter_init (&iter, command->documents) ||
- !bson_iter_next (&iter)) {
- _empty_error (command, error);
- result->failed = true;
- EXIT;
- }
-
-again:
- has_more = false;
- i = 0;
-
- _mongoc_write_command_init (&cmd, command, collection, write_concern);
-
- /* 1 byte to specify array type, 1 byte for field name's null terminator */
- overhead = cmd.len + 2 + gCommandFieldLens[command->type];
-
- if (!_mongoc_write_command_will_overflow (overhead,
- command->documents->len,
- command->n_documents,
- max_bson_obj_size,
- max_write_batch_size)) {
- /* copy the whole documents buffer as e.g. "updates": [...] */
- bson_append_array (&cmd,
- gCommandFields[command->type],
- gCommandFieldLens[command->type],
- command->documents);
- i = command->n_documents;
- } else {
- bson_append_array_begin (&cmd,
- gCommandFields[command->type],
- gCommandFieldLens[command->type],
- &ar);
-
- do {
- BSON_ASSERT (BSON_ITER_HOLDS_DOCUMENT (&iter));
- bson_iter_document (&iter, &len, &data);
-
- /* append array element like "0": { ... doc ... } */
- key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);
-
- /* 1 byte to specify document type, 1 byte for key's null terminator */
- if (_mongoc_write_command_will_overflow (overhead,
- key_len + len + 2 + ar.len,
- i,
- max_bson_obj_size,
- max_write_batch_size)) {
- has_more = true;
- break;
- }
-
- BSON_ASSERT (bson_init_static (&tmp, data, len));
- BSON_APPEND_DOCUMENT (&ar, key, &tmp);
-
- bson_destroy (&tmp);
-
- i++;
- } while (bson_iter_next (&iter));
-
- bson_append_array_end (&cmd, &ar);
- }
-
- if (!i) {
- too_large_error (error, i, len, max_bson_obj_size, NULL);
- result->failed = true;
- ret = false;
-
- /* the current document is too large, continue to the next */
- if (!bson_iter_next (&iter)) {
- GOTO (cleanup);
- }
- } else {
- mongoc_cmd_parts_init (&parts, database, MONGOC_QUERY_NONE, &cmd);
- parts.is_write_command = true;
- parts.assembled.operation_id = command->operation_id;
- ret = mongoc_cluster_run_command_monitored (
- &client->cluster, &parts, server_stream, &reply, error);
-
- if (!ret) {
- result->failed = true;
- if (bson_empty (&reply)) {
- /* The command not only failed,
- * the roundtrip to the server failed and the node was disconnected
- */
- result->must_stop = true;
- }
- }
-
- _mongoc_write_result_merge (result, command, &reply, offset);
- offset += i;
- bson_destroy (&reply);
- mongoc_cmd_parts_cleanup (&parts);
- }
-
- if (has_more && (ret || !command->flags.ordered) && !result->must_stop) {
- bson_reinit (&cmd);
- GOTO (again);
- }
-
-cleanup:
- bson_destroy (&cmd);
- EXIT;
-}
-
-
-void
-_mongoc_write_command_execute (
- mongoc_write_command_t *command, /* IN */
- mongoc_client_t *client, /* IN */
- mongoc_server_stream_t *server_stream, /* IN */
- const char *database, /* IN */
- const char *collection, /* IN */
- const mongoc_write_concern_t *write_concern, /* IN */
- uint32_t offset, /* IN */
- mongoc_write_result_t *result) /* OUT */
-{
- ENTRY;
-
- BSON_ASSERT (command);
- BSON_ASSERT (client);
- BSON_ASSERT (server_stream);
- BSON_ASSERT (database);
- BSON_ASSERT (collection);
- BSON_ASSERT (result);
-
- if (!write_concern) {
- write_concern = client->write_concern;
- }
-
- if (!mongoc_write_concern_is_valid (write_concern)) {
- bson_set_error (&result->error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_COMMAND_INVALID_ARG,
- "The write concern is invalid.");
- result->failed = true;
- EXIT;
- }
-
- if (server_stream->sd->max_wire_version >= WIRE_VERSION_WRITE_CMD) {
- _mongoc_write_command (command,
- client,
- server_stream,
- database,
- collection,
- write_concern,
- offset,
- result,
- &result->error);
- } else {
- if (command->flags.bypass_document_validation !=
- MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
- bson_set_error (
- &result->error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_COMMAND_INVALID_ARG,
- "Cannot set bypassDocumentValidation for unacknowledged writes");
- result->failed = true;
- EXIT;
- }
- if (command->flags.has_collation &&
- server_stream->sd->max_wire_version < WIRE_VERSION_COLLATION) {
- bson_set_error (&result->error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_COMMAND_INVALID_ARG,
- "Cannot set collation for unacknowledged writes");
- result->failed = true;
- EXIT;
- }
- gLegacyWriteOps[command->type](command,
- client,
- server_stream,
- database,
- collection,
- write_concern,
- offset,
- result,
- &result->error);
- }
-
- EXIT;
-}
-
-
-void
-_mongoc_write_command_destroy (mongoc_write_command_t *command)
-{
- ENTRY;
-
- if (command) {
- bson_destroy (command->documents);
- }
-
- EXIT;
-}
-
-
-void
-_mongoc_write_result_init (mongoc_write_result_t *result) /* IN */
-{
- ENTRY;
-
- BSON_ASSERT (result);
-
- memset (result, 0, sizeof *result);
-
- bson_init (&result->upserted);
- bson_init (&result->writeConcernErrors);
- bson_init (&result->writeErrors);
-
- EXIT;
-}
-
-
-void
-_mongoc_write_result_destroy (mongoc_write_result_t *result)
-{
- ENTRY;
-
- BSON_ASSERT (result);
-
- bson_destroy (&result->upserted);
- bson_destroy (&result->writeConcernErrors);
- bson_destroy (&result->writeErrors);
-
- EXIT;
-}
-
-
-static void
-_mongoc_write_result_append_upsert (mongoc_write_result_t *result,
- int32_t idx,
- const bson_value_t *value)
-{
- bson_t child;
- const char *keyptr = NULL;
- char key[12];
- int len;
-
- BSON_ASSERT (result);
- BSON_ASSERT (value);
-
- len = (int) bson_uint32_to_string (
- result->upsert_append_count, &keyptr, key, sizeof key);
-
- bson_append_document_begin (&result->upserted, keyptr, len, &child);
- BSON_APPEND_INT32 (&child, "index", idx);
- BSON_APPEND_VALUE (&child, "_id", value);
- bson_append_document_end (&result->upserted, &child);
-
- result->upsert_append_count++;
-}
-
-
-static void
-_append_write_concern_err_legacy (mongoc_write_result_t *result,
- const char *err,
- int32_t code)
-{
- char str[16];
- const char *key;
- size_t keylen;
- bson_t write_concern_error;
-
- /* don't set result->failed; record the write concern err and continue */
- keylen = bson_uint32_to_string (
- result->n_writeConcernErrors, &key, str, sizeof str);
-
- BSON_ASSERT (keylen < INT_MAX);
-
- bson_append_document_begin (
- &result->writeConcernErrors, key, (int) keylen, &write_concern_error);
-
- bson_append_int32 (&write_concern_error, "code", 4, code);
- bson_append_utf8 (&write_concern_error, "errmsg", 6, err, -1);
- bson_append_document_end (&result->writeConcernErrors, &write_concern_error);
- result->n_writeConcernErrors++;
-}
-
-
-static void
-_append_write_err_legacy (mongoc_write_result_t *result,
- const char *err,
- mongoc_error_domain_t domain,
- int32_t code,
- uint32_t offset)
-{
- bson_t holder, write_errors, child;
- bson_iter_t iter;
-
- BSON_ASSERT (code > 0);
-
- if (!result->error.domain) {
- bson_set_error (&result->error, domain, (uint32_t) code, "%s", err);
- }
-
- /* stop processing, if result->ordered */
- result->failed = true;
-
- bson_init (&holder);
- bson_append_array_begin (&holder, "0", 1, &write_errors);
- bson_append_document_begin (&write_errors, "0", 1, &child);
-
- /* set error's "index" to 0; fixed up in _mongoc_write_result_merge_arrays */
- bson_append_int32 (&child, "index", 5, 0);
- bson_append_int32 (&child, "code", 4, code);
- bson_append_utf8 (&child, "errmsg", 6, err, -1);
- bson_append_document_end (&write_errors, &child);
- bson_append_array_end (&holder, &write_errors);
- bson_iter_init (&iter, &holder);
- bson_iter_next (&iter);
-
- _mongoc_write_result_merge_arrays (
- offset, result, &result->writeErrors, &iter);
-
- bson_destroy (&holder);
-}
-
-
-void
-_mongoc_write_result_merge_legacy (mongoc_write_result_t *result, /* IN */
- mongoc_write_command_t *command, /* IN */
- const bson_t *reply, /* IN */
- int32_t error_api_version,
- mongoc_error_code_t default_code,
- uint32_t offset)
-{
- const bson_value_t *value;
- bson_iter_t iter;
- bson_iter_t ar;
- bson_iter_t citer;
- const char *err = NULL;
- int32_t code = 0;
- int32_t n = 0;
- int32_t upsert_idx = 0;
- mongoc_error_domain_t domain;
-
- ENTRY;
-
- BSON_ASSERT (result);
- BSON_ASSERT (reply);
-
- domain = error_api_version >= MONGOC_ERROR_API_VERSION_2
- ? MONGOC_ERROR_SERVER
- : MONGOC_ERROR_COLLECTION;
-
- if (bson_iter_init_find (&iter, reply, "n") &&
- BSON_ITER_HOLDS_INT32 (&iter)) {
- n = bson_iter_int32 (&iter);
- }
-
- if (bson_iter_init_find (&iter, reply, "err") &&
- BSON_ITER_HOLDS_UTF8 (&iter)) {
- err = bson_iter_utf8 (&iter, NULL);
- }
-
- if (bson_iter_init_find (&iter, reply, "code") &&
- BSON_ITER_HOLDS_INT32 (&iter)) {
- code = bson_iter_int32 (&iter);
- }
-
- if (_is_duplicate_key_error (code)) {
- code = MONGOC_ERROR_DUPLICATE_KEY;
- }
-
- if (code || err) {
- if (!err) {
- err = "unknown error";
- }
-
- if (bson_iter_init_find (&iter, reply, "wtimeout") &&
- bson_iter_as_bool (&iter)) {
- if (!code) {
- code = (int32_t) MONGOC_ERROR_WRITE_CONCERN_ERROR;
- }
-
- _append_write_concern_err_legacy (result, err, code);
- } else {
- if (!code) {
- code = (int32_t) default_code;
- }
-
- _append_write_err_legacy (result, err, domain, code, offset);
- }
- }
-
- switch (command->type) {
- case MONGOC_WRITE_COMMAND_INSERT:
- if (n) {
- result->nInserted += n;
- }
- break;
- case MONGOC_WRITE_COMMAND_DELETE:
- result->nRemoved += n;
- break;
- case MONGOC_WRITE_COMMAND_UPDATE:
- if (bson_iter_init_find (&iter, reply, "upserted") &&
- !BSON_ITER_HOLDS_ARRAY (&iter)) {
- result->nUpserted += n;
- value = bson_iter_value (&iter);
- _mongoc_write_result_append_upsert (result, offset, value);
- } else if (bson_iter_init_find (&iter, reply, "upserted") &&
- BSON_ITER_HOLDS_ARRAY (&iter)) {
- result->nUpserted += n;
- if (bson_iter_recurse (&iter, &ar)) {
- while (bson_iter_next (&ar)) {
- if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
- bson_iter_recurse (&ar, &citer) &&
- bson_iter_find (&citer, "_id")) {
- value = bson_iter_value (&citer);
- _mongoc_write_result_append_upsert (
- result, offset + upsert_idx, value);
- upsert_idx++;
- }
- }
- }
- } else if ((n == 1) &&
- bson_iter_init_find (&iter, reply, "updatedExisting") &&
- BSON_ITER_HOLDS_BOOL (&iter) && !bson_iter_bool (&iter)) {
- result->nUpserted += n;
- } else {
- result->nMatched += n;
- }
- break;
- default:
- break;
- }
-
- result->omit_nModified = true;
-
- EXIT;
-}
-
-
-static int32_t
-_mongoc_write_result_merge_arrays (uint32_t offset,
- mongoc_write_result_t *result, /* IN */
- bson_t *dest, /* IN */
- bson_iter_t *iter) /* IN */
-{
- const bson_value_t *value;
- bson_iter_t ar;
- bson_iter_t citer;
- int32_t idx;
- int32_t count = 0;
- int32_t aridx;
- bson_t child;
- const char *keyptr = NULL;
- char key[12];
- int len;
-
- ENTRY;
-
- BSON_ASSERT (result);
- BSON_ASSERT (dest);
- BSON_ASSERT (iter);
- BSON_ASSERT (BSON_ITER_HOLDS_ARRAY (iter));
-
- aridx = bson_count_keys (dest);
-
- if (bson_iter_recurse (iter, &ar)) {
- while (bson_iter_next (&ar)) {
- if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
- bson_iter_recurse (&ar, &citer)) {
- len =
- (int) bson_uint32_to_string (aridx++, &keyptr, key, sizeof key);
- bson_append_document_begin (dest, keyptr, len, &child);
- while (bson_iter_next (&citer)) {
- if (BSON_ITER_IS_KEY (&citer, "index")) {
- idx = bson_iter_int32 (&citer) + offset;
- BSON_APPEND_INT32 (&child, "index", idx);
- } else {
- value = bson_iter_value (&citer);
- BSON_APPEND_VALUE (&child, bson_iter_key (&citer), value);
- }
- }
- bson_append_document_end (dest, &child);
- count++;
- }
- }
- }
-
- RETURN (count);
-}
-
-
-void
-_mongoc_write_result_merge (mongoc_write_result_t *result, /* IN */
- mongoc_write_command_t *command, /* IN */
- const bson_t *reply, /* IN */
- uint32_t offset)
-{
- int32_t server_index = 0;
- const bson_value_t *value;
- bson_iter_t iter;
- bson_iter_t citer;
- bson_iter_t ar;
- int32_t n_upserted = 0;
- int32_t affected = 0;
-
- ENTRY;
-
- BSON_ASSERT (result);
- BSON_ASSERT (reply);
-
- if (bson_iter_init_find (&iter, reply, "n") &&
- BSON_ITER_HOLDS_INT32 (&iter)) {
- affected = bson_iter_int32 (&iter);
- }
-
- if (bson_iter_init_find (&iter, reply, "writeErrors") &&
- BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &citer) &&
- bson_iter_next (&citer)) {
- result->failed = true;
- }
-
- switch (command->type) {
- case MONGOC_WRITE_COMMAND_INSERT:
- result->nInserted += affected;
- break;
- case MONGOC_WRITE_COMMAND_DELETE:
- result->nRemoved += affected;
- break;
- case MONGOC_WRITE_COMMAND_UPDATE:
-
- /* server returns each upserted _id with its index into this batch
- * look for "upserted": [{"index": 4, "_id": ObjectId()}, ...] */
- if (bson_iter_init_find (&iter, reply, "upserted")) {
- if (BSON_ITER_HOLDS_ARRAY (&iter) &&
- (bson_iter_recurse (&iter, &ar))) {
- while (bson_iter_next (&ar)) {
- if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
- bson_iter_recurse (&ar, &citer) &&
- bson_iter_find (&citer, "index") &&
- BSON_ITER_HOLDS_INT32 (&citer)) {
- server_index = bson_iter_int32 (&citer);
-
- if (bson_iter_recurse (&ar, &citer) &&
- bson_iter_find (&citer, "_id")) {
- value = bson_iter_value (&citer);
- _mongoc_write_result_append_upsert (
- result, offset + server_index, value);
- n_upserted++;
- }
- }
- }
- }
- result->nUpserted += n_upserted;
- /*
- * XXX: The following addition to nMatched needs some checking.
- * I'm highly skeptical of it.
- */
- result->nMatched += BSON_MAX (0, (affected - n_upserted));
- } else {
- result->nMatched += affected;
- }
- /*
- * SERVER-13001 - in a mixed sharded cluster a call to update could
- * return nModified (>= 2.6) or not (<= 2.4). If any call does not
- * return nModified we can't report a valid final count so omit the
- * field completely.
- */
- if (bson_iter_init_find (&iter, reply, "nModified") &&
- BSON_ITER_HOLDS_INT32 (&iter)) {
- result->nModified += bson_iter_int32 (&iter);
- } else {
- /*
- * nModified could be BSON_TYPE_NULL, which should also be omitted.
- */
- result->omit_nModified = true;
- }
- break;
- default:
- BSON_ASSERT (false);
- break;
- }
-
- if (bson_iter_init_find (&iter, reply, "writeErrors") &&
- BSON_ITER_HOLDS_ARRAY (&iter)) {
- _mongoc_write_result_merge_arrays (
- offset, result, &result->writeErrors, &iter);
- }
-
- if (bson_iter_init_find (&iter, reply, "writeConcernError") &&
- BSON_ITER_HOLDS_DOCUMENT (&iter)) {
- uint32_t len;
- const uint8_t *data;
- bson_t write_concern_error;
- char str[16];
- const char *key;
-
- /* writeConcernError is a subdocument in the server response
- * append it to the result->writeConcernErrors array */
- bson_iter_document (&iter, &len, &data);
- bson_init_static (&write_concern_error, data, len);
-
- bson_uint32_to_string (
- result->n_writeConcernErrors, &key, str, sizeof str);
-
- bson_append_document (
- &result->writeConcernErrors, key, -1, &write_concern_error);
-
- result->n_writeConcernErrors++;
- }
-
- EXIT;
-}
-
-
-/*
- * If error is not set, set code from first document in array like
- * [{"code": 64, "errmsg": "duplicate"}, ...]. Format the error message
- * from all errors in array.
-*/
-static void
-_set_error_from_response (bson_t *bson_array,
- mongoc_error_domain_t domain,
- const char *error_type,
- bson_error_t *error /* OUT */)
-{
- bson_iter_t array_iter;
- bson_iter_t doc_iter;
- bson_string_t *compound_err;
- const char *errmsg = NULL;
- int32_t code = 0;
- uint32_t n_keys, i;
-
- compound_err = bson_string_new (NULL);
- n_keys = bson_count_keys (bson_array);
- if (n_keys > 1) {
- bson_string_append_printf (
- compound_err, "Multiple %s errors: ", error_type);
- }
-
- if (!bson_empty0 (bson_array) && bson_iter_init (&array_iter, bson_array)) {
- /* get first code and all error messages */
- i = 0;
-
- while (bson_iter_next (&array_iter)) {
- if (BSON_ITER_HOLDS_DOCUMENT (&array_iter) &&
- bson_iter_recurse (&array_iter, &doc_iter)) {
- /* parse doc, which is like {"code": 64, "errmsg": "duplicate"} */
- while (bson_iter_next (&doc_iter)) {
- /* use the first error code we find */
- if (BSON_ITER_IS_KEY (&doc_iter, "code") && code == 0) {
- code = bson_iter_int32 (&doc_iter);
- } else if (BSON_ITER_IS_KEY (&doc_iter, "errmsg")) {
- errmsg = bson_iter_utf8 (&doc_iter, NULL);
-
- /* build message like 'Multiple write errors: "foo", "bar"' */
- if (n_keys > 1) {
- bson_string_append_printf (compound_err, "\"%s\"", errmsg);
- if (i < n_keys - 1) {
- bson_string_append (compound_err, ", ");
- }
- } else {
- /* single error message */
- bson_string_append (compound_err, errmsg);
- }
- }
- }
-
- i++;
- }
- }
-
- if (code && compound_err->len) {
- bson_set_error (
- error, domain, (uint32_t) code, "%s", compound_err->str);
- }
- }
-
- bson_string_free (compound_err, true);
-}
-
-
-bool
-_mongoc_write_result_complete (
- mongoc_write_result_t *result, /* IN */
- int32_t error_api_version, /* IN */
- const mongoc_write_concern_t *wc, /* IN */
- mongoc_error_domain_t err_domain_override, /* IN */
- bson_t *bson, /* OUT */
- bson_error_t *error) /* OUT */
-{
- mongoc_error_domain_t domain;
-
- ENTRY;
-
- BSON_ASSERT (result);
-
- if (error_api_version >= MONGOC_ERROR_API_VERSION_2) {
- domain = MONGOC_ERROR_SERVER;
- } else if (err_domain_override) {
- domain = err_domain_override;
- } else if (result->error.domain) {
- domain = (mongoc_error_domain_t) result->error.domain;
- } else {
- domain = MONGOC_ERROR_COLLECTION;
- }
-
- if (bson && mongoc_write_concern_is_acknowledged (wc)) {
- BSON_APPEND_INT32 (bson, "nInserted", result->nInserted);
- BSON_APPEND_INT32 (bson, "nMatched", result->nMatched);
- if (!result->omit_nModified) {
- BSON_APPEND_INT32 (bson, "nModified", result->nModified);
- }
- BSON_APPEND_INT32 (bson, "nRemoved", result->nRemoved);
- BSON_APPEND_INT32 (bson, "nUpserted", result->nUpserted);
- if (!bson_empty0 (&result->upserted)) {
- BSON_APPEND_ARRAY (bson, "upserted", &result->upserted);
- }
- BSON_APPEND_ARRAY (bson, "writeErrors", &result->writeErrors);
- if (result->n_writeConcernErrors) {
- BSON_APPEND_ARRAY (
- bson, "writeConcernErrors", &result->writeConcernErrors);
- }
- }
-
- /* set bson_error_t from first write error or write concern error */
- _set_error_from_response (
- &result->writeErrors, domain, "write", &result->error);
-
- if (!result->error.code) {
- _set_error_from_response (&result->writeConcernErrors,
- MONGOC_ERROR_WRITE_CONCERN,
- "write concern",
- &result->error);
- }
-
- if (error) {
- memcpy (error, &result->error, sizeof *error);
- }
-
- RETURN (!result->failed && result->error.code == 0);
-}
diff --git a/mongodb-1.3.4/tests/apm/monitoring-commandFailed-001.phpt b/mongodb-1.3.4/tests/apm/monitoring-commandFailed-001.phpt
deleted file mode 100644
index dbc4faa7..00000000
--- a/mongodb-1.3.4/tests/apm/monitoring-commandFailed-001.phpt
+++ /dev/null
@@ -1,56 +0,0 @@
---TEST--
-MongoDB\Driver\Monitoring\CommandFailedEvent
---SKIPIF--
-<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE); ?>
---FILE--
-<?php
-require_once __DIR__ . "/../utils/basic.inc";
-
-$m = new MongoDB\Driver\Manager(STANDALONE);
-
-class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
-{
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "started: ", $event->getCommandName(), "\n";
- }
-
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- }
-
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- echo "failed: ", $event->getCommandName(), "\n";
- echo "- getError() returns an object: ", is_object( $event->getError() ) ? 'yes' : 'no', "\n";
- echo "- getError() returns an MongoDB\Driver\Exception\Exception object: ", $event->getError() instanceof MongoDB\Driver\Exception\Exception ? 'yes' : 'no', "\n";
- echo "- getDurationMicros() returns an integer: ", is_integer( $event->getDurationMicros() ) ? 'yes' : 'no', "\n";
- echo "- getDurationMicros() returns > 0: ", $event->getDurationMicros() > 0 ? 'yes' : 'no', "\n";
- echo "- getCommandName() returns a string: ", is_string( $event->getCommandName() ) ? 'yes' : 'no', "\n";
- echo "- getCommandName() returns '", $event->getCommandName(), "'\n";
- echo "- getServer() returns an object: ", is_object( $event->getServer() ) ? 'yes' : 'no', "\n";
- echo "- getServer() returns a Server object: ", $event->getServer() instanceof MongoDB\Driver\Server ? 'yes' : 'no', "\n";
- echo "- getOperationId() returns a string: ", is_string( $event->getOperationId() ) ? 'yes' : 'no', "\n";
- echo "- getRequestId() returns a string: ", is_string( $event->getRequestId() ) ? 'yes' : 'no', "\n";
- }
-}
-
-$subscriber = new MySubscriber;
-
-MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
-
-CLEANUP( STANDALONE );
-?>
---EXPECT--
-started: drop
-failed: drop
-- getError() returns an object: yes
-- getError() returns an MongoDB\Driver\Exception\Exception object: yes
-- getDurationMicros() returns an integer: yes
-- getDurationMicros() returns > 0: yes
-- getCommandName() returns a string: yes
-- getCommandName() returns 'drop'
-- getServer() returns an object: yes
-- getServer() returns a Server object: yes
-- getOperationId() returns a string: yes
-- getRequestId() returns a string: yes
diff --git a/mongodb-1.3.4/tests/apm/monitoring-commandFailed-002.phpt b/mongodb-1.3.4/tests/apm/monitoring-commandFailed-002.phpt
deleted file mode 100644
index df6cab19..00000000
--- a/mongodb-1.3.4/tests/apm/monitoring-commandFailed-002.phpt
+++ /dev/null
@@ -1,43 +0,0 @@
---TEST--
-MongoDB\Driver\Monitoring\CommandFailedEvent: requestId and operationId match
---SKIPIF--
-<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
---FILE--
-<?php
-require_once __DIR__ . "/../utils/basic.inc";
-
-$m = new MongoDB\Driver\Manager(STANDALONE);
-
-class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
-{
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "started: ", $event->getCommandName(), "\n";
- $this->startRequestId = $event->getRequestId();
- $this->startOperationId = $event->getOperationId();
- }
-
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- }
-
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- echo "failed: ", $event->getCommandName(), "\n";
- echo "- requestId matches: ", $this->startRequestId == $event->getRequestId() ? 'yes' : 'no', " \n";
- echo "- operationId matches: ", $this->startOperationId == $event->getOperationId() ? 'yes' : 'no', " \n";
- }
-}
-
-$query = new MongoDB\Driver\Query( [] );
-$subscriber = new MySubscriber;
-
-MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
-
-CLEANUP( STANDALONE );
-?>
---EXPECT--
-started: drop
-failed: drop
-- requestId matches: yes
-- operationId matches: yes
diff --git a/mongodb-1.3.4/tests/apm/monitoring-commandStarted-001.phpt b/mongodb-1.3.4/tests/apm/monitoring-commandStarted-001.phpt
deleted file mode 100644
index d7bba94c..00000000
--- a/mongodb-1.3.4/tests/apm/monitoring-commandStarted-001.phpt
+++ /dev/null
@@ -1,55 +0,0 @@
---TEST--
-MongoDB\Driver\Monitoring\CommandStartedEvent
---SKIPIF--
-<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
---FILE--
-<?php
-require_once __DIR__ . "/../utils/basic.inc";
-
-$m = new MongoDB\Driver\Manager(STANDALONE);
-
-class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
-{
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "started: ", $event->getCommandName(), "\n";
- echo "- getCommand() returns an object: ", is_object( $event->getCommand() ) ? 'yes' : 'no', "\n";
- echo "- getCommand() returns a stdClass object: ", $event->getCommand() instanceof stdClass ? 'yes' : 'no', "\n";
- echo "- getDatabaseName() returns a string: ", is_string( $event->getDatabaseName() ) ? 'yes' : 'no', "\n";
- echo "- getDatabaseName() returns '", $event->getDatabaseName(), "'\n";
- echo "- getCommandName() returns a string: ", is_string( $event->getCommandName() ) ? 'yes' : 'no', "\n";
- echo "- getCommandName() returns '", $event->getCommandName(), "'\n";
- echo "- getServer() returns an object: ", is_object( $event->getServer() ) ? 'yes' : 'no', "\n";
- echo "- getServer() returns a Server object: ", $event->getServer() instanceof MongoDB\Driver\Server ? 'yes' : 'no', "\n";
- echo "- getOperationId() returns a string: ", is_string( $event->getOperationId() ) ? 'yes' : 'no', "\n";
- echo "- getRequestId() returns a string: ", is_string( $event->getRequestId() ) ? 'yes' : 'no', "\n";
- }
-
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- }
-
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- }
-}
-
-$query = new MongoDB\Driver\Query( [] );
-$subscriber = new MySubscriber;
-
-MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
-
-$cursor = $m->executeQuery( "demo.test", $query );
-?>
---EXPECT--
-started: find
-- getCommand() returns an object: yes
-- getCommand() returns a stdClass object: yes
-- getDatabaseName() returns a string: yes
-- getDatabaseName() returns 'demo'
-- getCommandName() returns a string: yes
-- getCommandName() returns 'find'
-- getServer() returns an object: yes
-- getServer() returns a Server object: yes
-- getOperationId() returns a string: yes
-- getRequestId() returns a string: yes
diff --git a/mongodb-1.3.4/tests/apm/monitoring-commandSucceeded-001.phpt b/mongodb-1.3.4/tests/apm/monitoring-commandSucceeded-001.phpt
deleted file mode 100644
index b1a43201..00000000
--- a/mongodb-1.3.4/tests/apm/monitoring-commandSucceeded-001.phpt
+++ /dev/null
@@ -1,57 +0,0 @@
---TEST--
-MongoDB\Driver\Monitoring\CommandSucceededEvent
---SKIPIF--
-<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
---FILE--
-<?php
-require_once __DIR__ . "/../utils/basic.inc";
-
-$m = new MongoDB\Driver\Manager(STANDALONE);
-
-class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
-{
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "started: ", $event->getCommandName(), "\n";
- }
-
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- echo "succeeded: ", $event->getCommandName(), "\n";
- echo "- getReply() returns an object: ", is_object( $event->getReply() ) ? 'yes' : 'no', "\n";
- echo "- getReply() returns a stdClass object: ", $event->getReply() instanceof stdClass ? 'yes' : 'no', "\n";
- echo "- getDurationMicros() returns an integer: ", is_integer( $event->getDurationMicros() ) ? 'yes' : 'no', "\n";
- echo "- getDurationMicros() returns > 0: ", $event->getDurationMicros() > 0 ? 'yes' : 'no', "\n";
- echo "- getCommandName() returns a string: ", is_string( $event->getCommandName() ) ? 'yes' : 'no', "\n";
- echo "- getCommandName() returns '", $event->getCommandName(), "'\n";
- echo "- getServer() returns an object: ", is_object( $event->getServer() ) ? 'yes' : 'no', "\n";
- echo "- getServer() returns a Server object: ", $event->getServer() instanceof MongoDB\Driver\Server ? 'yes' : 'no', "\n";
- echo "- getOperationId() returns a string: ", is_string( $event->getOperationId() ) ? 'yes' : 'no', "\n";
- echo "- getRequestId() returns a string: ", is_string( $event->getRequestId() ) ? 'yes' : 'no', "\n";
- }
-
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- }
-}
-
-$query = new MongoDB\Driver\Query( [] );
-$subscriber = new MySubscriber;
-
-MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
-
-$cursor = $m->executeQuery( "demo.test", $query );
-?>
---EXPECT--
-started: find
-succeeded: find
-- getReply() returns an object: yes
-- getReply() returns a stdClass object: yes
-- getDurationMicros() returns an integer: yes
-- getDurationMicros() returns > 0: yes
-- getCommandName() returns a string: yes
-- getCommandName() returns 'find'
-- getServer() returns an object: yes
-- getServer() returns a Server object: yes
-- getOperationId() returns a string: yes
-- getRequestId() returns a string: yes
diff --git a/mongodb-1.3.4/tests/apm/monitoring-commandSucceeded-002.phpt b/mongodb-1.3.4/tests/apm/monitoring-commandSucceeded-002.phpt
deleted file mode 100644
index 17c7cf44..00000000
--- a/mongodb-1.3.4/tests/apm/monitoring-commandSucceeded-002.phpt
+++ /dev/null
@@ -1,43 +0,0 @@
---TEST--
-MongoDB\Driver\Monitoring\CommandSucceededEvent: requestId and operationId match
---SKIPIF--
-<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
---FILE--
-<?php
-require_once __DIR__ . "/../utils/basic.inc";
-
-$m = new MongoDB\Driver\Manager(STANDALONE);
-
-class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
-{
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "started: ", $event->getCommandName(), "\n";
- $this->startRequestId = $event->getRequestId();
- $this->startOperationId = $event->getOperationId();
- }
-
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- echo "succeeded: ", $event->getCommandName(), "\n";
- echo "- requestId matches: ", $this->startRequestId == $event->getRequestId() ? 'yes' : 'no', " \n";
- echo "- operationId matches: ", $this->startOperationId == $event->getOperationId() ? 'yes' : 'no', " \n";
- }
-
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- }
-}
-
-$query = new MongoDB\Driver\Query( [] );
-$subscriber = new MySubscriber;
-
-MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
-
-$cursor = $m->executeQuery( "demo.test", $query );
-?>
---EXPECT--
-started: find
-succeeded: find
-- requestId matches: yes
-- operationId matches: yes
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP_error-005.phpt b/mongodb-1.3.4/tests/bson/bson-toPHP_error-005.phpt
deleted file mode 100644
index 2e64a6bf..00000000
--- a/mongodb-1.3.4/tests/bson/bson-toPHP_error-005.phpt
+++ /dev/null
@@ -1,30 +0,0 @@
---TEST--
-MongoDB\BSON\toPHP(): BSON decoding ignores unsupported BSON types
---FILE--
-<?php
-$tests = [
- pack('VCa*xx', 10, 0x06, 'foo'), // undefined
- pack('VCa*xVa*xx12x', 37, 0x0C, 'foo', 11, 'collection'), // DBPointer
- pack('VCa*xVa*xx', 18, 0x0E, 'foo', 4, 'bar'), // symbol
-];
-
-ini_set('mongodb.debug', 'stdout');
-foreach ($tests as $bson) {
- var_dump(MongoDB\BSON\toPHP($bson));
-}
-ini_set('mongodb.debug', 'off');
-
-?>
-===DONE===
-<?php exit(0); ?>
---EXPECTF--
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x06 (undefined) for fieldname "foo"
-object(stdClass)#%d (%d) {
-}
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x0C (DBPointer) for fieldname "foo"
-object(stdClass)#%d (%d) {
-}
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x0E (symbol) for fieldname "foo"
-object(stdClass)#%d (%d) {
-}
-===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP_error-006.phpt b/mongodb-1.3.4/tests/bson/bson-toPHP_error-006.phpt
deleted file mode 100644
index 67c03beb..00000000
--- a/mongodb-1.3.4/tests/bson/bson-toPHP_error-006.phpt
+++ /dev/null
@@ -1,60 +0,0 @@
---TEST--
-MongoDB\BSON\toPHP(): BSON decoding shows multiple warnings
---FILE--
-<?php
-
-$tests = [
- // two undefined fields in root document
- pack('VCa*xCa*xx', 13, 0x06, 'u1', 0x06, 'u2'),
- // undefined field and symbol field in root document
- pack('VCa*xCa*xVa*xx', 21, 0x06, 'u1', 0x0E, 's1', 4, 'foo'),
- // two undefined fields in root (first) and embedded (second) documents
- pack('VCa*xCa*xVCa*xxx', 22, 0x06, 'u1', 0x03, 'e1', 9, 0x06, 'u2'),
- // two undefined fields in embedded (first) and root (second) documents
- pack('VCa*xVCa*xxCa*xx', 22, 0x03, 'e1', 9, 0x06, 'u1', 0x06, 'u2'),
- // two undefined fields in separate embedded documents
- pack('VCa*xVCa*xxCa*xVCa*xxx', 31, 0x03, 'e1', 9, 0x06, 'u1', 0x03, 'e2', 9, 0x06, 'u2'),
-];
-
-ini_set('mongodb.debug', 'stdout');
-foreach ($tests as $bson) {
- var_dump(MongoDB\BSON\toPHP($bson));
-}
-ini_set('mongodb.debug', 'off');
-?>
-===DONE===
-<?php exit(0); ?>
---EXPECTF--
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x06 (undefined) for fieldname "u1"
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x06 (undefined) for fieldname "u2"
-object(stdClass)#%d (%d) {
-}
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x06 (undefined) for fieldname "u1"
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x0E (symbol) for fieldname "s1"
-object(stdClass)#%d (%d) {
-}
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x06 (undefined) for fieldname "u1"
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x06 (undefined) for fieldname "u2"
-object(stdClass)#%d (%d) {
- ["e1"]=>
- object(stdClass)#%d (0) {
- }
-}
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x06 (undefined) for fieldname "u1"
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x06 (undefined) for fieldname "u2"
-object(stdClass)#%d (%d) {
- ["e1"]=>
- object(stdClass)#%d (0) {
- }
-}
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x06 (undefined) for fieldname "u1"
-[%s] PHONGO-BSON: WARNING > Detected unsupported BSON type 0x06 (undefined) for fieldname "u2"
-object(stdClass)#%d (%d) {
- ["e1"]=>
- object(stdClass)#%d (0) {
- }
- ["e2"]=>
- object(stdClass)#%d (0) {
- }
-}
-===DONE===
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-insert-002.phpt b/mongodb-1.3.4/tests/bulk/bulkwrite-insert-002.phpt
deleted file mode 100644
index 200dc2aa..00000000
--- a/mongodb-1.3.4/tests/bulk/bulkwrite-insert-002.phpt
+++ /dev/null
@@ -1,47 +0,0 @@
---TEST--
-MongoDB\Driver\BulkWrite::insert() with legacy index (pre-2.6 server)
---SKIPIF--
-<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
-<?php NEEDS('STANDALONE_24'); CLEANUP(STANDALONE_24); ?>
---FILE--
-<?php
-require_once __DIR__ . "/../utils/basic.inc";
-
-$manager = new MongoDB\Driver\Manager(STANDALONE_24);
-
-$legacyIndex = [
- 'key' => ['a.b' => 1],
- 'name' => 'a.b_1',
- 'ns' => NS,
-];
-
-$bulk = new MongoDB\Driver\BulkWrite();
-$bulk->insert($legacyIndex);
-$result = $manager->executeBulkWrite(DATABASE_NAME . '.system.indexes', $bulk);
-printf("Created %d index(es)\n", $result->getInsertedCount());
-
-$cursor = $manager->executeQuery(DATABASE_NAME . '.system.indexes', new MongoDB\Driver\Query(['name' => 'a.b_1']));
-var_dump($cursor->toArray());
-
-?>
-===DONE===
-<?php exit(0); ?>
---EXPECTF--
-Created 1 index(es)
-array(1) {
- [0]=>
- object(stdClass)#%d (%d) {
- ["v"]=>
- int(1)
- ["name"]=>
- string(5) "a.b_1"
- ["key"]=>
- object(stdClass)#%d (%d) {
- ["a.b"]=>
- int(1)
- }
- ["ns"]=>
- string(%d) "%s"
- }
-}
-===DONE===
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-insert-003.phpt b/mongodb-1.3.4/tests/bulk/bulkwrite-insert-003.phpt
deleted file mode 100644
index 84ec6f57..00000000
--- a/mongodb-1.3.4/tests/bulk/bulkwrite-insert-003.phpt
+++ /dev/null
@@ -1,56 +0,0 @@
---TEST--
-MongoDB\Driver\BulkWrite::insert() with legacy index false positive (2.6+ server)
---SKIPIF--
-<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
-<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
---FILE--
-<?php
-require_once __DIR__ . "/../utils/basic.inc";
-
-/* Note: 2.6+ servers use the createIndexes command instead of inserting to a
- * "system.indexes" collection. The purpose of this test is to ensure that a
- * false positive in our legacy index detection does not interfere with inserts
- * for newer servers. Although the driver will set libmongoc's "legacyIndex"
- * option and allow dots in BSON keys, we are not testing the server's BSON
- * validation here and will only attempt to insert a valid document. */
-$manager = new MongoDB\Driver\Manager(STANDALONE);
-
-$legacyIndex = [
- 'key' => ['a' => 1], // Do not attempt to use dots in BSON keys
- 'name' => 'a_1',
- 'ns' => NS,
-];
-
-$bulk = new MongoDB\Driver\BulkWrite();
-$bulk->insert($legacyIndex);
-$result = $manager->executeBulkWrite(NS, $bulk);
-printf("Inserted %d document(s)\n", $result->getInsertedCount());
-
-$cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(['name' => 'a_1']));
-var_dump($cursor->toArray());
-
-?>
-===DONE===
-<?php exit(0); ?>
---EXPECTF--
-Inserted 1 document(s)
-array(1) {
- [0]=>
- object(stdClass)#%d (%d) {
- ["_id"]=>
- object(MongoDB\BSON\ObjectId)#%d (%d) {
- ["oid"]=>
- string(24) "%x"
- }
- ["key"]=>
- object(stdClass)#%d (%d) {
- ["a"]=>
- int(1)
- }
- ["name"]=>
- string(3) "a_1"
- ["ns"]=>
- string(32) "phongo.bulk_bulkwrite_insert_003"
- }
-}
-===DONE===
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-insert_error-004.phpt b/mongodb-1.3.4/tests/bulk/bulkwrite-insert_error-004.phpt
deleted file mode 100644
index 445afd34..00000000
--- a/mongodb-1.3.4/tests/bulk/bulkwrite-insert_error-004.phpt
+++ /dev/null
@@ -1,34 +0,0 @@
---TEST--
-MongoDB\Driver\BulkWrite::insert() with invalid insert document (legacy index)
---FILE--
-<?php
-
-require_once __DIR__ . '/../utils/tools.php';
-
-$bulk = new MongoDB\Driver\BulkWrite;
-
-echo throws(function() use ($bulk) {
- $bulk->insert(['' => 1, 'key' => ['a.b' => 1], 'name' => 'a.b_1', 'ns' => 'foo']);
-}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n";
-
-echo throws(function() use ($bulk) {
- $bulk->insert(['$x' => 1, 'key' => ['a.b' => 1], 'name' => 'a.b_1', 'ns' => 'foo']);
-}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n";
-
-echo throws(function() use ($bulk) {
- $bulk->insert(["\xc3\x28" => 1, 'key' => ['a.b' => 1], 'name' => 'a.b_1', 'ns' => 'foo']);
-}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
-
-?>
-===DONE===
-<?php exit(0); ?>
---EXPECT--
-OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-legacy index document contains invalid key: empty key
-
-OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-legacy index document contains invalid key: keys cannot begin with "$": "$x"
-
-OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-legacy index document contains invalid key: corrupt BSON
-===DONE===
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getmodifiedcount-003.phpt b/mongodb-1.3.4/tests/writeResult/writeresult-getmodifiedcount-003.phpt
deleted file mode 100644
index daa2da9b..00000000
--- a/mongodb-1.3.4/tests/writeResult/writeresult-getmodifiedcount-003.phpt
+++ /dev/null
@@ -1,28 +0,0 @@
---TEST--
-MongoDB\Driver\WriteResult::getModifiedCount() not available for legacy writes
---SKIPIF--
-<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
-<?php NEEDS('STANDALONE_24'); CLEANUP(STANDALONE_24); ?>
---FILE--
-<?php
-require_once __DIR__ . "/../utils/basic.inc";
-
-$manager = new MongoDB\Driver\Manager(STANDALONE_24);
-
-$bulk = new MongoDB\Driver\BulkWrite;
-$bulk->insert(['x' => 1]);
-$bulk->update(['x' => 1], ['$set' => ['y' => 3]]);
-$bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]);
-$bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]);
-$bulk->delete(['x' => 1]);
-
-$result = $manager->executeBulkWrite(NS, $bulk);
-
-var_dump($result->getModifiedCount());
-
-?>
-===DONE===
-<?php exit(0); ?>
---EXPECT--
-NULL
-===DONE===
diff --git a/mongodb-1.3.4/CREDITS b/mongodb-1.4.2/CREDITS
similarity index 100%
rename from mongodb-1.3.4/CREDITS
rename to mongodb-1.4.2/CREDITS
diff --git a/mongodb-1.3.4/LICENSE b/mongodb-1.4.2/LICENSE
similarity index 100%
rename from mongodb-1.3.4/LICENSE
rename to mongodb-1.4.2/LICENSE
diff --git a/mongodb-1.3.4/Makefile.frag b/mongodb-1.4.2/Makefile.frag
similarity index 83%
rename from mongodb-1.3.4/Makefile.frag
rename to mongodb-1.4.2/Makefile.frag
index 88a12441..126e964b 100644
--- a/mongodb-1.3.4/Makefile.frag
+++ b/mongodb-1.4.2/Makefile.frag
@@ -1,80 +1,81 @@
-.PHONY: coverage testclean package package.xml
+.PHONY: coverage test-clean package package.xml
DATE=`date +%Y-%m-%d--%H-%M-%S`
MONGODB_VERSION=$(shell php -n -dextension=modules/mongodb.so -r 'echo MONGODB_VERSION;')
MONGODB_MINOR=$(shell echo $(MONGODB_VERSION) | cut -d. -f1,2)
MONGODB_STABILITY=$(shell php -n -dextension=modules/mongodb.so -r 'echo MONGODB_STABILITY;')
help:
@echo -e "\t$$ make vm"
@echo -e "\t - Launches VMs for running multiple MongoDB variations"
@echo -e "\t$$ make list-servers"
@echo -e "\t - Lists running servers, and their URIs"
@echo -e "\t$$ make test-bootstrap"
@echo -e "\t - Starts up MongoDB through mongo-orchestration"
@echo ""
@echo -e "\t$$ make coveralls"
@echo -e "\t - Creates code coverage report using coveralls"
@echo -e "\t$$ make coverage"
@echo -e "\t - Creates code coverage report using gcov"
@echo ""
@echo -e "\t$$ make distcheck"
@echo -e "\t - Builds the archive, runs the virtual tests"
@echo ""
@echo -e "\t$$ make package.xml"
@echo -e "\t - Creates a package.xml file with empty release notes"
@echo -e "\t$$ make package"
@echo -e "\t - Creates the pecl archive to use for provisioning"
@echo -e "\t$$ make test-virtual"
@echo -e "\t - Provisions some VMs, installs the pecl archive and executes the tests"
mv-coverage:
@if test -e $(top_srcdir)/coverage; then \
echo "Moving previous coverage run to coverage-$(DATE)"; \
mv coverage coverage-$(DATE); \
fi
lcov-coveralls:
lcov --gcov-tool $(top_srcdir)/.llvm-cov.sh --capture --directory . --output-file .coverage.lcov --no-external
lcov-local:
lcov --gcov-tool $(top_srcdir)/.llvm-cov.sh --capture --derive-func-data --directory . --output-file .coverage.lcov --no-external
coverage: mv-coverage lcov-local
genhtml .coverage.lcov --legend --title "mongodb code coverage" --output-directory coverage
coveralls: mv-coverage lcov-coveralls
coveralls --exclude src/libbson --exclude src/libmongoc --exclude src/contrib --exclude lib --exclude tests
vm:
@command -v vagrant >/dev/null 2>&1 || { echo >&2 "Vagrant needs to be installed to run vms"; exit 1; }
@vagrant up ldap mo
list-servers:
php scripts/list-servers.php
test-bootstrap:
+ vagrant reload mo
+ vagrant ssh mo -c 'sudo rm -f /home/vagrant/server.pid'
+ vagrant ssh mo -c 'sudo mongo-orchestration -f mongo-orchestration-config.json -b 192.168.112.10 --enable-majority-read-concern start'
php scripts/start-servers.php
distcheck: package test-virtual
test-virtual: package
sh ./scripts/run-tests-on.sh freebsd
sh ./scripts/run-tests-on.sh precise32
sh ./scripts/run-tests-on.sh precise64
-testclean:
- @for group in generic standalone; do \
- find $(top_srcdir)/tests/$$group -type f -name "*.diff" -o -name "*.exp" -o -name "*.log" -o -name "*.mem" -o -name "*.out" -o -name "*.php" -o -name "*.sh" | xargs rm -f; \
- done;
+test-clean:
+ find $(top_srcdir)/tests -not \( -path $(top_srcdir)/tests/utils -prune \) -type f -name "*.diff" -o -name "*.exp" -o -name "*.log" -o -name "*.mem" -o -name "*.out" -o -name "*.php" -o -name "*.sh" | xargs -r rm
package:
pecl package package.xml
package.xml:
php bin/prep-release.php $(MONGODB_VERSION) $(MONGODB_STABILITY)
diff --git a/mongodb-1.3.4/README.md b/mongodb-1.4.2/README.md
similarity index 100%
rename from mongodb-1.3.4/README.md
rename to mongodb-1.4.2/README.md
diff --git a/mongodb-1.3.4/Vagrantfile b/mongodb-1.4.2/Vagrantfile
similarity index 92%
rename from mongodb-1.3.4/Vagrantfile
rename to mongodb-1.4.2/Vagrantfile
index 2e40f15a..f3c19df3 100644
--- a/mongodb-1.3.4/Vagrantfile
+++ b/mongodb-1.4.2/Vagrantfile
@@ -1,89 +1,86 @@
# -*- mode: ruby -*-
# vi: set ft=ruby et sw=2 :
Vagrant.configure(2) do |config|
config.vm.synced_folder ".", "/phongo"
config.vm.provider "vmware_workstation" do |vmware, override|
vmware.vmx["memsize"] = "8192"
vmware.vmx["numvcpus"] = "2"
end
config.vm.provider "virtualbox" do |virtualbox|
virtualbox.memory = 2048
virtualbox.cpus = 2
end
config.vm.define "mo", primary: true do |mo|
mo.vm.network "private_network", ip: "192.168.112.10"
- mo.vm.box = "http://files.vagrantup.com/precise64.box"
- mo.vm.provider "vmware_workstation" do |vmware, override|
- override.vm.box_url = 'http://files.vagrantup.com/precise64_vmware.box'
- override.vm.provision "shell", path: "scripts/vmware/kernel.sh", privileged: true
- end
+ mo.vm.box = "ubuntu/trusty64"
mo.vm.provision "shell", path: "scripts/ubuntu/essentials.sh", privileged: true
+ mo.vm.provision "file", source: "scripts/ubuntu/get-pip.py", destination: "get-pip.py"
mo.vm.provision "file", source: "scripts/ubuntu/mongo-orchestration-config.json", destination: "mongo-orchestration-config.json"
mo.vm.provision "shell", path: "scripts/ubuntu/mongo-orchestration.sh", privileged: true
mo.vm.provision "shell", path: "scripts/ubuntu/ldap/install.sh", privileged: true
end
config.vm.define "ldap", autostart: false do |ldap|
ldap.vm.network "private_network", ip: "192.168.112.20"
ldap.vm.box = "http://puppet-vagrant-boxes.puppetlabs.com/centos-64-x64-vbox4210-nocm.box"
ldap.vm.provider "vmware_workstation" do |vmware, override|
override.vm.box_url = "https://dl.dropbox.com/u/5721940/vagrant-boxes/vagrant-centos-6.4-x86_64-vmware_fusion.box"
override.vm.provision "shell", path: "scripts/vmware/kernel.sh", privileged: true
end
ldap.vm.provision "shell", path: "scripts/centos/essentials.sh", privileged: true
ldap.vm.provision "shell", path: "scripts/centos/ldap/install.sh", privileged: true
end
config.vm.define "freebsd", autostart: false do |bsd|
bsd.vm.network "private_network", ip: "192.168.112.30"
bsd.vm.box = "geoffgarside/freebsd-10.0"
bsd.vm.provision "shell", path: "scripts/freebsd/essentials.sh", privileged: true
bsd.vm.provision "file", source: "/tmp/PHONGO-SERVERS.json", destination: "/tmp/PHONGO-SERVERS.json"
bsd.vm.provision "file", source: "scripts/configs/.gdbinit", destination: "/home/vagrant/.gdbinit"
bsd.vm.provision "shell", path: "scripts/freebsd/phongo.sh", privileged: true
bsd.vm.synced_folder ".", "/phongo", :nfs => true, id: "vagrant-root"
end
config.vm.define "precise64" do |linux|
linux.vm.network "private_network", ip: "192.168.112.40"
linux.vm.box = "http://files.vagrantup.com/precise64.box"
linux.vm.provider "vmware_workstation" do |vmware, override|
override.vm.box_url = 'http://files.vagrantup.com/precise64_vmware.box'
override.vm.provision "shell", path: "scripts/vmware/kernel.sh", privileged: true
end
linux.vm.provision "shell", path: "scripts/ubuntu/essentials.sh", privileged: true
linux.vm.provision "file", source: "/tmp/PHONGO-SERVERS.json", destination: "/tmp/PHONGO-SERVERS.json"
linux.vm.provision "file", source: "scripts/configs/.gdbinit", destination: "/home/vagrant/.gdbinit"
linux.vm.provision "shell", path: "scripts/ubuntu/phongo.sh", privileged: true
end
config.vm.define "precise32" do |linux|
linux.vm.network "private_network", ip: "192.168.112.50"
linux.vm.box = "bjori/precise32"
linux.vm.provider "vmware_workstation" do |vmware, override|
override.vm.box_url = "bjori/precise32"
override.vm.provision "shell", path: "scripts/vmware/kernel.sh", privileged: true
end
linux.vm.provision "shell", path: "scripts/ubuntu/essentials.sh", privileged: true
linux.vm.provision "file", source: "/tmp/PHONGO-SERVERS.json", destination: "/tmp/PHONGO-SERVERS.json"
linux.vm.provision "file", source: "scripts/configs/.gdbinit", destination: "/home/vagrant/.gdbinit"
linux.vm.provision "shell", path: "scripts/ubuntu/phongo.sh", privileged: true
end
end
diff --git a/mongodb-1.3.4/config.m4 b/mongodb-1.4.2/config.m4
similarity index 74%
rename from mongodb-1.3.4/config.m4
rename to mongodb-1.4.2/config.m4
index 252d2876..14a9ca54 100644
--- a/mongodb-1.3.4/config.m4
+++ b/mongodb-1.4.2/config.m4
@@ -1,504 +1,540 @@
dnl config.m4 for extension mongodb
-PHP_ARG_ENABLE(mongodb, whether to enable mongodb support,
-[ --enable-mongodb Enable mongodb support])
-PHP_ARG_WITH(openssl-dir, OpenSSL dir for mongodb,
-[ --with-openssl-dir[=DIR] openssl install prefix], yes, no)
-PHP_ARG_WITH(system-ciphers, whether to use system default cipher list instead of hardcoded value,
-[ --with-system-ciphers OPENSSL: Use system default cipher list instead of hardcoded value], no, no)
+PHP_ARG_ENABLE([mongodb],
+ [whether to enable MongoDB support],
+ [AC_HELP_STRING([--enable-mongodb],
+ [Enable MongoDB support])])
dnl borrowed from libmongoc configure.ac
dnl AS_VAR_COPY is available in AC 2.64 and on, but we only require 2.60.
dnl If we're on an older version, we define it ourselves:
m4_ifndef([AS_VAR_COPY],
[m4_define([AS_VAR_COPY],
[AS_LITERAL_IF([$1[]$2], [$1=$$2], [eval $1=\$$2])])])
dnl Get "user-set cflags" here, before we've added the flags we use by default
AS_VAR_COPY(MONGOC_USER_SET_CFLAGS, [CFLAGS])
AC_SUBST(MONGOC_USER_SET_CFLAGS)
AS_VAR_COPY(MONGOC_USER_SET_LDFLAGS, [LDFLAGS])
AC_SUBST(MONGOC_USER_SET_LDFLAGS)
AS_VAR_COPY(MONGOC_CC, [CC])
AC_SUBST(MONGOC_CC)
dnl borrowed from PHP acinclude.m4
AC_DEFUN([PHP_BSON_BIGENDIAN],
[AC_CACHE_CHECK([whether byte ordering is bigendian], ac_cv_c_bigendian_php,
[
ac_cv_c_bigendian_php=unknown
AC_TRY_RUN(
[
int main(void)
{
short one = 1;
char *cp = (char *)&one;
if (*cp == 0) {
return(0);
} else {
return(1);
}
}
], [ac_cv_c_bigendian_php=yes], [ac_cv_c_bigendian_php=no], [ac_cv_c_bigendian_php=unknown])
])
if test $ac_cv_c_bigendian_php = yes; then
AC_SUBST(BSON_BYTE_ORDER, 4321)
else
AC_SUBST(BSON_BYTE_ORDER, 1234)
fi
])
dnl Borrowed from sapi/fpm/config.m4
AC_DEFUN([PHP_BSON_CLOCK],
[
have_clock_gettime=no
AC_MSG_CHECKING([for clock_gettime])
AC_TRY_LINK([ #include <time.h> ], [struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts);], [
have_clock_gettime=yes
AC_MSG_RESULT([yes])
], [
AC_MSG_RESULT([no])
])
if test "$have_clock_gettime" = "no"; then
AC_MSG_CHECKING([for clock_gettime in -lrt])
SAVED_LIBS="$LIBS"
LIBS="$LIBS -lrt"
AC_TRY_LINK([ #include <time.h> ], [struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts);], [
have_clock_gettime=yes
AC_MSG_RESULT([yes])
], [
LIBS="$SAVED_LIBS"
AC_MSG_RESULT([no])
])
fi
if test "$have_clock_gettime" = "yes"; then
AC_SUBST(BSON_HAVE_CLOCK_GETTIME, 1)
fi
])
-AC_MSG_CHECKING(PHP version)
-PHP_FOUND_VERSION=`${PHP_CONFIG} --version`
-PHP_FOUND_VERNUM=`echo "${PHP_FOUND_VERSION}" | $AWK 'BEGIN { FS = "."; } { printf "%d", ([$]1 * 100 + [$]2) * 100 + [$]3;}'`
-AC_MSG_RESULT($PHP_FOUND_VERNUM)
+if test "$PHP_MONGODB" != "no"; then
+ AC_MSG_CHECKING([Check for supported PHP versions])
+ PHP_MONGODB_FOUND_VERSION=`${PHP_CONFIG} --version`
+ PHP_MONGODB_FOUND_VERNUM=`echo "${PHP_MONGODB_FOUND_VERSION}" | $AWK 'BEGIN { FS = "."; } { printf "%d", ([$]1 * 100 + [$]2) * 100 + [$]3;}'`
+ AC_MSG_RESULT($PHP_MONGODB_FOUND_VERSION)
+ if test "$PHP_MONGODB_FOUND_VERNUM" -lt "50500"; then
+ AC_MSG_ERROR([not supported. Need a PHP version >= 5.5.0 (found $PHP_MONGODB_FOUND_VERSION)])
+ fi
-if test "$MONGODB" != "no"; then
- PHP_ARG_ENABLE(developer-flags, whether to enable developer build flags,
- [ --enable-developer-flags Enable developer flags],, no)
+ PHP_ARG_ENABLE([developer-flags],
+ [whether to enable developer build flags],
+ [AC_HELP_STRING([--enable-developer-flags],
+ [MongoDB: Enable developer flags [default=no]])],
+ [no],
+ [no])
if test "$PHP_DEVELOPER_FLAGS" = "yes"; then
dnl Warn about functions which might be candidates for format attributes
PHP_CHECK_GCC_ARG(-Wmissing-format-attribute, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wmissing-format-attribute")
dnl Avoid duplicating values for an enum
PHP_CHECK_GCC_ARG(-Wduplicate-enum, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wduplicate-enum")
dnl Warns on mismatches between #ifndef and #define header guards
PHP_CHECK_GCC_ARG(-Wheader-guard, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wheader-guard")
dnl logical not of a non-boolean expression
PHP_CHECK_GCC_ARG(-Wlogical-not-parentheses, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wlogical-not-parentheses")
dnl Warn about suspicious uses of logical operators in expressions
PHP_CHECK_GCC_ARG(-Wlogical-op, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wlogical-op")
dnl memory error detector.
dnl FIXME: -fsanitize=address,undefined for clang. The PHP_CHECK_GCC_ARG macro isn't happy about that string :(
PHP_CHECK_GCC_ARG(-fsanitize-address, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -fsanitize-address")
dnl Enable frame debugging
PHP_CHECK_GCC_ARG(-fno-omit-frame-pointer, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -fno-omit-frame-pointer")
dnl Make sure we don't optimize calls
PHP_CHECK_GCC_ARG(-fno-optimize-sibling-calls, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -fno-optimize-sibling-calls")
PHP_CHECK_GCC_ARG(-Wlogical-op-parentheses, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wlogical-op-parentheses")
+ PHP_CHECK_GCC_ARG(-Wpointer-bool-conversion, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wpointer-bool-conversion")
PHP_CHECK_GCC_ARG(-Wbool-conversion, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wbool-conversion")
PHP_CHECK_GCC_ARG(-Wloop-analysis, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wloop-analysis")
PHP_CHECK_GCC_ARG(-Wsizeof-array-argument, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wsizeof-array-argument")
PHP_CHECK_GCC_ARG(-Wstring-conversion, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wstring-conversion")
PHP_CHECK_GCC_ARG(-Wno-variadic-macros, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wno-variadic-macros")
PHP_CHECK_GCC_ARG(-Wno-sign-compare, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wno-sign-compare")
PHP_CHECK_GCC_ARG(-fstack-protector, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -fstack-protector")
PHP_CHECK_GCC_ARG(-fno-exceptions, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -fno-exceptions")
PHP_CHECK_GCC_ARG(-Wformat-security, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wformat-security")
PHP_CHECK_GCC_ARG(-Wformat-nonliteral, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wformat-nonliteral")
PHP_CHECK_GCC_ARG(-Winit-self, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Winit-self")
PHP_CHECK_GCC_ARG(-Wwrite-strings, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wwrite-strings")
PHP_CHECK_GCC_ARG(-Wenum-compare, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wenum-compare")
PHP_CHECK_GCC_ARG(-Wempty-body, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wempty-body")
PHP_CHECK_GCC_ARG(-Wparentheses, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wparentheses")
PHP_CHECK_GCC_ARG(-Wdeclaration-after-statement, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wdeclaration-after-statement")
+ PHP_CHECK_GCC_ARG(-Wmaybe-uninitialized, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wmaybe-uninitialized")
+ PHP_CHECK_GCC_ARG(-Wimplicit-fallthrough, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wimplicit-fallthrough")
PHP_CHECK_GCC_ARG(-Werror, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Werror")
PHP_CHECK_GCC_ARG(-Wextra, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wextra")
PHP_CHECK_GCC_ARG(-Wno-unused-parameter, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wno-unused-parameter")
PHP_CHECK_GCC_ARG(-Wno-unused-but-set-variable, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wno-unused-but-set-variable")
PHP_CHECK_GCC_ARG(-Wno-missing-field-initializers, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wno-missing-field-initializers")
MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS"
STD_CFLAGS="-g -O0 -Wall"
fi
- PHP_ARG_ENABLE(coverage, whether to enable code coverage,
- [ --enable-coverage Enable developer code coverage information],, no)
+ PHP_ARG_ENABLE([coverage],
+ [whether to enable code coverage],
+ [AC_HELP_STRING([--enable-coverage],
+ [MongoDB: Enable developer code coverage information [default=no]])],
+ [no],
+ [no])
if test "$PHP_COVERAGE" = "yes"; then
PHP_CHECK_GCC_ARG(-fprofile-arcs, COVERAGE_CFLAGS="$COVERAGE_CFLAGS -fprofile-arcs")
PHP_CHECK_GCC_ARG(-ftest-coverage, COVERAGE_CFLAGS="$COVERAGE_CFLAGS -ftest-coverage")
EXTRA_LDFLAGS="$COVERAGE_CFLAGS"
fi
PHP_MONGODB_CFLAGS="$STD_CFLAGS $MAINTAINER_CFLAGS $COVERAGE_CFLAGS"
PHP_MONGODB_SOURCES="\
php_phongo.c \
phongo_compat.c \
src/bson.c \
src/bson-encode.c \
src/BSON/Binary.c \
src/BSON/BinaryInterface.c \
+ src/BSON/DBPointer.c \
src/BSON/Decimal128.c \
src/BSON/Decimal128Interface.c \
src/BSON/Javascript.c \
src/BSON/JavascriptInterface.c \
src/BSON/MaxKey.c \
src/BSON/MaxKeyInterface.c \
src/BSON/MinKey.c \
src/BSON/MinKeyInterface.c \
src/BSON/ObjectId.c \
src/BSON/ObjectIdInterface.c \
src/BSON/Persistable.c \
src/BSON/Regex.c \
src/BSON/RegexInterface.c \
src/BSON/Serializable.c \
+ src/BSON/Symbol.c \
src/BSON/Timestamp.c \
src/BSON/TimestampInterface.c \
src/BSON/Type.c \
+ src/BSON/Undefined.c \
src/BSON/Unserializable.c \
src/BSON/UTCDateTime.c \
src/BSON/UTCDateTimeInterface.c \
src/BSON/functions.c \
src/MongoDB/BulkWrite.c \
src/MongoDB/Command.c \
src/MongoDB/Cursor.c \
src/MongoDB/CursorId.c \
src/MongoDB/Manager.c \
src/MongoDB/Query.c \
src/MongoDB/ReadConcern.c \
src/MongoDB/ReadPreference.c \
src/MongoDB/Server.c \
+ src/MongoDB/Session.c \
src/MongoDB/WriteConcern.c \
src/MongoDB/WriteConcernError.c \
src/MongoDB/WriteError.c \
src/MongoDB/WriteResult.c \
src/MongoDB/Exception/AuthenticationException.c \
src/MongoDB/Exception/BulkWriteException.c \
src/MongoDB/Exception/ConnectionException.c \
src/MongoDB/Exception/ConnectionTimeoutException.c \
src/MongoDB/Exception/Exception.c \
src/MongoDB/Exception/ExecutionTimeoutException.c \
src/MongoDB/Exception/InvalidArgumentException.c \
src/MongoDB/Exception/LogicException.c \
src/MongoDB/Exception/RuntimeException.c \
src/MongoDB/Exception/SSLConnectionException.c \
src/MongoDB/Exception/UnexpectedValueException.c \
src/MongoDB/Exception/WriteException.c \
src/MongoDB/Monitoring/CommandFailedEvent.c \
src/MongoDB/Monitoring/CommandStartedEvent.c \
src/MongoDB/Monitoring/CommandSubscriber.c \
src/MongoDB/Monitoring/CommandSucceededEvent.c \
src/MongoDB/Monitoring/Subscriber.c \
src/MongoDB/Monitoring/functions.c \
"
- PHP_ARG_WITH(libbson, whether to use system libbson,
- [ --with-libbson Use system libbson], no, no)
- PHP_ARG_WITH(libmongoc, whether to use system libmongoc,
- [ --with-libmongoc Use system libmongoc], no, no)
+ PHP_ARG_WITH([libbson],
+ [whether to use system libbson],
+ [AS_HELP_STRING([--with-libbson=@<:@yes/no@:>@],
+ [MongoDB: Use system libbson [default=no]])],
+ [no],
+ [no])
+ PHP_ARG_WITH([libmongoc],
+ [whether to use system libmongoc],
+ [AS_HELP_STRING([--with-libmongoc=@<:@yes/no@:>@],
+ [MongoDB: Use system libmongoc [default=no]])],
+ [no],
+ [no])
if test "$PHP_LIBBSON" != "no"; then
- if test "$PHP_LIBMONGOC" == "no"; then
+ if test "$PHP_LIBMONGOC" = "no"; then
AC_MSG_ERROR(Cannot use system libbson and bundled libmongoc)
fi
AC_PATH_PROG(PKG_CONFIG, pkg-config, no)
AC_MSG_CHECKING(for libbson)
if test -x "$PKG_CONFIG" && $PKG_CONFIG --exists libbson-1.0; then
- if $PKG_CONFIG libbson-1.0 --atleast-version 1.8.0; then
+ if $PKG_CONFIG libbson-1.0 --atleast-version 1.9.0; then
LIBBSON_INC=`$PKG_CONFIG libbson-1.0 --cflags`
LIBBSON_LIB=`$PKG_CONFIG libbson-1.0 --libs`
LIBBSON_VER=`$PKG_CONFIG libbson-1.0 --modversion`
AC_MSG_RESULT(version $LIBBSON_VER found)
else
- AC_MSG_ERROR(system libbson must be upgraded to version >= 1.8.0)
+ AC_MSG_ERROR(system libbson must be upgraded to version >= 1.9.0)
fi
else
AC_MSG_ERROR(pkgconfig and libbson must be installed)
fi
PHP_EVAL_INCLINE($LIBBSON_INC)
PHP_EVAL_LIBLINE($LIBBSON_LIB, MONGODB_SHARED_LIBADD)
AC_DEFINE(HAVE_SYSTEM_LIBBSON, 1, [Use system libbson])
else
PHP_MONGODB_BSON_CFLAGS="$STD_CFLAGS -DBSON_COMPILATION"
dnl Generated with: find src/libbson/src/bson -name '*.c' -print0 | cut -sz -d / -f 5- | sort -z | tr '\000' ' '
PHP_MONGODB_BSON_SOURCES="bcon.c bson-atomic.c bson.c bson-clock.c bson-context.c bson-decimal128.c bson-error.c bson-iso8601.c bson-iter.c bson-json.c bson-keys.c bson-md5.c bson-memory.c bson-oid.c bson-reader.c bson-string.c bson-timegm.c bson-utf8.c bson-value.c bson-version-functions.c bson-writer.c"
dnl Generated with: find src/libbson/src/jsonsl -name '*.c' -print0 | cut -sz -d / -f 5- | sort -z | tr '\000' ' '
PHP_MONGODB_JSONSL_SOURCES="jsonsl.c"
PHP_ADD_SOURCES_X(PHP_EXT_DIR(mongodb)[src/libbson/src/bson], $PHP_MONGODB_BSON_SOURCES, $PHP_MONGODB_BSON_CFLAGS, shared_objects_mongodb, yes)
PHP_ADD_SOURCES_X(PHP_EXT_DIR(mongodb)[src/libbson/src/jsonsl], $PHP_MONGODB_JSONSL_SOURCES, $PHP_MONGODB_BSON_CFLAGS, shared_objects_mongodb, yes)
fi
AC_MSG_CHECKING(configuring libmongoc)
AC_MSG_RESULT(...)
if test "$PHP_LIBMONGOC" != "no"; then
- if test "$PHP_LIBBSON" == "no"; then
+ if test "$PHP_LIBBSON" = "no"; then
AC_MSG_ERROR(Cannot use system libmongoc and bundled libbson)
fi
AC_PATH_PROG(PKG_CONFIG, pkg-config, no)
AC_MSG_CHECKING(for libmongoc)
if test -x "$PKG_CONFIG" && $PKG_CONFIG --exists libmongoc-1.0; then
- if $PKG_CONFIG libmongoc-1.0 --atleast-version 1.8.0; then
+ if $PKG_CONFIG libmongoc-1.0 --atleast-version 1.9.0; then
LIBMONGOC_INC=`$PKG_CONFIG libmongoc-1.0 --cflags`
LIBMONGOC_LIB=`$PKG_CONFIG libmongoc-1.0 --libs`
LIBMONGOC_VER=`$PKG_CONFIG libmongoc-1.0 --modversion`
AC_MSG_RESULT(version $LIBMONGOC_VER found)
else
- AC_MSG_ERROR(system libmongoc must be upgraded to version >= 1.8.0)
+ AC_MSG_ERROR(system libmongoc must be upgraded to version >= 1.9.0)
fi
else
- AC_MSG_ERROR(pkgconfig and mongoc must be installed)
+ AC_MSG_ERROR(pkgconfig and libmongoc must be installed)
fi
PHP_EVAL_INCLINE($LIBMONGOC_INC)
PHP_EVAL_LIBLINE($LIBMONGOC_LIB, MONGODB_SHARED_LIBADD)
AC_DEFINE(HAVE_SYSTEM_LIBMONGOC, 1, [Use system libmongoc])
else
PHP_MONGODB_MONGOC_CFLAGS="$STD_CFLAGS -DMONGOC_COMPILATION -DMONGOC_TRACE"
dnl Generated with: find src/libmongoc/src/mongoc -name '*.c' -print0 | cut -sz -d / -f 5- | sort -z | tr '\000' ' '
- PHP_MONGODB_MONGOC_SOURCES="mongoc-apm.c mongoc-array.c mongoc-async.c mongoc-async-cmd.c mongoc-b64.c mongoc-buffer.c mongoc-bulk-operation.c mongoc-client.c mongoc-client-pool.c mongoc-cluster.c mongoc-cluster-cyrus.c mongoc-cluster-gssapi.c mongoc-cluster-sasl.c mongoc-cluster-sspi.c mongoc-cmd.c mongoc-collection.c mongoc-compression.c mongoc-counters.c mongoc-crypto.c mongoc-crypto-cng.c mongoc-crypto-common-crypto.c mongoc-crypto-openssl.c mongoc-cursor-array.c mongoc-cursor.c mongoc-cursor-cursorid.c mongoc-cursor-transform.c mongoc-cyrus.c mongoc-database.c mongoc-find-and-modify.c mongoc-gridfs.c mongoc-gridfs-file.c mongoc-gridfs-file-list.c mongoc-gridfs-file-page.c mongoc-gssapi.c mongoc-handshake.c mongoc-host-list.c mongoc-index.c mongoc-init.c mongoc-libressl.c mongoc-linux-distro-scanner.c mongoc-list.c mongoc-log.c mongoc-matcher.c mongoc-matcher-op.c mongoc-memcmp.c mongoc-openssl.c mongoc-queue.c mongoc-rand-cng.c mongoc-rand-common-crypto.c mongoc-rand-openssl.c mongoc-read-concern.c mongoc-read-prefs.c mongoc-rpc.c mongoc-sasl.c mongoc-scram.c mongoc-secure-channel.c mongoc-secure-transport.c mongoc-server-description.c mongoc-server-stream.c mongoc-set.c mongoc-socket.c mongoc-ssl.c mongoc-sspi.c mongoc-stream-buffered.c mongoc-stream.c mongoc-stream-file.c mongoc-stream-gridfs.c mongoc-stream-socket.c mongoc-stream-tls.c mongoc-stream-tls-libressl.c mongoc-stream-tls-openssl-bio.c mongoc-stream-tls-openssl.c mongoc-stream-tls-secure-channel.c mongoc-stream-tls-secure-transport.c mongoc-topology.c mongoc-topology-description-apm.c mongoc-topology-description.c mongoc-topology-scanner.c mongoc-uri.c mongoc-util.c mongoc-version-functions.c mongoc-write-command.c mongoc-write-concern.c"
+ PHP_MONGODB_MONGOC_SOURCES="mongoc-apm.c mongoc-array.c mongoc-async.c mongoc-async-cmd.c mongoc-b64.c mongoc-buffer.c mongoc-bulk-operation.c mongoc-change-stream.c mongoc-client.c mongoc-client-pool.c mongoc-client-session.c mongoc-cluster.c mongoc-cluster-cyrus.c mongoc-cluster-gssapi.c mongoc-cluster-sasl.c mongoc-cluster-sspi.c mongoc-cmd.c mongoc-collection.c mongoc-compression.c mongoc-counters.c mongoc-crypto.c mongoc-crypto-cng.c mongoc-crypto-common-crypto.c mongoc-crypto-openssl.c mongoc-cursor-array.c mongoc-cursor.c mongoc-cursor-cursorid.c mongoc-cursor-transform.c mongoc-cyrus.c mongoc-database.c mongoc-find-and-modify.c mongoc-gridfs.c mongoc-gridfs-file.c mongoc-gridfs-file-list.c mongoc-gridfs-file-page.c mongoc-gssapi.c mongoc-handshake.c mongoc-host-list.c mongoc-index.c mongoc-init.c mongoc-libressl.c mongoc-linux-distro-scanner.c mongoc-list.c mongoc-log.c mongoc-matcher.c mongoc-matcher-op.c mongoc-memcmp.c mongoc-openssl.c mongoc-queue.c mongoc-rand-cng.c mongoc-rand-common-crypto.c mongoc-rand-openssl.c mongoc-read-concern.c mongoc-read-prefs.c mongoc-rpc.c mongoc-sasl.c mongoc-scram.c mongoc-secure-channel.c mongoc-secure-transport.c mongoc-server-description.c mongoc-server-stream.c mongoc-set.c mongoc-socket.c mongoc-ssl.c mongoc-sspi.c mongoc-stream-buffered.c mongoc-stream.c mongoc-stream-file.c mongoc-stream-gridfs.c mongoc-stream-socket.c mongoc-stream-tls.c mongoc-stream-tls-libressl.c mongoc-stream-tls-openssl-bio.c mongoc-stream-tls-openssl.c mongoc-stream-tls-secure-channel.c mongoc-stream-tls-secure-transport.c mongoc-topology.c mongoc-topology-description-apm.c mongoc-topology-description.c mongoc-topology-scanner.c mongoc-uri.c mongoc-util.c mongoc-version-functions.c mongoc-write-command.c mongoc-write-command-legacy.c mongoc-write-concern.c"
+
+ dnl Generated with: find src/libmongoc/src/zlib-1.2.11 -maxdepth 1 -name '*.c' -print0 | cut -sz -d / -f 5- | sort -z | tr '\000' ' '
+ PHP_MONGODB_ZLIB_SOURCES="adler32.c compress.c crc32.c deflate.c gzclose.c gzlib.c gzread.c gzwrite.c infback.c inffast.c inflate.c inftrees.c trees.c uncompr.c zutil.c"
PHP_ADD_SOURCES_X(PHP_EXT_DIR(mongodb)[src/libmongoc/src/mongoc], $PHP_MONGODB_MONGOC_SOURCES, $PHP_MONGODB_MONGOC_CFLAGS, shared_objects_mongodb, yes)
- AC_SUBST(MONGOC_ENABLE_CRYPTO, 0)
- AC_SUBST(MONGOC_ENABLE_SSL, 0)
- AC_SUBST(MONGOC_ENABLE_CRYPTO_LIBCRYPTO, 0)
- AC_SUBST(MONGOC_ENABLE_SSL_OPENSSL, 0)
- AC_SUBST(MONGOC_HAVE_ASN1_STRING_GET0_DATA, 0)
-
- PHP_SETUP_OPENSSL(MONGODB_SHARED_LIBADD, [
- AC_SUBST(MONGOC_ENABLE_CRYPTO, 1)
- AC_SUBST(MONGOC_ENABLE_SSL, 1)
- AC_SUBST(MONGOC_ENABLE_CRYPTO_LIBCRYPTO, 1)
- AC_SUBST(MONGOC_ENABLE_SSL_OPENSSL, 1)
- ])
+ m4_include(scripts/build/autotools/m4/pkg.m4)
+
+ m4_include(scripts/build/autotools/CheckHost.m4)
+ m4_include(scripts/build/autotools/CheckSSL.m4)
if test "$PHP_SYSTEM_CIPHERS" != "no"; then
AC_SUBST(MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE, 1)
else
AC_SUBST(MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE, 0)
fi
- dnl TODO: Support building with Secure Transport on OSX
- AC_SUBST(MONGOC_ENABLE_SSL_SECURE_TRANSPORT, 0)
- AC_SUBST(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO, 0)
-
- dnl Secure Channel only applies to Windows
- AC_SUBST(MONGOC_ENABLE_SSL_SECURE_CHANNEL, 0)
- AC_SUBST(MONGOC_ENABLE_CRYPTO_CNG, 0)
-
- AC_SUBST(MONGOC_ENABLE_SSL_LIBRESSL, 0)
-
AC_SUBST(MONGOC_NO_AUTOMATIC_GLOBALS, 1)
AC_CHECK_TYPE([socklen_t], [AC_SUBST(MONGOC_HAVE_SOCKLEN, 1)], [AC_SUBST(MONGOC_HAVE_SOCKLEN, 0)], [#include <sys/socket.h>])
- AC_SUBST(MONGOC_ENABLE_COMPRESSION_SNAPPY, 0)
- AC_SUBST(MONGOC_ENABLE_COMPRESSION_ZLIB, 0)
- AC_SUBST(MONGOC_ENABLE_COMPRESSION, 0)
+ with_snappy=auto
+ with_zlib=auto
+ m4_include(src/libmongoc/build/autotools/CheckSnappy.m4)
+ m4_include(src/libmongoc/build/autotools/CheckZlib.m4)
+
+ if test "x$with_zlib" != "xno" -o "x$with_snappy" != "xno"; then
+ AC_SUBST(MONGOC_ENABLE_COMPRESSION, 1)
+ else
+ AC_SUBST(MONGOC_ENABLE_COMPRESSION, 0)
+ fi
+
+ if test "x$with_zlib" = "xbundled"; then
+ PHP_ADD_SOURCES_X(PHP_EXT_DIR(mongodb)[src/libmongoc/src/zlib-1.2.11], $PHP_MONGODB_ZLIB_SOURCES, $PHP_MONGODB_MONGOC_CFLAGS, shared_objects_mongodb, yes)
+ fi
fi
- PHP_ARG_WITH(mongodb-sasl, for Cyrus SASL support,
- [ --with-mongodb-sasl[=DIR] mongodb: Include Cyrus SASL support], auto, no)
+ PHP_ARG_WITH([mongodb-sasl],
+ [for Cyrus SASL support],
+ [AC_HELP_STRING([--with-mongodb-sasl=@<:@auto/no/DIR@:>@],
+ [MongoDB: Cyrus SASL support [default=auto]])],
+ [auto],
+ [no])
AC_SUBST(MONGOC_ENABLE_SASL, 0)
AC_SUBST(MONGOC_HAVE_SASL_CLIENT_DONE, 0)
AC_SUBST(MONGOC_ENABLE_SASL_CYRUS, 0)
AC_SUBST(MONGOC_ENABLE_SASL_SSPI, 0)
AC_SUBST(MONGOC_ENABLE_SASL_GSSAPI, 0)
if test "$PHP_MONGODB_SASL" != "no"; then
AC_MSG_CHECKING(for SASL)
for i in $PHP_MONGODB_SASL /usr /usr/local; do
if test -f $i/include/sasl/sasl.h; then
MONGODB_SASL_DIR=$i
AC_MSG_RESULT(found in $i)
break
fi
done
if test -z "$MONGODB_SASL_DIR"; then
AC_MSG_RESULT(not found)
if test "$PHP_MONGODB_SASL" != "auto"; then
AC_MSG_ERROR([sasl.h not found!])
fi
else
PHP_CHECK_LIBRARY(sasl2, sasl_version,
[
- PHP_ADD_INCLUDE($MONGODB_SASL_DIR)
+ PHP_ADD_INCLUDE($MONGODB_SASL_DIR/include)
PHP_ADD_LIBRARY_WITH_PATH(sasl2, $MONGODB_SASL_DIR/$PHP_LIBDIR, MONGODB_SHARED_LIBADD)
AC_SUBST(MONGOC_ENABLE_SASL, 1)
AC_SUBST(MONGOC_ENABLE_SASL_CYRUS, 1)
], [
if test "$MONGODB_SASL" != "auto"; then
AC_MSG_ERROR([MongoDB SASL check failed. Please check config.log for more information.])
fi
], [
-L$MONGODB_SASL_DIR/$PHP_LIBDIR
])
PHP_CHECK_LIBRARY(sasl2, sasl_client_done,
[
AC_SUBST(MONGOC_HAVE_SASL_CLIENT_DONE, 1)
])
fi
fi
m4_include(src/libmongoc/build/autotools/m4/ax_prototype.m4)
m4_include(src/libmongoc/build/autotools/CheckCompiler.m4)
+
+ dnl We need to convince the libmongoc M4 file to actually run these checks for us
+ enable_srv=auto
+ m4_include(src/libmongoc/build/autotools/FindResSearch.m4)
+
m4_include(src/libmongoc/build/autotools/WeakSymbols.m4)
m4_include(src/libmongoc/build/autotools/m4/ax_pthread.m4)
AX_PTHREAD
AC_CHECK_FUNCS([shm_open], [SHM_LIB=], [AC_CHECK_LIB([rt], [shm_open], [SHM_LIB=-lrt], [SHM_LIB=])])
MONGODB_SHARED_LIBADD="$MONGODB_SHARED_LIBADD $SHM_LIB"
EXTRA_CFLAGS="$PTHREAD_CFLAGS $SASL_CFLAGS"
PHP_SUBST(EXTRA_CFLAGS)
PHP_SUBST(EXTRA_LDFLAGS)
- MONGODB_SHARED_LIBADD="$MONGODB_SHARED_LIBADD $PTHREAD_LIBS $SASL_LIBS"
+ MONGODB_SHARED_LIBADD="$MONGODB_SHARED_LIBADD $PTHREAD_LIBS $SASL_LIBS $SNAPPY_LIBS $ZLIB_LIBS"
PHP_SUBST(MONGODB_SHARED_LIBADD)
PHP_NEW_EXTENSION(mongodb, $PHP_MONGODB_SOURCES, $ext_shared,, $PHP_MONGODB_CFLAGS)
PHP_ADD_EXTENSION_DEP(mongodb, date)
PHP_ADD_EXTENSION_DEP(mongodb, json)
PHP_ADD_EXTENSION_DEP(mongodb, spl)
PHP_ADD_EXTENSION_DEP(mongodb, standard)
PHP_ADD_INCLUDE([$ext_srcdir/src/BSON/])
PHP_ADD_INCLUDE([$ext_srcdir/src/MongoDB/])
PHP_ADD_INCLUDE([$ext_srcdir/src/MongoDB/Exception/])
PHP_ADD_INCLUDE([$ext_srcdir/src/MongoDB/Monitoring/])
PHP_ADD_INCLUDE([$ext_srcdir/src/contrib/])
PHP_ADD_BUILD_DIR([$ext_builddir/src/BSON/])
PHP_ADD_BUILD_DIR([$ext_builddir/src/MongoDB/])
PHP_ADD_BUILD_DIR([$ext_builddir/src/MongoDB/Exception/])
PHP_ADD_BUILD_DIR([$ext_builddir/src/MongoDB/Monitoring/])
PHP_ADD_BUILD_DIR([$ext_builddir/src/contrib/])
- if test "$PHP_LIBMONGOC" == "no"; then
+ if test "$PHP_LIBMONGOC" = "no"; then
PHP_ADD_INCLUDE([$ext_srcdir/src/libmongoc/src/mongoc/])
PHP_ADD_BUILD_DIR([$ext_builddir/src/libmongoc/src/mongoc/])
+ if test "x$with_zlib" = "xbundled"; then
+ PHP_ADD_INCLUDE([$ext_srcdir/src/libmongoc/src/zlib-1.2.11/])
+ PHP_ADD_BUILD_DIR([$ext_builddir/src/libmongoc/src/zlib-1.2.11/])
+ fi
fi
- if test "$PHP_LIBBSON" == "no"; then
+ if test "$PHP_LIBBSON" = "no"; then
m4_include(src/libbson/build/autotools/CheckAtomics.m4)
m4_include(src/libbson/build/autotools/FindDependencies.m4)
m4_include(src/libbson/build/autotools/m4/ac_compile_check_sizeof.m4)
m4_include(src/libbson/build/autotools/m4/ac_create_stdint_h.m4)
AC_CREATE_STDINT_H([$srcdir/src/libbson/src/bson/bson-stdint.h])
PHP_ADD_INCLUDE([$ext_srcdir/src/libbson/src/])
PHP_ADD_INCLUDE([$ext_srcdir/src/libbson/src/jsonsl/])
PHP_ADD_INCLUDE([$ext_srcdir/src/libbson/src/bson/])
PHP_ADD_BUILD_DIR([$ext_builddir/src/libbson/src/])
PHP_ADD_BUILD_DIR([$ext_builddir/src/libbson/src/jsonsl/])
PHP_ADD_BUILD_DIR([$ext_builddir/src/libbson/src/bson/])
fi
PHP_BSON_BIGENDIAN
AC_HEADER_STDBOOL
AC_SUBST(BSON_EXTRA_ALIGN, 0)
AC_SUBST(BSON_HAVE_DECIMAL128, 0)
if test "$ac_cv_header_stdbool_h" = "yes"; then
AC_SUBST(BSON_HAVE_STDBOOL_H, 1)
else
AC_SUBST(BSON_HAVE_STDBOOL_H, 0)
fi
AC_SUBST(BSON_OS, 1)
PHP_BSON_CLOCK
AC_CHECK_FUNC(strnlen,ac_cv_func_strnlen=yes,ac_cv_func_strnlen=no)
if test "$ac_cv_func_strnlen" = "yes"; then
AC_SUBST(BSON_HAVE_STRNLEN, 1)
else
AC_SUBST(BSON_HAVE_STRNLEN, 0)
fi
AC_CHECK_FUNC(snprintf,ac_cv_func_snprintf=yes,ac_cv_func_snprintf=no)
if test "$ac_cv_func_snprintf" = "yes"; then
AC_SUBST(BSON_HAVE_SNPRINTF, 1)
else
AC_SUBST(BSON_HAVE_SNPRINTF, 0)
fi
- if test "$PHP_LIBMONGOC" == "no"; then
+ if test "$PHP_LIBMONGOC" = "no"; then
backup_srcdir=${srcdir}
srcdir=${srcdir}/src/libmongoc/
m4_include(src/libmongoc/build/autotools/Versions.m4)
srcdir=${backup_srcdir}
MONGOC_API_VERSION=1.0
AC_SUBST(MONGOC_MAJOR_VERSION)
AC_SUBST(MONGOC_MINOR_VERSION)
AC_SUBST(MONGOC_MICRO_VERSION)
AC_SUBST(MONGOC_API_VERSION)
AC_SUBST(MONGOC_VERSION)
AC_OUTPUT($srcdir/src/libmongoc/src/mongoc/mongoc-config.h)
AC_OUTPUT($srcdir/src/libmongoc/src/mongoc/mongoc-version.h)
+ if test "x$with_zlib" = "xbundled"; then
+ AC_OUTPUT($srcdir/src/libmongoc/src/zlib-1.2.11/zconf.h)
+ fi
fi
- if test "$PHP_LIBBSON" == "no"; then
+ if test "$PHP_LIBBSON" = "no"; then
backup_srcdir=${srcdir}
srcdir=${srcdir}/src/libbson/
m4_include(src/libbson/build/autotools/Versions.m4)
srcdir=${backup_srcdir}
BSON_API_VERSION=1.0
AC_SUBST(BSON_MAJOR_VERSION)
AC_SUBST(BSON_MINOR_VERSION)
AC_SUBST(BSON_MICRO_VERSION)
AC_SUBST(BSON_API_VERSION)
AC_SUBST(BSON_VERSION)
AC_OUTPUT($srcdir/src/libbson/src/bson/bson-config.h)
AC_OUTPUT($srcdir/src/libbson/src/bson/bson-version.h)
fi
dnl This must come after PHP_NEW_EXTENSION, otherwise the srcdir won't be set
PHP_ADD_MAKEFILE_FRAGMENT
AC_CONFIG_COMMANDS_POST([echo "
mongodb was configured with the following options:
Build configuration:
CFLAGS : $CFLAGS
Extra CFLAGS : $STD_CFLAGS $EXTRA_CFLAGS
Developers flags (slow) : $MAINTAINER_CFLAGS
Code Coverage flags (extra slow) : $COVERAGE_CFLAGS
System mongoc : $PHP_LIBMONGOC
System libbson : $PHP_LIBBSON
LDFLAGS : $LDFLAGS
EXTRA_LDFLAGS : $EXTRA_LDFLAGS
MONGODB_SHARED_LIBADD : $MONGODB_SHARED_LIBADD
Please submit bugreports at:
https://jira.mongodb.org/browse/PHPC
"])
fi
dnl: vim: et sw=2
diff --git a/mongodb-1.3.4/config.w32 b/mongodb-1.4.2/config.w32
similarity index 76%
rename from mongodb-1.3.4/config.w32
rename to mongodb-1.4.2/config.w32
index b6349ec6..155700bb 100644
--- a/mongodb-1.3.4/config.w32
+++ b/mongodb-1.4.2/config.w32
@@ -1,224 +1,239 @@
// vim:ft=javascript
function mongodb_generate_header(inpath, outpath, replacements)
{
STDOUT.WriteLine("Generating " + outpath);
var infile = FSO.OpenTextFile(inpath, 1);
var outdata = infile.ReadAll();
infile.Close();
for (var key in replacements) {
var replacement = replacements[key];
if (typeof replacement === 'string') {
replacement = replacement.replace(/"/g, '\\"');
}
outdata = outdata.replace(new RegExp('@' + key + '@', 'g'), replacement);
}
var outfile = FSO.CreateTextFile(outpath, true);
outfile.Write(outdata);
outfile.Close();
}
function mongodb_parse_version_file(inpath, prefix)
{
var infile = FSO.OpenTextFile(inpath, 1);
var version = infile.ReadLine();
infile.Close();
var xyz_pre = version.split("-");
var xyz = xyz_pre[0].split(".");
var pre = xyz_pre.length > 1 ? xyz_pre[1] : "";
var replacements = {};
replacements[prefix + "VERSION"] = version;
replacements[prefix + "MAJOR_VERSION"] = xyz[0];
replacements[prefix + "MINOR_VERSION"] = xyz[1];
replacements[prefix + "MICRO_VERSION"] = xyz[2];
replacements[prefix + "PRERELEASE_VERSION"] = pre;
return replacements;
}
ARG_ENABLE("mongodb", "MongoDB support", "no");
ARG_WITH("mongodb-sasl", "MongoDB: Build against Cyrus-SASL", "yes");
if (PHP_MONGODB != "no") {
/* Note: ADD_EXTENSION_DEP() cannot be used to declare that we depend on the
* date and standard extensions. Assume that they're always enabled. */
ADD_EXTENSION_DEP("mongodb", "json", false);
ADD_EXTENSION_DEP("mongodb", "spl", false);
/* MongoDB does not actually depend on PHP's OpenSSL extension, but this is in
* place to ensure that later SSL library checks succeed. This can be removed
* once we support building with Secure Channel. */
ADD_EXTENSION_DEP("mongodb", "openssl", false);
var PHP_MONGODB_CFLAGS="\
/D BSON_COMPILATION /D MONGOC_COMPILATION /D MONGOC_TRACE \
/I" + configure_module_dirname + " \
/I" + configure_module_dirname + "/src/BSON \
/I" + configure_module_dirname + "/src/MongoDB \
/I" + configure_module_dirname + "/src/MongoDB/Exception \
/I" + configure_module_dirname + "/src/contrib \
/I" + configure_module_dirname + "/src/libbson/src \
/I" + configure_module_dirname + "/src/libbson/src/bson \
/I" + configure_module_dirname + "/src/libbson/src/yajl \
/I" + configure_module_dirname + "/src/libmongoc/src/mongoc \
";
// Condense whitespace in CFLAGS
PHP_MONGODB_CFLAGS = PHP_MONGODB_CFLAGS.replace(/\s+/g, ' ');
// Generated with: find src/libbson/src/bson -name '*.c' -print0 | cut -sz -d / -f 5- | sort -z | tr '\000' ' '
var PHP_MONGODB_BSON_SOURCES="bcon.c bson-atomic.c bson.c bson-clock.c bson-context.c bson-decimal128.c bson-error.c bson-iso8601.c bson-iter.c bson-json.c bson-keys.c bson-md5.c bson-memory.c bson-oid.c bson-reader.c bson-string.c bson-timegm.c bson-utf8.c bson-value.c bson-version-functions.c bson-writer.c";
// Generated with: find src/libbson/src/jsonsl -name '*.c' -print0 | cut -sz -d / -f 5- | sort -z | tr '\000' ' '
var PHP_MONGODB_JSONSL_SOURCES="jsonsl.c";
// Generated with: find src/libmongoc/src/mongoc -name '*.c' -print0 | cut -sz -d / -f 4- | sort -z | tr '\000' ' '
- var PHP_MONGODB_MONGOC_SOURCES="mongoc-apm.c mongoc-array.c mongoc-async.c mongoc-async-cmd.c mongoc-b64.c mongoc-buffer.c mongoc-bulk-operation.c mongoc-client.c mongoc-client-pool.c mongoc-cluster.c mongoc-cluster-cyrus.c mongoc-cluster-gssapi.c mongoc-cluster-sasl.c mongoc-cluster-sspi.c mongoc-cmd.c mongoc-collection.c mongoc-compression.c mongoc-counters.c mongoc-crypto.c mongoc-crypto-cng.c mongoc-crypto-common-crypto.c mongoc-crypto-openssl.c mongoc-cursor-array.c mongoc-cursor.c mongoc-cursor-cursorid.c mongoc-cursor-transform.c mongoc-cyrus.c mongoc-database.c mongoc-find-and-modify.c mongoc-gridfs.c mongoc-gridfs-file.c mongoc-gridfs-file-list.c mongoc-gridfs-file-page.c mongoc-gssapi.c mongoc-handshake.c mongoc-host-list.c mongoc-index.c mongoc-init.c mongoc-libressl.c mongoc-linux-distro-scanner.c mongoc-list.c mongoc-log.c mongoc-matcher.c mongoc-matcher-op.c mongoc-memcmp.c mongoc-openssl.c mongoc-queue.c mongoc-rand-cng.c mongoc-rand-common-crypto.c mongoc-rand-openssl.c mongoc-read-concern.c mongoc-read-prefs.c mongoc-rpc.c mongoc-sasl.c mongoc-scram.c mongoc-secure-channel.c mongoc-secure-transport.c mongoc-server-description.c mongoc-server-stream.c mongoc-set.c mongoc-socket.c mongoc-ssl.c mongoc-sspi.c mongoc-stream-buffered.c mongoc-stream.c mongoc-stream-file.c mongoc-stream-gridfs.c mongoc-stream-socket.c mongoc-stream-tls.c mongoc-stream-tls-libressl.c mongoc-stream-tls-openssl-bio.c mongoc-stream-tls-openssl.c mongoc-stream-tls-secure-channel.c mongoc-stream-tls-secure-transport.c mongoc-topology.c mongoc-topology-description-apm.c mongoc-topology-description.c mongoc-topology-scanner.c mongoc-uri.c mongoc-util.c mongoc-version-functions.c mongoc-write-command.c mongoc-write-concern.c";
+ var PHP_MONGODB_MONGOC_SOURCES="mongoc-apm.c mongoc-array.c mongoc-async.c mongoc-async-cmd.c mongoc-b64.c mongoc-buffer.c mongoc-bulk-operation.c mongoc-change-stream.c mongoc-client.c mongoc-client-pool.c mongoc-client-session.c mongoc-cluster.c mongoc-cluster-cyrus.c mongoc-cluster-gssapi.c mongoc-cluster-sasl.c mongoc-cluster-sspi.c mongoc-cmd.c mongoc-collection.c mongoc-compression.c mongoc-counters.c mongoc-crypto.c mongoc-crypto-cng.c mongoc-crypto-common-crypto.c mongoc-crypto-openssl.c mongoc-cursor-array.c mongoc-cursor.c mongoc-cursor-cursorid.c mongoc-cursor-transform.c mongoc-cyrus.c mongoc-database.c mongoc-find-and-modify.c mongoc-gridfs.c mongoc-gridfs-file.c mongoc-gridfs-file-list.c mongoc-gridfs-file-page.c mongoc-gssapi.c mongoc-handshake.c mongoc-host-list.c mongoc-index.c mongoc-init.c mongoc-libressl.c mongoc-linux-distro-scanner.c mongoc-list.c mongoc-log.c mongoc-matcher.c mongoc-matcher-op.c mongoc-memcmp.c mongoc-openssl.c mongoc-queue.c mongoc-rand-cng.c mongoc-rand-common-crypto.c mongoc-rand-openssl.c mongoc-read-concern.c mongoc-read-prefs.c mongoc-rpc.c mongoc-sasl.c mongoc-scram.c mongoc-secure-channel.c mongoc-secure-transport.c mongoc-server-description.c mongoc-server-stream.c mongoc-set.c mongoc-socket.c mongoc-ssl.c mongoc-sspi.c mongoc-stream-buffered.c mongoc-stream.c mongoc-stream-file.c mongoc-stream-gridfs.c mongoc-stream-socket.c mongoc-stream-tls.c mongoc-stream-tls-libressl.c mongoc-stream-tls-openssl-bio.c mongoc-stream-tls-openssl.c mongoc-stream-tls-secure-channel.c mongoc-stream-tls-secure-transport.c mongoc-topology.c mongoc-topology-description-apm.c mongoc-topology-description.c mongoc-topology-scanner.c mongoc-uri.c mongoc-util.c mongoc-version-functions.c mongoc-write-command.c mongoc-write-command-legacy.c mongoc-write-concern.c";
EXTENSION("mongodb", "php_phongo.c phongo_compat.c", null, PHP_MONGODB_CFLAGS);
ADD_SOURCES(configure_module_dirname + "/src", "bson.c bson-encode.c", "mongodb");
- ADD_SOURCES(configure_module_dirname + "/src/BSON", "Binary.c BinaryInterface.c Decimal128.c Decimal128Interface.c Javascript.c JavascriptInterface.c MaxKey.c MaxKeyInterface.c MinKey.c MinKeyInterface.c ObjectId.c ObjectIdInterface.c Persistable.c Regex.c RegexInterface.c Serializable.c Timestamp.c TimestampInterface.c Type.c Unserializable.c UTCDateTime.c UTCDateTimeInterface.c functions.c", "mongodb");
- ADD_SOURCES(configure_module_dirname + "/src/MongoDB", "BulkWrite.c Command.c Cursor.c CursorId.c Manager.c Query.c ReadConcern.c ReadPreference.c Server.c WriteConcern.c WriteConcernError.c WriteError.c WriteResult.c", "mongodb");
+ ADD_SOURCES(configure_module_dirname + "/src/BSON", "Binary.c BinaryInterface.c DBPointer.c Decimal128.c Decimal128Interface.c Javascript.c JavascriptInterface.c MaxKey.c MaxKeyInterface.c MinKey.c MinKeyInterface.c ObjectId.c ObjectIdInterface.c Persistable.c Regex.c RegexInterface.c Serializable.c Symbol.c Timestamp.c TimestampInterface.c Type.c Undefined.c Unserializable.c UTCDateTime.c UTCDateTimeInterface.c functions.c", "mongodb");
+ ADD_SOURCES(configure_module_dirname + "/src/MongoDB", "BulkWrite.c Command.c Cursor.c CursorId.c Manager.c Query.c ReadConcern.c ReadPreference.c Server.c Session.c WriteConcern.c WriteConcernError.c WriteError.c WriteResult.c", "mongodb");
ADD_SOURCES(configure_module_dirname + "/src/MongoDB/Exception", "AuthenticationException.c BulkWriteException.c ConnectionException.c ConnectionTimeoutException.c Exception.c ExecutionTimeoutException.c InvalidArgumentException.c LogicException.c RuntimeException.c SSLConnectionException.c UnexpectedValueException.c WriteException.c", "mongodb");
ADD_SOURCES(configure_module_dirname + "/src/MongoDB/Monitoring", "CommandFailedEvent.c CommandStartedEvent.c CommandSubscriber.c CommandSucceededEvent.c Subscriber.c functions.c", "mongodb");
ADD_SOURCES(configure_module_dirname + "/src/libbson/src/bson", PHP_MONGODB_BSON_SOURCES, "mongodb");
ADD_SOURCES(configure_module_dirname + "/src/libbson/src/jsonsl", PHP_MONGODB_JSONSL_SOURCES, "mongodb");
ADD_SOURCES(configure_module_dirname + "/src/libmongoc/src/mongoc", PHP_MONGODB_MONGOC_SOURCES, "mongodb");
var bson_opts = {
BSON_BYTE_ORDER: 1234,
BSON_OS: 2,
BSON_HAVE_STDBOOL_H: 0,
BSON_HAVE_ATOMIC_32_ADD_AND_FETCH: 0,
BSON_HAVE_ATOMIC_64_ADD_AND_FETCH: 0,
BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES: 0,
BSON_HAVE_CLOCK_GETTIME: 0,
BSON_HAVE_STRNLEN: 0,
BSON_HAVE_SNPRINTF: 0,
BSON_HAVE_REALLOCF: 0,
BSON_NEEDS_SET_OUTPUT_FORMAT: 0,
BSON_HAVE_TIMESPEC: 0,
BSON_EXTRA_ALIGN: 0,
BSON_HAVE_SYSCALL_TID: 0,
BSON_HAVE_DECIMAL128: 0,
- BSON_HAVE_GMTIME_R: 0
+ BSON_HAVE_GMTIME_R: 0,
+ BSON_HAVE_RAND_R: 0
};
if (CHECK_FUNC_IN_HEADER("stdio.h", "_set_output_format")) {
bson_opts.BSON_NEEDS_SET_OUTPUT_FORMAT = 1;
}
mongodb_generate_header(
configure_module_dirname + "/src/libbson/src/bson/bson-config.h.in",
configure_module_dirname + "/src/libbson/src/bson/bson-config.h",
bson_opts
);
mongodb_generate_header(
configure_module_dirname + "/src/libbson/src/bson/bson-version.h.in",
configure_module_dirname + "/src/libbson/src/bson/bson-version.h",
mongodb_parse_version_file(configure_module_dirname + "/src/libbson/VERSION_CURRENT", "BSON_")
);
var mongoc_opts = {
// TODO: Support building with Secure Channel on Windows
MONGOC_ENABLE_SSL_SECURE_CHANNEL: 0,
MONGOC_ENABLE_CRYPTO_CNG: 0,
// Secure Transport does not apply to Windows
MONGOC_ENABLE_SSL_SECURE_TRANSPORT: 0,
MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO: 0,
MONGOC_ENABLE_SSL_LIBRESSL: 0,
MONGOC_ENABLE_SSL_OPENSSL: 0,
MONGOC_ENABLE_CRYPTO_LIBCRYPTO: 0,
MONGOC_ENABLE_SSL: 0,
MONGOC_ENABLE_CRYPTO: 0,
MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE: 0,
MONGOC_ENABLE_COMPRESSION_SNAPPY: 0,
MONGOC_ENABLE_COMPRESSION_ZLIB: 0,
MONGOC_ENABLE_COMPRESSION: 0,
MONGOC_ENABLE_SASL: 0,
MONGOC_ENABLE_SASL_CYRUS: 0,
MONGOC_ENABLE_SASL_GSSAPI: 0,
MONGOC_ENABLE_SASL_SSPI: 0,
MONGOC_HAVE_ASN1_STRING_GET0_DATA: 0,
MONGOC_HAVE_SASL_CLIENT_DONE: 0,
MONGOC_HAVE_SOCKLEN: 1,
MONGOC_HAVE_WEAK_SYMBOLS: 0,
MONGOC_NO_AUTOMATIC_GLOBALS: 1,
MONGOC_SOCKET_ARG2: "struct sockaddr",
MONGOC_SOCKET_ARG3: "socklen_t",
+ MONGOC_HAVE_DNSAPI: 0,
+ MONGOC_HAVE_RES_NSEARCH: 0,
+ MONGOC_HAVE_RES_NDESTROY: 0,
+ MONGOC_HAVE_RES_NCLOSE: 0,
+ MONGOC_HAVE_RES_SEARCH: 0,
MONGOC_CC: "",
MONGOC_USER_SET_CFLAGS: "",
MONGOC_USER_SET_LDFLAGS: ""
};
var mongoc_ssl_path_to_check = PHP_MONGODB;
if (typeof PHP_OPENSSL === 'string') {
mongoc_ssl_path_to_check += ";" + PHP_OPENSSL;
}
var mongoc_ssl_found = false;
/* PHP 7.1.2 introduced SETUP_OPENSSL(), which supports OpenSSL 1.1.x. Earlier
* versions will use the legacy check for OpenSSL 1.0.x and lower. */
if (typeof SETUP_OPENSSL === 'function') {
- mongoc_ssl_found = SETUP_OPENSSL("mongodb", mongoc_ssl_path_to_check) > 0;
+ openssl_type = SETUP_OPENSSL("mongodb", mongoc_ssl_path_to_check);
+ mongoc_ssl_found = openssl_type > 0;
+ if (openssl_type >= 2) {
+ mongoc_opts.MONGOC_HAVE_ASN1_STRING_GET0_DATA = 1;
+ }
} else if (CHECK_LIB("ssleay32.lib", "mongodb", mongoc_ssl_path_to_check) &&
CHECK_LIB("libeay32.lib", "mongodb", mongoc_ssl_path_to_check) &&
CHECK_LIB("crypt32.lib", "mongodb", mongoc_ssl_path_to_check) &&
CHECK_HEADER_ADD_INCLUDE("openssl/ssl.h", "CFLAGS_MONGODB")) {
mongoc_ssl_found = true;
}
if (mongoc_ssl_found) {
mongoc_opts.MONGOC_ENABLE_SSL_OPENSSL = 1;
mongoc_opts.MONGOC_ENABLE_CRYPTO_LIBCRYPTO = 1;
mongoc_opts.MONGOC_ENABLE_SSL = 1;
mongoc_opts.MONGOC_ENABLE_CRYPTO = 1;
} else {
WARNING("mongodb libopenssl support not enabled, libs not found");
}
// TODO: Support building with native GSSAPI (SSPI) on Windows
if (PHP_MONGODB_SASL != "no" &&
CHECK_LIB("libsasl.lib", "mongodb", PHP_MONGODB) &&
CHECK_HEADER_ADD_INCLUDE("sasl/sasl.h", "CFLAGS_MONGODB")) {
mongoc_opts.MONGOC_ENABLE_SASL = 1;
mongoc_opts.MONGOC_ENABLE_SASL_CYRUS = 1;
if (CHECK_FUNC_IN_HEADER("sasl/sasl.h", "sasl_client_done")) {
mongoc_opts.MONGOC_HAVE_SASL_CLIENT_DONE = 1;
}
} else {
WARNING("mongodb libsasl support not enabled, libs not found");
}
+ if (CHECK_LIB("dnsapi.lib", "mongodb", PHP_MONGODB) &&
+ CHECK_HEADER_ADD_INCLUDE("windns.h", "CFLAGS_MONGODB")) {
+ mongoc_opts.MONGOC_HAVE_DNSAPI = 1;
+ }
+
if (typeof COMPILER_NAME === 'string') {
mongoc_opts.MONGOC_CC = COMPILER_NAME;
} else if (typeof VC_VERSIONS[VCVERS] === 'string') {
mongoc_opts.MONGOC_CC = VC_VERSIONS[VCVERS];
}
/* MONGOC_USER_SET_CFLAGS and MONGOC_USER_SET_LDFLAGS can be left blank, as we
* do not expect CFLAGS or LDFLAGS to be customized at build time. */
mongodb_generate_header(
configure_module_dirname + "/src/libmongoc/src/mongoc/mongoc-config.h.in",
configure_module_dirname + "/src/libmongoc/src/mongoc/mongoc-config.h",
mongoc_opts
);
mongodb_generate_header(
configure_module_dirname + "/src/libmongoc/src/mongoc/mongoc-version.h.in",
configure_module_dirname + "/src/libmongoc/src/mongoc/mongoc-version.h",
mongodb_parse_version_file(configure_module_dirname + "/src/libmongoc/VERSION_CURRENT", "MONGOC_")
);
}
diff --git a/mongodb-1.3.4/phongo_compat.c b/mongodb-1.4.2/phongo_compat.c
similarity index 100%
rename from mongodb-1.3.4/phongo_compat.c
rename to mongodb-1.4.2/phongo_compat.c
diff --git a/mongodb-1.3.4/phongo_compat.h b/mongodb-1.4.2/phongo_compat.h
similarity index 87%
rename from mongodb-1.3.4/phongo_compat.h
rename to mongodb-1.4.2/phongo_compat.h
index 4e23384a..af175a4b 100644
--- a/mongodb-1.3.4/phongo_compat.h
+++ b/mongodb-1.4.2/phongo_compat.h
@@ -1,165 +1,180 @@
/*
* Copyright 2015-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PHONGO_COMPAT_H
#define PHONGO_COMPAT_H
#include <php.h>
#include <Zend/zend_string.h>
#if PHP_VERSION_ID >= 70000
#include <Zend/zend_portability.h>
#endif
#ifdef PHP_WIN32
# include "config.w32.h"
#else
# include <php_config.h>
#endif
#ifndef PHP_FE_END
# define PHP_FE_END { NULL, NULL, NULL }
#endif
#ifndef HASH_KEY_NON_EXISTENT
# define HASH_KEY_NON_EXISTENT HASH_KEY_NON_EXISTANT
#endif
#if PHP_VERSION_ID >= 70000
# define str_efree(s) efree((char*)s)
#else
# include <Zend/zend_string.h>
#endif
#if defined(__GNUC__)
# define ARG_UNUSED __attribute__ ((unused))
#else
# define ARG_UNUSED
#endif
+#if defined (__GNUC__)
+# define PHONGO_GNUC_CHECK_VERSION(major, minor) \
+ ((__GNUC__ > (major)) || \
+ ((__GNUC__ == (major)) && (__GNUC_MINOR__ >= (minor))))
+#else
+# define PHONGO_GNUC_CHECK_VERSION(major, minor) 0
+#endif
+
+#if PHONGO_GNUC_CHECK_VERSION(7, 0)
+# define PHONGO_BREAK_INTENTIONALLY_MISSING __attribute__ ((fallthrough));
+#else
+# define PHONGO_BREAK_INTENTIONALLY_MISSING
+#endif
+
+
#if PHP_VERSION_ID >= 70000
# define phongo_char zend_string
# define phongo_long zend_long
#if SIZEOF_ZEND_LONG == 8
# define PHONGO_LONG_FORMAT PRId64
#elif SIZEOF_ZEND_LONG == 4
# define PHONGO_LONG_FORMAT PRId32
#else
# error Unsupported architecture (integers are neither 32-bit nor 64-bit)
#endif
# define SIZEOF_PHONGO_LONG SIZEOF_ZEND_LONG
# define phongo_create_object_retval zend_object*
# define phongo_get_gc_table zval **
# define PHONGO_ALLOC_OBJECT_T(_obj_t, _class_type) (_obj_t *)ecalloc(1, sizeof(_obj_t)+zend_object_properties_size(_class_type))
# define PHONGO_TSRMLS_FETCH_FROM_CTX(user_data)
# define SUPPRESS_UNUSED_WARNING(x)
# define DECLARE_RETURN_VALUE_USED int return_value_used = 1;
# define EXCEPTION_P(_ex, _zp) ZVAL_OBJ(&_zp, _ex)
# define ADD_ASSOC_STRING(_zv, _key, _value) add_assoc_string_ex(_zv, ZEND_STRL(_key), (char *)(_value));
# define ADD_ASSOC_STRINGL(_zv, _key, _value, _len) add_assoc_stringl_ex(_zv, ZEND_STRL(_key), (char *)(_value), _len);
# define ADD_ASSOC_STRING_EX(_zv, _key, _key_len, _value, _value_len) add_assoc_stringl_ex(_zv, _key, _key_len, (char *)(_value), _value_len);
# define ADD_ASSOC_LONG_EX(_zv, _key, _value) add_assoc_long_ex(_zv, ZEND_STRL(_key), _value);
# define ADD_ASSOC_ZVAL_EX(_zv, _key, _value) add_assoc_zval_ex(_zv, ZEND_STRL(_key), _value);
# define ADD_ASSOC_ZVAL(_zv, _key, _value) add_assoc_zval(_zv, _key, _value);
# define ADD_ASSOC_NULL_EX(_zv, _key) add_assoc_null_ex(_zv, ZEND_STRL(_key));
# define ADD_ASSOC_BOOL_EX(_zv, _key, _value) add_assoc_bool_ex(_zv, ZEND_STRL(_key), _value);
# define ADD_NEXT_INDEX_STRINGL(_zv, _value, _len) add_next_index_stringl(_zv, _value, _len);
# define phongo_free_object_arg zend_object
# define phongo_zpp_char_len size_t
# define ZEND_HASH_APPLY_COUNT(ht) (ht)->u.v.nApplyCount
# define PHONGO_RETVAL_STRINGL(s, slen) RETVAL_STRINGL(s, slen)
# define PHONGO_RETURN_STRINGL(s, slen) RETURN_STRINGL(s, slen)
# define PHONGO_RETVAL_STRING(s) RETVAL_STRING(s)
# define PHONGO_RETURN_STRING(s) RETURN_STRING(s)
# define PHONGO_RETVAL_SMART_STR(val) PHONGO_RETVAL_STRINGL(ZSTR_VAL((val).s), ZSTR_LEN((val).s));
#else
# define phongo_char char
# define phongo_long long
# define PHONGO_LONG_FORMAT "ld"
# define SIZEOF_PHONGO_LONG SIZEOF_LONG
# define ZSTR_VAL(str) str
# define phongo_create_object_retval zend_object_value
# define phongo_get_gc_table zval ***
# define PHONGO_ALLOC_OBJECT_T(_obj_t, _class_type) (_obj_t *)ecalloc(1, sizeof(_obj_t))
# define PHONGO_TSRMLS_FETCH_FROM_CTX(user_data) TSRMLS_FETCH_FROM_CTX(user_data)
# define SUPPRESS_UNUSED_WARNING(x) (void)x;
# define DECLARE_RETURN_VALUE_USED
# define EXCEPTION_P(_ex, _zp) _zp = _ex
# define ADD_ASSOC_STRING(_zv, _key, _value) add_assoc_string_ex(_zv, ZEND_STRS(_key), (char *)(_value), 1);
# define ADD_ASSOC_STRINGL(_zv, _key, _value, _len) add_assoc_stringl_ex(_zv, ZEND_STRS(_key), (char *)(_value), _len, 1);
# define ADD_ASSOC_STRING_EX(_zv, _key, _key_len, _value, _value_len) add_assoc_stringl_ex(_zv, _key, _key_len+1, (char *)(_value), _value_len, 1);
# define ADD_ASSOC_LONG_EX(_zv, _key, _value) add_assoc_long_ex(_zv, ZEND_STRS(_key), _value);
# define ADD_ASSOC_ZVAL_EX(_zv, _key, _value) add_assoc_zval_ex(_zv, ZEND_STRS(_key), _value);
# define ADD_ASSOC_ZVAL(_zv, _key, _value) add_assoc_zval(_zv, _key, _value);
# define ADD_ASSOC_NULL_EX(_zv, _key) add_assoc_null_ex(_zv, ZEND_STRS(_key));
# define ADD_ASSOC_BOOL_EX(_zv, _key, _value) add_assoc_bool_ex(_zv, ZEND_STRS(_key), _value);
# define ADD_NEXT_INDEX_STRINGL(_zv, _value, _len) add_next_index_stringl(_zv, _value, _len, 1);
# define Z_PHPDATE_P(object) ((php_date_obj*)zend_object_store_get_object(object TSRMLS_CC))
# define Z_ISUNDEF(x) !x
# define ZVAL_UNDEF(x) do { (*x) = NULL; } while (0)
# define phongo_free_object_arg void
# define phongo_zpp_char_len int
# define ZEND_HASH_APPLY_PROTECTION(ht) true
# define ZEND_HASH_GET_APPLY_COUNT(ht) ((ht)->nApplyCount)
# define ZEND_HASH_DEC_APPLY_COUNT(ht) ((ht)->nApplyCount -= 1)
# define ZEND_HASH_INC_APPLY_COUNT(ht) ((ht)->nApplyCount += 1)
# define PHONGO_RETVAL_STRINGL(s, slen) RETVAL_STRINGL(s, slen, 1)
# define PHONGO_RETURN_STRINGL(s, slen) RETURN_STRINGL(s, slen, 1)
# define PHONGO_RETVAL_STRING(s) RETVAL_STRING(s, 1)
# define PHONGO_RETURN_STRING(s) RETURN_STRING(s, 1)
# define PHONGO_RETVAL_SMART_STR(val) PHONGO_RETVAL_STRINGL((val).c, (val).len);
#endif
#if SIZEOF_PHONGO_LONG == 8
# define ADD_INDEX_INT64(zval, index, value) add_index_long(zval, index, value)
# define ADD_NEXT_INDEX_INT64(zval, value) add_next_index_long(zval, value)
# define ADD_ASSOC_INT64(zval, key, value) add_assoc_long(zval, key, value)
#elif SIZEOF_PHONGO_LONG == 4
# define ADD_INDEX_INT64(zval, index, value) \
- if (value > INT32_MAX || value < INT32_MIN) { \
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Integer overflow detected on your platform: %lld", value); \
+ if ((value) > INT32_MAX || (value) < INT32_MIN) { \
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Integer overflow detected on your platform: %lld", (value)); \
} else { \
- add_index_long(zval, index, value); \
+ add_index_long(zval, index, (value)); \
}
# define ADD_NEXT_INDEX_INT64(zval, value) \
- if (value > INT32_MAX || value < INT32_MIN) { \
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Integer overflow detected on your platform: %lld", value); \
+ if ((value) > INT32_MAX || (value) < INT32_MIN) { \
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Integer overflow detected on your platform: %lld", (value)); \
} else { \
- add_next_index_long(zval, value); \
+ add_next_index_long(zval, (value)); \
}
# define ADD_ASSOC_INT64(zval, key, value) \
- if (value > INT32_MAX || value < INT32_MIN) { \
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Integer overflow detected on your platform: %lld", value); \
+ if ((value) > INT32_MAX || (value) < INT32_MIN) { \
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Integer overflow detected on your platform: %lld", (value)); \
} else { \
- add_assoc_long(zval, key, value); \
+ add_assoc_long(zval, key, (value)); \
}
#else
# error Unsupported architecture (integers are neither 32-bit nor 64-bit)
#endif
void phongo_add_exception_prop(const char *prop, int prop_len, zval *value TSRMLS_DC);
#endif /* PHONGO_COMPAT_H */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/php_bson.h b/mongodb-1.4.2/php_bson.h
similarity index 100%
rename from mongodb-1.3.4/php_bson.h
rename to mongodb-1.4.2/php_bson.h
diff --git a/mongodb-1.3.4/php_phongo.c b/mongodb-1.4.2/php_phongo.c
similarity index 83%
rename from mongodb-1.3.4/php_phongo.c
rename to mongodb-1.4.2/php_phongo.c
index c932e337..45e665b8 100644
--- a/mongodb-1.3.4/php_phongo.c
+++ b/mongodb-1.4.2/php_phongo.c
@@ -1,2612 +1,3024 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
/* External libs */
#include "bson.h"
#include "mongoc.h"
/* PHP Core stuff */
#include <php.h>
#include <php_ini.h>
#include <ext/standard/info.h>
#include <ext/standard/file.h>
#include <Zend/zend_hash.h>
#include <Zend/zend_interfaces.h>
#include <Zend/zend_exceptions.h>
#include <ext/spl/spl_iterators.h>
#include <ext/spl/spl_exceptions.h>
#include <ext/standard/php_var.h>
#if PHP_VERSION_ID >= 70000
# include <Zend/zend_smart_str.h>
#else
# include <ext/standard/php_smart_str.h>
#endif
/* getpid() */
#if HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef PHP_WIN32
# include <process.h>
#endif
/* Stream wrapper */
#include <main/php_streams.h>
#include <main/php_network.h>
/* Debug log writing */
#include <main/php_open_temporary_file.h>
/* For formating timestamp in the log */
#include <ext/date/php_date.h>
/* String manipulation */
#include <Zend/zend_string.h>
/* PHP array helpers */
#include "php_array_api.h"
/* Our Compatability header */
#include "phongo_compat.h"
/* Our stuffz */
#include "php_phongo.h"
#include "php_bson.h"
#include "src/BSON/functions.h"
#include "src/MongoDB/Monitoring/functions.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "PHONGO"
#define PHONGO_DEBUG_INI "mongodb.debug"
#define PHONGO_DEBUG_INI_DEFAULT ""
ZEND_DECLARE_MODULE_GLOBALS(mongodb)
#if PHP_VERSION_ID >= 70000
#if defined(ZTS) && defined(COMPILE_DL_MONGODB)
ZEND_TSRMLS_CACHE_DEFINE();
#endif
#endif
/* Declare zend_class_entry dependencies, which are initialized in MINIT */
zend_class_entry *php_phongo_date_immutable_ce;
zend_class_entry *php_phongo_json_serializable_ce;
php_phongo_server_description_type_map_t
php_phongo_server_description_type_map[PHONGO_SERVER_DESCRIPTION_TYPES] = {
{ PHONGO_SERVER_UNKNOWN, "Unknown" },
{ PHONGO_SERVER_STANDALONE, "Standalone" },
{ PHONGO_SERVER_MONGOS, "Mongos" },
{ PHONGO_SERVER_POSSIBLE_PRIMARY, "PossiblePrimary" },
{ PHONGO_SERVER_RS_PRIMARY, "RSPrimary" },
{ PHONGO_SERVER_RS_SECONDARY, "RSSecondary" },
{ PHONGO_SERVER_RS_ARBITER, "RSArbiter" },
{ PHONGO_SERVER_RS_OTHER, "RSOther" },
{ PHONGO_SERVER_RS_GHOST, "RSGhost" },
};
/* {{{ phongo_std_object_handlers */
zend_object_handlers phongo_std_object_handlers;
zend_object_handlers *phongo_get_std_object_handlers(void)
{
return &phongo_std_object_handlers;
}
/* }}} */
/* Forward declarations */
static bool phongo_split_namespace(const char *namespace, char **dbname, char **cname);
/* {{{ Error reporting and logging */
zend_class_entry* phongo_exception_from_phongo_domain(php_phongo_error_domain_t domain)
{
switch (domain) {
case PHONGO_ERROR_INVALID_ARGUMENT:
return php_phongo_invalidargumentexception_ce;
case PHONGO_ERROR_LOGIC:
return php_phongo_logicexception_ce;
case PHONGO_ERROR_RUNTIME:
return php_phongo_runtimeexception_ce;
case PHONGO_ERROR_UNEXPECTED_VALUE:
return php_phongo_unexpectedvalueexception_ce;
case PHONGO_ERROR_MONGOC_FAILED:
return php_phongo_runtimeexception_ce;
case PHONGO_ERROR_WRITE_FAILED:
return php_phongo_bulkwriteexception_ce;
case PHONGO_ERROR_CONNECTION_FAILED:
return php_phongo_connectionexception_ce;
}
MONGOC_ERROR("Resolving unknown phongo error domain: %d", domain);
return php_phongo_runtimeexception_ce;
}
zend_class_entry* phongo_exception_from_mongoc_domain(uint32_t /* mongoc_error_domain_t */ domain, uint32_t /* mongoc_error_code_t */ code)
{
switch(code) {
case 50: /* ExceededTimeLimit */
return php_phongo_executiontimeoutexception_ce;
case MONGOC_ERROR_STREAM_SOCKET:
case MONGOC_ERROR_SERVER_SELECTION_FAILURE:
return php_phongo_connectiontimeoutexception_ce;
case MONGOC_ERROR_CLIENT_AUTHENTICATE:
return php_phongo_authenticationexception_ce;
case MONGOC_ERROR_COMMAND_INVALID_ARG:
return php_phongo_invalidargumentexception_ce;
case MONGOC_ERROR_STREAM_INVALID_TYPE:
case MONGOC_ERROR_STREAM_INVALID_STATE:
case MONGOC_ERROR_STREAM_NAME_RESOLUTION:
case MONGOC_ERROR_STREAM_CONNECT:
case MONGOC_ERROR_STREAM_NOT_ESTABLISHED:
return php_phongo_connectionexception_ce;
case MONGOC_ERROR_CLIENT_NOT_READY:
case MONGOC_ERROR_CLIENT_TOO_BIG:
case MONGOC_ERROR_CLIENT_TOO_SMALL:
case MONGOC_ERROR_CLIENT_GETNONCE:
case MONGOC_ERROR_CLIENT_NO_ACCEPTABLE_PEER:
case MONGOC_ERROR_CLIENT_IN_EXHAUST:
case MONGOC_ERROR_PROTOCOL_INVALID_REPLY:
case MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION:
case MONGOC_ERROR_CURSOR_INVALID_CURSOR:
case MONGOC_ERROR_QUERY_FAILURE:
/*case MONGOC_ERROR_PROTOCOL_ERROR:*/
case MONGOC_ERROR_BSON_INVALID:
case MONGOC_ERROR_MATCHER_INVALID:
case MONGOC_ERROR_NAMESPACE_INVALID:
case MONGOC_ERROR_COLLECTION_INSERT_FAILED:
case MONGOC_ERROR_GRIDFS_INVALID_FILENAME:
case MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND:
case MONGOC_ERROR_QUERY_NOT_TAILABLE:
return php_phongo_runtimeexception_ce;
}
switch (domain) {
case MONGOC_ERROR_CLIENT:
case MONGOC_ERROR_STREAM:
case MONGOC_ERROR_PROTOCOL:
case MONGOC_ERROR_CURSOR:
case MONGOC_ERROR_QUERY:
case MONGOC_ERROR_INSERT:
case MONGOC_ERROR_SASL:
case MONGOC_ERROR_BSON:
case MONGOC_ERROR_MATCHER:
case MONGOC_ERROR_NAMESPACE:
case MONGOC_ERROR_COMMAND:
case MONGOC_ERROR_COLLECTION:
case MONGOC_ERROR_GRIDFS:
/* FIXME: We don't have the Exceptions mocked yet.. */
#if 0
return phongo_ce_mongo_connection_exception;
#endif
default:
return php_phongo_runtimeexception_ce;
}
}
void phongo_throw_exception(php_phongo_error_domain_t domain TSRMLS_DC, const char *format, ...)
{
va_list args;
char *message;
int message_len;
va_start(args, format);
message_len = vspprintf(&message, 0, format, args);
zend_throw_exception(phongo_exception_from_phongo_domain(domain), message, 0 TSRMLS_CC);
efree(message);
va_end(args);
}
void phongo_throw_exception_from_bson_error_t(bson_error_t *error TSRMLS_DC)
{
zend_throw_exception(phongo_exception_from_mongoc_domain(error->domain, error->code), error->message, error->code TSRMLS_CC);
}
+
static void php_phongo_log(mongoc_log_level_t log_level, const char *log_domain, const char *message, void *user_data)
{
- phongo_char *dt;
+ struct timeval tv;
+ time_t t;
+ phongo_long tu;
+ phongo_char *dt;
PHONGO_TSRMLS_FETCH_FROM_CTX(user_data);
(void)user_data;
- dt = php_format_date((char *) ZEND_STRL("Y-m-d\\TH:i:sP"), time(NULL), 0 TSRMLS_CC);
+ gettimeofday(&tv, NULL);
+ t = tv.tv_sec;
+ tu = tv.tv_usec;
+
+ dt = php_format_date((char *) ZEND_STRL("Y-m-d\\TH:i:s"), t, 0 TSRMLS_CC);
- fprintf(MONGODB_G(debug_fd), "[%s] %10s: %-8s> %s\n", ZSTR_VAL(dt), log_domain, mongoc_log_level_str(log_level), message);
+ fprintf(MONGODB_G(debug_fd), "[%s.%06" PHONGO_LONG_FORMAT "+00:00] %10s: %-8s> %s\n", ZSTR_VAL(dt), tu, log_domain, mongoc_log_level_str(log_level), message);
fflush(MONGODB_G(debug_fd));
efree(dt);
}
/* }}} */
/* {{{ Init objects */
static void phongo_cursor_init(zval *return_value, mongoc_client_t *client, mongoc_cursor_t *cursor, zval *readPreference TSRMLS_DC) /* {{{ */
{
php_phongo_cursor_t *intern;
object_init_ex(return_value, php_phongo_cursor_ce);
intern = Z_CURSOR_OBJ_P(return_value);
intern->cursor = cursor;
intern->server_id = mongoc_cursor_get_hint(cursor);
intern->client = client;
+ intern->advanced = false;
if (readPreference) {
#if PHP_VERSION_ID >= 70000
ZVAL_ZVAL(&intern->read_preference, readPreference, 1, 0);
#else
Z_ADDREF_P(readPreference);
intern->read_preference = readPreference;
#endif
}
} /* }}} */
static void phongo_cursor_init_for_command(zval *return_value, mongoc_client_t *client, mongoc_cursor_t *cursor, const char *db, zval *command, zval *readPreference TSRMLS_DC) /* {{{ */
{
php_phongo_cursor_t *intern;
phongo_cursor_init(return_value, client, cursor, readPreference TSRMLS_CC);
intern = Z_CURSOR_OBJ_P(return_value);
intern->database = estrdup(db);
#if PHP_VERSION_ID >= 70000
ZVAL_ZVAL(&intern->command, command, 1, 0);
#else
Z_ADDREF_P(command);
intern->command = command;
#endif
} /* }}} */
static void phongo_cursor_init_for_query(zval *return_value, mongoc_client_t *client, mongoc_cursor_t *cursor, const char *namespace, zval *query, zval *readPreference TSRMLS_DC) /* {{{ */
{
php_phongo_cursor_t *intern;
phongo_cursor_init(return_value, client, cursor, readPreference TSRMLS_CC);
intern = Z_CURSOR_OBJ_P(return_value);
/* namespace has already been validated by phongo_execute_query() */
phongo_split_namespace(namespace, &intern->database, &intern->collection);
+ /* cursor has already been advanced by phongo_execute_query() calling
+ * phongo_cursor_advance_and_check_for_error() */
+ intern->advanced = true;
+
#if PHP_VERSION_ID >= 70000
ZVAL_ZVAL(&intern->query, query, 1, 0);
#else
Z_ADDREF_P(query);
intern->query = query;
#endif
} /* }}} */
-void phongo_server_init(zval *return_value, mongoc_client_t *client, int server_id TSRMLS_DC) /* {{{ */
+void phongo_server_init(zval *return_value, mongoc_client_t *client, uint32_t server_id TSRMLS_DC) /* {{{ */
{
php_phongo_server_t *server;
object_init_ex(return_value, php_phongo_server_ce);
server = Z_SERVER_OBJ_P(return_value);
server->server_id = server_id;
server->client = client;
}
/* }}} */
+void phongo_session_init(zval *return_value, mongoc_client_session_t *client_session TSRMLS_DC) /* {{{ */
+{
+ php_phongo_session_t *session;
+
+ object_init_ex(return_value, php_phongo_session_ce);
+
+ session = Z_SESSION_OBJ_P(return_value);
+ session->client_session = client_session;
+}
+/* }}} */
+
void phongo_readconcern_init(zval *return_value, const mongoc_read_concern_t *read_concern TSRMLS_DC) /* {{{ */
{
php_phongo_readconcern_t *intern;
object_init_ex(return_value, php_phongo_readconcern_ce);
intern = Z_READCONCERN_OBJ_P(return_value);
intern->read_concern = mongoc_read_concern_copy(read_concern);
}
/* }}} */
void phongo_readpreference_init(zval *return_value, const mongoc_read_prefs_t *read_prefs TSRMLS_DC) /* {{{ */
{
php_phongo_readpreference_t *intern;
object_init_ex(return_value, php_phongo_readpreference_ce);
intern = Z_READPREFERENCE_OBJ_P(return_value);
intern->read_preference = mongoc_read_prefs_copy(read_prefs);
}
/* }}} */
void phongo_writeconcern_init(zval *return_value, const mongoc_write_concern_t *write_concern TSRMLS_DC) /* {{{ */
{
php_phongo_writeconcern_t *intern;
object_init_ex(return_value, php_phongo_writeconcern_ce);
intern = Z_WRITECONCERN_OBJ_P(return_value);
intern->write_concern = mongoc_write_concern_copy(write_concern);
}
/* }}} */
zend_bool phongo_writeconcernerror_init(zval *return_value, bson_t *bson TSRMLS_DC) /* {{{ */
{
bson_iter_t iter;
php_phongo_writeconcernerror_t *intern;
object_init_ex(return_value, php_phongo_writeconcernerror_ce);
intern = Z_WRITECONCERNERROR_OBJ_P(return_value);
if (bson_iter_init_find(&iter, bson, "code") && BSON_ITER_HOLDS_INT32(&iter)) {
intern->code = bson_iter_int32(&iter);
}
if (bson_iter_init_find(&iter, bson, "errmsg") && BSON_ITER_HOLDS_UTF8(&iter)) {
uint32_t errmsg_len;
const char *err_msg = bson_iter_utf8(&iter, &errmsg_len);
intern->message = estrndup(err_msg, errmsg_len);
}
if (bson_iter_init_find(&iter, bson, "errInfo") && BSON_ITER_HOLDS_DOCUMENT(&iter)) {
uint32_t len;
const uint8_t *data = NULL;
bson_iter_document(&iter, &len, &data);
if (!php_phongo_bson_to_zval(data, len, &intern->info)) {
zval_ptr_dtor(&intern->info);
ZVAL_UNDEF(&intern->info);
return false;
}
}
return true;
} /* }}} */
/* Populates a new WriteError object from the "code", "errmsg", "errInfo", and
 * "index" fields of a server reply document. Returns false (with the
 * "errInfo" zval discarded) if BSON-to-zval conversion fails. */
zend_bool phongo_writeerror_init(zval *return_value, bson_t *bson TSRMLS_DC) /* {{{ */
{
	bson_iter_t it;
	php_phongo_writeerror_t *error_obj;

	object_init_ex(return_value, php_phongo_writeerror_ce);
	error_obj = Z_WRITEERROR_OBJ_P(return_value);

	if (bson_iter_init_find(&it, bson, "code") && BSON_ITER_HOLDS_INT32(&it)) {
		error_obj->code = bson_iter_int32(&it);
	}

	if (bson_iter_init_find(&it, bson, "errmsg") && BSON_ITER_HOLDS_UTF8(&it)) {
		uint32_t msg_len;
		const char *msg = bson_iter_utf8(&it, &msg_len);

		error_obj->message = estrndup(msg, msg_len);
	}

	if (bson_iter_init_find(&it, bson, "errInfo") && BSON_ITER_HOLDS_DOCUMENT(&it)) {
		uint32_t doc_len;
		const uint8_t *doc_data = NULL;

		bson_iter_document(&it, &doc_len, &doc_data);

		if (!php_phongo_bson_to_zval(doc_data, doc_len, &error_obj->info)) {
			/* Conversion failed: discard the partial zval and report failure */
			zval_ptr_dtor(&error_obj->info);
			ZVAL_UNDEF(&error_obj->info);
			return false;
		}
	}

	if (bson_iter_init_find(&it, bson, "index") && BSON_ITER_HOLDS_INT32(&it)) {
		error_obj->index = bson_iter_int32(&it);
	}

	return true;
} /* }}} */
-static php_phongo_writeresult_t *phongo_writeresult_init(zval *return_value, bson_t *reply, mongoc_client_t *client, int server_id TSRMLS_DC) /* {{{ */
+static php_phongo_writeresult_t *phongo_writeresult_init(zval *return_value, bson_t *reply, mongoc_client_t *client, uint32_t server_id TSRMLS_DC) /* {{{ */
{
php_phongo_writeresult_t *writeresult;
object_init_ex(return_value, php_phongo_writeresult_ce);
writeresult = Z_WRITERESULT_OBJ_P(return_value);
writeresult->reply = bson_copy(reply);
writeresult->server_id = server_id;
writeresult->client = client;
return writeresult;
} /* }}} */
/* }}} */
/* {{{ CRUD */
/* Splits a "db.collection" namespace into database and collection names, each
 * allocated with estrdup/estrndup and owned by the caller. Either output
 * pointer may be NULL to skip that component. Returns false if the namespace
 * contains no '.' separator. */
static bool phongo_split_namespace(const char *namespace, char **dbname, char **cname) /* {{{ */
{
	const char *dot = strchr(namespace, '.');

	if (dot == NULL) {
		return false;
	}

	if (cname) {
		/* Collection name is everything after the first dot */
		*cname = estrdup(dot + 1);
	}

	if (dbname) {
		/* Database name is everything before the first dot */
		*dbname = estrndup(namespace, dot - namespace);
	}

	return true;
} /* }}} */
-mongoc_bulk_operation_t *phongo_bulkwrite_init(zend_bool ordered) { /* {{{ */
- return mongoc_bulk_operation_new(ordered);
+/* Parses the "readConcern" option for an execute method. If mongoc_opts is not
+ * NULL, the option will be appended. On error, false is returned and an
+ * exception is thrown. */
+static bool phongo_parse_read_concern(zval *options, bson_t *mongoc_opts TSRMLS_DC) /* {{{ */
+{
+ zval *option = NULL;
+ mongoc_read_concern_t *read_concern;
+
+ if (!options) {
+ return true;
+ }
+
+ if (Z_TYPE_P(options) != IS_ARRAY) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected options to be array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(options));
+ return false;
+ }
+
+ option = php_array_fetchc(options, "readConcern");
+
+ if (!option) {
+ return true;
+ }
+
+ if (Z_TYPE_P(option) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(option), php_phongo_readconcern_ce TSRMLS_CC)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"readConcern\" option to be %s, %s given", ZSTR_VAL(php_phongo_readconcern_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(option));
+ return false;
+ }
+
+ read_concern = Z_READCONCERN_OBJ_P(option)->read_concern;
+
+ if (mongoc_opts && !mongoc_read_concern_append(read_concern, mongoc_opts)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"readConcern\" option");
+ return false;
+ }
+
+ return true;
+} /* }}} */
+
+/* Parses the "readPreference" option for an execute method. If zreadPreference
+ * is not NULL, it will be assigned to the option. On error, false is returned
+ * and an exception is thrown. */
+bool phongo_parse_read_preference(zval *options, zval **zreadPreference TSRMLS_DC) /* {{{ */
+{
+ zval *option = NULL;
+
+ if (!options) {
+ return true;
+ }
+
+ if (Z_TYPE_P(options) != IS_ARRAY) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected options to be array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(options));
+ return false;
+ }
+
+ option = php_array_fetchc(options, "readPreference");
+
+ if (!option) {
+ return true;
+ }
+
+ if (Z_TYPE_P(option) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(option), php_phongo_readpreference_ce TSRMLS_CC)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"readPreference\" option to be %s, %s given", ZSTR_VAL(php_phongo_readpreference_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(option));
+ return false;
+ }
+
+ if (zreadPreference) {
+ *zreadPreference = option;
+ }
+
+ return true;
+} /* }}} */
+
+/* Parses the "session" option for an execute method. If mongoc_opts is not
+ * NULL, the option will be appended. If zsession is not NULL, it will be
+ * assigned to the option. On error, false is returned and an exception is
+ * thrown. */
+static bool phongo_parse_session(zval *options, bson_t *mongoc_opts, zval **zsession, mongoc_client_t *client TSRMLS_DC) /* {{{ */
+{
+ zval *option = NULL;
+ const mongoc_client_session_t *client_session;
+
+ if (!options) {
+ return true;
+ }
+
+ if (Z_TYPE_P(options) != IS_ARRAY) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected options to be array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(options));
+ return false;
+ }
+
+ option = php_array_fetchc(options, "session");
+
+ if (!option) {
+ return true;
+ }
+
+ if (Z_TYPE_P(option) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(option), php_phongo_session_ce TSRMLS_CC)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"session\" option to be %s, %s given", ZSTR_VAL(php_phongo_session_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(option));
+ return false;
+ }
+
+ client_session = Z_SESSION_OBJ_P(option)->client_session;
+
+ if (client != mongoc_client_session_get_client(client_session)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Cannot use Session started from a different Manager");
+ return false;
+ }
+
+ if (mongoc_opts && !mongoc_client_session_append(client_session, mongoc_opts, NULL)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"session\" option");
+ return false;
+ }
+
+ if (zsession) {
+ *zsession = option;
+ }
+
+ return true;
} /* }}} */
-bool phongo_execute_write(mongoc_client_t *client, const char *namespace, php_phongo_bulkwrite_t *bulk_write, const mongoc_write_concern_t *write_concern, int server_id, zval *return_value, int return_value_used TSRMLS_DC) /* {{{ */
+/* Parses the "writeConcern" option for an execute method. If mongoc_opts is not
+ * NULL, the option will be appended. If zwriteConcern is not NULL, it will be
+ * assigned to the option. On error, false is returned and an exception is
+ * thrown. */
+static bool phongo_parse_write_concern(zval *options, bson_t *mongoc_opts, zval **zwriteConcern TSRMLS_DC) /* {{{ */
+{
+ zval *option = NULL;
+ mongoc_write_concern_t *write_concern;
+
+ if (!options) {
+ return true;
+ }
+
+ if (Z_TYPE_P(options) != IS_ARRAY) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected options to be array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(options));
+ return false;
+ }
+
+ option = php_array_fetchc(options, "writeConcern");
+
+ if (!option) {
+ return true;
+ }
+
+ if (Z_TYPE_P(option) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(option), php_phongo_writeconcern_ce TSRMLS_CC)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"writeConcern\" option to be %s, %s given", ZSTR_VAL(php_phongo_writeconcern_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(option));
+ return false;
+ }
+
+ write_concern = Z_WRITECONCERN_OBJ_P(option)->write_concern;
+
+ if (mongoc_opts && !mongoc_write_concern_append(write_concern, mongoc_opts)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"writeConcern\" option");
+ return false;
+ }
+
+ if (zwriteConcern) {
+ *zwriteConcern = option;
+ }
+
+ return true;
+}
+
+bool phongo_execute_bulk_write(mongoc_client_t *client, const char *namespace, php_phongo_bulkwrite_t *bulk_write, zval *options, uint32_t server_id, zval *return_value, int return_value_used TSRMLS_DC) /* {{{ */
{
bson_error_t error;
int success;
bson_t reply = BSON_INITIALIZER;
mongoc_bulk_operation_t *bulk = bulk_write->bulk;
php_phongo_writeresult_t *writeresult;
+ zval *zwriteConcern = NULL;
+ zval *zsession = NULL;
+ const mongoc_write_concern_t *write_concern = NULL;
if (bulk_write->executed) {
phongo_throw_exception(PHONGO_ERROR_WRITE_FAILED TSRMLS_CC, "BulkWrite objects may only be executed once and this instance has already been executed");
return false;
}
if (!phongo_split_namespace(namespace, &bulk_write->database, &bulk_write->collection)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s: %s", "Invalid namespace provided", namespace);
return false;
}
+ if (!phongo_parse_session(options, NULL, &zsession, client TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ return false;
+ }
+
+ if (!phongo_parse_write_concern(options, NULL, &zwriteConcern TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ return false;
+ }
+
mongoc_bulk_operation_set_database(bulk, bulk_write->database);
mongoc_bulk_operation_set_collection(bulk, bulk_write->collection);
mongoc_bulk_operation_set_client(bulk, client);
+ mongoc_bulk_operation_set_hint(bulk, server_id);
+
+ if (zsession) {
+ mongoc_bulk_operation_set_client_session(bulk, Z_SESSION_OBJ_P(zsession)->client_session);
+ }
/* If a write concern was not specified, libmongoc will use the client's
* write concern; however, we should still fetch it for the write result. */
+ write_concern = phongo_write_concern_from_zval(zwriteConcern TSRMLS_CC);
if (write_concern) {
mongoc_bulk_operation_set_write_concern(bulk, write_concern);
} else {
write_concern = mongoc_client_get_write_concern(client);
}
- if (server_id > 0) {
- mongoc_bulk_operation_set_hint(bulk, server_id);
- }
-
success = mongoc_bulk_operation_execute(bulk, &reply, &error);
bulk_write->executed = true;
/* Write succeeded and the user doesn't care for the results */
if (success && !return_value_used) {
bson_destroy(&reply);
return true;
}
/* Check for connection related exceptions */
if (EG(exception)) {
bson_destroy(&reply);
return false;
}
writeresult = phongo_writeresult_init(return_value, &reply, client, mongoc_bulk_operation_get_hint(bulk) TSRMLS_CC);
writeresult->write_concern = mongoc_write_concern_copy(write_concern);
/* The Write failed */
if (!success) {
if ((error.domain == MONGOC_ERROR_COMMAND && error.code != MONGOC_ERROR_COMMAND_INVALID_ARG) ||
error.domain == MONGOC_ERROR_WRITE_CONCERN) {
phongo_throw_exception(PHONGO_ERROR_WRITE_FAILED TSRMLS_CC, "%s", error.message);
phongo_add_exception_prop(ZEND_STRL("writeResult"), return_value TSRMLS_CC);
} else {
phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
}
}
bson_destroy(&reply);
return success;
} /* }}} */
-/* Advance the cursor and return whether there is an error. On error, the cursor
- * will be destroyed and an exception will be thrown. */
-static bool phongo_advance_cursor_and_check_for_error(mongoc_cursor_t *cursor TSRMLS_DC)
+/* Advance the cursor and return whether there is an error. On error, false is
+ * returned and an exception is thrown. */
+bool phongo_cursor_advance_and_check_for_error(mongoc_cursor_t *cursor TSRMLS_DC) /* {{{ */
{
const bson_t *doc;
if (!mongoc_cursor_next(cursor, &doc)) {
bson_error_t error;
/* Check for connection related exceptions */
if (EG(exception)) {
- mongoc_cursor_destroy(cursor);
return false;
}
/* Could simply be no docs, which is not an error */
if (mongoc_cursor_error(cursor, &error)) {
phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
- mongoc_cursor_destroy(cursor);
return false;
}
}
return true;
-}
+} /* }}} */
-int phongo_execute_query(mongoc_client_t *client, const char *namespace, zval *zquery, zval *zreadPreference, int server_id, zval *return_value, int return_value_used TSRMLS_DC) /* {{{ */
+int phongo_execute_query(mongoc_client_t *client, const char *namespace, zval *zquery, zval *options, uint32_t server_id, zval *return_value, int return_value_used TSRMLS_DC) /* {{{ */
{
const php_phongo_query_t *query;
mongoc_cursor_t *cursor;
char *dbname;
char *collname;
mongoc_collection_t *collection;
+ zval *zreadPreference = NULL;
if (!phongo_split_namespace(namespace, &dbname, &collname)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s: %s", "Invalid namespace provided", namespace);
return false;
}
collection = mongoc_client_get_collection(client, dbname, collname);
efree(dbname);
efree(collname);
query = Z_QUERY_OBJ_P(zquery);
if (query->read_concern) {
mongoc_collection_set_read_concern(collection, query->read_concern);
}
- cursor = mongoc_collection_find_with_opts(collection, query->filter, query->opts, phongo_read_preference_from_zval(zreadPreference TSRMLS_CC));
- mongoc_collection_destroy(collection);
+ if (!phongo_parse_read_preference(options, &zreadPreference TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ mongoc_collection_destroy(collection);
+ return false;
+ }
- if (server_id > 0 && !mongoc_cursor_set_hint(cursor, server_id)) {
- phongo_throw_exception(PHONGO_ERROR_MONGOC_FAILED TSRMLS_CC, "%s", "Could not set cursor server_id");
+ if (!phongo_parse_session(options, query->opts, NULL, client TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ mongoc_collection_destroy(collection);
return false;
}
+ if (!BSON_APPEND_INT32(query->opts, "serverId", server_id)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"serverId\" option");
+ mongoc_collection_destroy(collection);
+ return false;
+ }
+
+ cursor = mongoc_collection_find_with_opts(collection, query->filter, query->opts, phongo_read_preference_from_zval(zreadPreference TSRMLS_CC));
+ mongoc_collection_destroy(collection);
+
/* maxAwaitTimeMS must be set before the cursor is sent */
if (query->max_await_time_ms) {
mongoc_cursor_set_max_await_time_ms(cursor, query->max_await_time_ms);
}
- if (!phongo_advance_cursor_and_check_for_error(cursor TSRMLS_CC)) {
+ if (!phongo_cursor_advance_and_check_for_error(cursor TSRMLS_CC)) {
+ mongoc_cursor_destroy(cursor);
return false;
}
if (!return_value_used) {
mongoc_cursor_destroy(cursor);
return true;
}
phongo_cursor_init_for_query(return_value, client, cursor, namespace, zquery, zreadPreference TSRMLS_CC);
return true;
} /* }}} */
-int phongo_execute_command(mongoc_client_t *client, const char *db, zval *zcommand, zval *zreadPreference, int server_id, zval *return_value, int return_value_used TSRMLS_DC) /* {{{ */
+static bson_t *create_wrapped_command_envelope(const char *db, bson_t *reply)
+{
+ bson_t *tmp;
+ size_t max_ns_len = strlen(db) + 5 + 1; /* db + ".$cmd" + '\0' */
+ char *ns = emalloc(max_ns_len);
+
+ snprintf(ns, max_ns_len, "%s.$cmd", db);
+ tmp = BCON_NEW("cursor", "{", "id", BCON_INT64(0), "ns", BCON_UTF8(ns), "firstBatch", "[", BCON_DOCUMENT(reply), "]", "}");
+ efree(ns);
+
+ return tmp;
+}
+
+int phongo_execute_command(mongoc_client_t *client, php_phongo_command_type_t type, const char *db, zval *zcommand, zval *options, uint32_t server_id, zval *return_value, int return_value_used TSRMLS_DC) /* {{{ */
{
const php_phongo_command_t *command;
- mongoc_cursor_t *cursor;
bson_iter_t iter;
+ bson_t reply;
+ bson_error_t error;
+ bson_t opts = BSON_INITIALIZER;
+ mongoc_cursor_t *cmd_cursor;
+ zval *zreadPreference = NULL;
+ int result;
command = Z_COMMAND_OBJ_P(zcommand);
- cursor = mongoc_client_command(client, db, MONGOC_QUERY_NONE, 0, 1, 0, command->bson, NULL, phongo_read_preference_from_zval(zreadPreference TSRMLS_CC));
+ if ((type & PHONGO_OPTION_READ_CONCERN) && !phongo_parse_read_concern(options, &opts TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ bson_destroy(&opts);
+ return false;
+ }
+
+ if ((type & PHONGO_OPTION_READ_PREFERENCE) && !phongo_parse_read_preference(options, &zreadPreference TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ bson_destroy(&opts);
+ return false;
+ }
- if (server_id > 0 && !mongoc_cursor_set_hint(cursor, server_id)) {
- phongo_throw_exception(PHONGO_ERROR_MONGOC_FAILED TSRMLS_CC, "%s", "Could not set cursor server_id");
+ if (!phongo_parse_session(options, &opts, NULL, client TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ bson_destroy(&opts);
return false;
}
- if (!phongo_advance_cursor_and_check_for_error(cursor TSRMLS_CC)) {
+ if ((type & PHONGO_OPTION_WRITE_CONCERN) && !phongo_parse_write_concern(options, &opts, NULL TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ bson_destroy(&opts);
+ return false;
+ }
+
+ if (!BSON_APPEND_INT32(&opts, "serverId", server_id)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"serverId\" option");
+ bson_destroy(&opts);
+ return false;
+ }
+
+ /* Although "opts" already always includes the serverId option, the read
+ * preference is added to the command parts, which is relevant for mongos
+ * command construction. */
+ switch (type) {
+ case PHONGO_COMMAND_RAW:
+ result = mongoc_client_command_with_opts(client, db, command->bson, phongo_read_preference_from_zval(zreadPreference TSRMLS_CC), &opts, &reply, &error);
+ break;
+ case PHONGO_COMMAND_READ:
+ result = mongoc_client_read_command_with_opts(client, db, command->bson, phongo_read_preference_from_zval(zreadPreference TSRMLS_CC), &opts, &reply, &error);
+ break;
+ case PHONGO_COMMAND_WRITE:
+ result = mongoc_client_write_command_with_opts(client, db, command->bson, &opts, &reply, &error);
+ break;
+ case PHONGO_COMMAND_READ_WRITE:
+ /* We can pass NULL as readPreference, as this argument was added historically, but has no function */
+ result = mongoc_client_read_write_command_with_opts(client, db, command->bson, NULL, &opts, &reply, &error);
+ break;
+ default:
+ /* Should never happen, but if it does: exception */
+ phongo_throw_exception(PHONGO_ERROR_LOGIC TSRMLS_CC, "Type '%d' should never have been passed to phongo_execute_command, please file a bug report", type);
+ bson_destroy(&opts);
+ return false;
+ }
+ if (!result) {
+ phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
+ bson_destroy(&reply);
+ bson_destroy(&opts);
return false;
}
+ bson_destroy(&opts);
+
if (!return_value_used) {
- mongoc_cursor_destroy(cursor);
+ bson_destroy(&reply);
return true;
}
- if (bson_iter_init_find(&iter, mongoc_cursor_current(cursor), "cursor") && BSON_ITER_HOLDS_DOCUMENT(&iter)) {
- mongoc_cursor_t *cmd_cursor;
+ /* According to mongoc_cursor_new_from_command_reply(), the reply bson_t
+ * is ultimately destroyed on both success and failure. */
+ if (bson_iter_init_find(&iter, &reply, "cursor") && BSON_ITER_HOLDS_DOCUMENT(&iter)) {
+ bson_t initial_reply = BSON_INITIALIZER;
- /* According to mongoc_cursor_new_from_command_reply(), the reply bson_t
- * is ultimately destroyed on both success and failure. Use bson_copy()
- * to create a writable copy of the const bson_t we fetched above. */
- cmd_cursor = mongoc_cursor_new_from_command_reply(client, bson_copy(mongoc_cursor_current(cursor)), mongoc_cursor_get_hint(cursor));
- mongoc_cursor_destroy(cursor);
+ bson_copy_to(&reply, &initial_reply);
- if (!phongo_advance_cursor_and_check_for_error(cmd_cursor TSRMLS_CC)) {
- return false;
+ if (command->max_await_time_ms) {
+ bson_append_bool(&initial_reply, "awaitData", -1, 1);
+ bson_append_int64(&initial_reply, "maxAwaitTimeMS", -1, command->max_await_time_ms);
+ bson_append_bool(&initial_reply, "tailable", -1, 1);
}
- phongo_cursor_init_for_command(return_value, client, cmd_cursor, db, zcommand, zreadPreference TSRMLS_CC);
- return true;
+ if (command->batch_size) {
+ bson_append_int64(&initial_reply, "batchSize", -1, command->batch_size);
+ }
+
+ cmd_cursor = mongoc_cursor_new_from_command_reply(client, &initial_reply, server_id);
+ bson_destroy(&reply);
+ } else {
+ bson_t *wrapped_reply = create_wrapped_command_envelope(db, &reply);
+
+ cmd_cursor = mongoc_cursor_new_from_command_reply(client, wrapped_reply, server_id);
+ bson_destroy(&reply);
}
- phongo_cursor_init_for_command(return_value, client, cursor, db, zcommand, zreadPreference TSRMLS_CC);
+ phongo_cursor_init_for_command(return_value, client, cmd_cursor, db, zcommand, zreadPreference TSRMLS_CC);
return true;
} /* }}} */
-
/* }}} */
/* {{{ mongoc types from from_zval */
/* Returns the libmongoc write concern wrapped by the given WriteConcern zval,
 * or NULL if the zval is absent or holds no object data. */
const mongoc_write_concern_t* phongo_write_concern_from_zval(zval *zwrite_concern TSRMLS_DC) /* {{{ */
{
	php_phongo_writeconcern_t *obj;

	if (!zwrite_concern) {
		return NULL;
	}

	obj = Z_WRITECONCERN_OBJ_P(zwrite_concern);

	return obj ? obj->write_concern : NULL;
} /* }}} */
/* Returns the libmongoc read concern wrapped by the given ReadConcern zval,
 * or NULL if the zval is absent or holds no object data. */
const mongoc_read_concern_t* phongo_read_concern_from_zval(zval *zread_concern TSRMLS_DC) /* {{{ */
{
	php_phongo_readconcern_t *obj;

	if (!zread_concern) {
		return NULL;
	}

	obj = Z_READCONCERN_OBJ_P(zread_concern);

	return obj ? obj->read_concern : NULL;
} /* }}} */
/* Returns the libmongoc read preference wrapped by the given ReadPreference
 * zval, or NULL if the zval is absent or holds no object data. */
const mongoc_read_prefs_t* phongo_read_preference_from_zval(zval *zread_preference TSRMLS_DC) /* {{{ */
{
	php_phongo_readpreference_t *obj;

	if (!zread_preference) {
		return NULL;
	}

	obj = Z_READPREFERENCE_OBJ_P(zread_preference);

	return obj ? obj->read_preference : NULL;
} /* }}} */
/* }}} */
/* {{{ phongo zval from mongoc types */
void php_phongo_cursor_id_new_from_id(zval *object, int64_t cursorid TSRMLS_DC) /* {{{ */
{
	php_phongo_cursorid_t *obj;

	/* Create a new CursorId object wrapping the given 64-bit cursor id. */
	object_init_ex(object, php_phongo_cursorid_ce);
	obj = Z_CURSORID_OBJ_P(object);
	obj->id = cursorid;
} /* }}} */
void php_phongo_objectid_new_from_oid(zval *object, const bson_oid_t *oid TSRMLS_DC) /* {{{ */
{
	php_phongo_objectid_t *obj;

	/* Create a new ObjectId object holding the string form of the libbson
	 * OID, and mark it initialized. */
	object_init_ex(object, php_phongo_objectid_ce);
	obj = Z_OBJECTID_OBJ_P(object);
	bson_oid_to_string(oid, obj->oid);
	obj->initialized = true;
} /* }}} */
/* Maps a libmongoc server description type string onto the driver's enum;
 * unmatched names fall back to PHONGO_SERVER_UNKNOWN. */
php_phongo_server_description_type_t php_phongo_server_description_type(mongoc_server_description_t *sd)
{
	const char *type_name = mongoc_server_description_type(sd);
	int idx;

	for (idx = 0; idx < PHONGO_SERVER_DESCRIPTION_TYPES; idx++) {
		if (strcmp(type_name, php_phongo_server_description_type_map[idx].name) == 0) {
			return php_phongo_server_description_type_map[idx].type;
		}
	}

	return PHONGO_SERVER_UNKNOWN;
}
/* Converts a libmongoc server description into a PHP array for debugging
 * output: host, port, type, role booleans, tag set, the last isMaster reply,
 * and the measured round-trip time. */
void php_phongo_server_to_zval(zval *retval, mongoc_server_description_t *sd) /* {{{ */
{
	mongoc_host_list_t *host = mongoc_server_description_host(sd);
	const bson_t *is_master = mongoc_server_description_ismaster(sd);
	bson_iter_t iter;

	array_init(retval);

	ADD_ASSOC_STRING(retval, "host", host->host);
	ADD_ASSOC_LONG_EX(retval, "port", host->port);
	ADD_ASSOC_LONG_EX(retval, "type", php_phongo_server_description_type(sd));
	/* Role flags are derived by comparing the description's type string with
	 * the corresponding entries of the server description type map. */
	ADD_ASSOC_BOOL_EX(retval, "is_primary", !strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_PRIMARY].name));
	ADD_ASSOC_BOOL_EX(retval, "is_secondary", !strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_SECONDARY].name));
	ADD_ASSOC_BOOL_EX(retval, "is_arbiter", !strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_ARBITER].name));
	/* "hidden" and "passive" are read case-insensitively from the isMaster
	 * reply and coerced to booleans. */
	ADD_ASSOC_BOOL_EX(retval, "is_hidden", bson_iter_init_find_case(&iter, is_master, "hidden") && bson_iter_as_bool(&iter));
	ADD_ASSOC_BOOL_EX(retval, "is_passive", bson_iter_init_find_case(&iter, is_master, "passive") && bson_iter_as_bool(&iter));

	if (bson_iter_init_find(&iter, is_master, "tags") && BSON_ITER_HOLDS_DOCUMENT(&iter)) {
		const uint8_t *bytes;
		uint32_t len;
		php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;

		/* Use native arrays for debugging output */
		state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
		state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY;

		bson_iter_document(&iter, &len, &bytes);
		php_phongo_bson_to_zval_ex(bytes, len, &state);

#if PHP_VERSION_ID >= 70000
		ADD_ASSOC_ZVAL_EX(retval, "tags", &state.zchild);
#else
		ADD_ASSOC_ZVAL_EX(retval, "tags", state.zchild);
#endif
	}

	{
		php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;

		/* Use native arrays for debugging output */
		state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
		state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY;

		/* Expose the complete, most recent isMaster reply document. */
		php_phongo_bson_to_zval_ex(bson_get_data(is_master), is_master->len, &state);

#if PHP_VERSION_ID >= 70000
		ADD_ASSOC_ZVAL_EX(retval, "last_is_master", &state.zchild);
#else
		ADD_ASSOC_ZVAL_EX(retval, "last_is_master", state.zchild);
#endif
	}

	ADD_ASSOC_LONG_EX(retval, "round_trip_time", (phongo_long) mongoc_server_description_round_trip_time(sd));
} /* }}} */
/* Converts a libmongoc read concern into a PHP array. The "level" key is
 * present only when a level has been set. */
void php_phongo_read_concern_to_zval(zval *retval, const mongoc_read_concern_t *read_concern) /* {{{ */
{
	const char *level;

	array_init_size(retval, 1);

	level = mongoc_read_concern_get_level(read_concern);

	if (level != NULL) {
		ADD_ASSOC_STRING(retval, "level", level);
	}
} /* }}} */
+/* If options is not an array, insert it as a field in a newly allocated array.
+ * This may be used to convert legacy options (e.g. ReadPreference option for
+ * an executeQuery method) into an options array.
+ *
+ * A pointer to the array zval will always be returned. If allocated is set to
+ * true, php_phongo_prep_legacy_option_free() should be used to free the array
+ * zval later. */
+zval *php_phongo_prep_legacy_option(zval *options, const char *key, bool *allocated TSRMLS_DC) /* {{{ */
+{
+ *allocated = false;
+
+ if (options && Z_TYPE_P(options) != IS_ARRAY) {
+#if PHP_VERSION_ID >= 70000
+ zval *new_options = ecalloc(sizeof(zval), 1);
+#else
+ zval *new_options = NULL;
+ ALLOC_INIT_ZVAL(new_options);
+#endif
+
+ array_init_size(new_options, 1);
+ add_assoc_zval(new_options, key, options);
+ Z_ADDREF_P(options);
+ *allocated = true;
+
+ return new_options;
+ }
+
+ return options;
+} /* }}} */
+
+void php_phongo_prep_legacy_option_free(zval *options TSRMLS_DC) /* {{{ */
+{
+#if PHP_VERSION_ID >= 70000
+ zval_ptr_dtor(options);
+ efree(options);
+#else
+ zval_ptr_dtor(&options);
+#endif
+} /* }}} */
+
+
/* Prepare tagSets for BSON encoding by converting each array in the set to an
 * object. This ensures that empty arrays will serialize as empty documents.
 *
 * php_phongo_read_preference_tags_are_valid() handles actual validation of the
 * tag set structure. */
void php_phongo_read_preference_prep_tagsets(zval *tagSets TSRMLS_DC) /* {{{ */
{
	HashTable *ht_data;

	/* Non-array input is left untouched; validation happens elsewhere. */
	if (Z_TYPE_P(tagSets) != IS_ARRAY) {
		return;
	}

	ht_data = HASH_OF(tagSets);

#if PHP_VERSION_ID >= 70000
	/* PHP 7: iterate with the foreach macros; dereference references and
	 * separate each array zval before converting it in place to an object. */
	{
		zval *tagSet;

		ZEND_HASH_FOREACH_VAL(ht_data, tagSet) {
			ZVAL_DEREF(tagSet);
			if (Z_TYPE_P(tagSet) == IS_ARRAY) {
				SEPARATE_ZVAL_NOREF(tagSet);
				convert_to_object(tagSet);
			}
		} ZEND_HASH_FOREACH_END();
	}
#else
	/* PHP 5: walk the hash with an explicit position, separating each array
	 * zval if it is not a reference before converting it to an object. */
	{
		HashPosition pos;
		zval **tagSet;

		for (zend_hash_internal_pointer_reset_ex(ht_data, &pos);
				zend_hash_get_current_data_ex(ht_data, (void **) &tagSet, &pos) == SUCCESS;
				zend_hash_move_forward_ex(ht_data, &pos)) {
			if (Z_TYPE_PP(tagSet) == IS_ARRAY) {
				SEPARATE_ZVAL_IF_NOT_REF(tagSet);
				convert_to_object(*tagSet);
			}
		}
	}
#endif

	return;
} /* }}} */
/* Checks if tags is valid to set on a mongoc_read_prefs_t. It may be null or
 * an array whose every element is a document; anything else is invalid. */
bool php_phongo_read_preference_tags_are_valid(const bson_t *tags) /* {{{ */
{
	bson_iter_t it;

	/* NULL or an empty document is always acceptable */
	if (bson_empty0(tags)) {
		return true;
	}

	if (!bson_iter_init(&it, tags)) {
		return false;
	}

	/* Every element of the tag set must itself be a document */
	while (bson_iter_next(&it)) {
		if (!BSON_ITER_HOLDS_DOCUMENT(&it)) {
			return false;
		}
	}

	return true;
} /* }}} */
/* Converts a libmongoc read preference into a PHP array containing the mode
 * string, any tag sets, and maxStalenessSeconds when set. */
void php_phongo_read_preference_to_zval(zval *retval, const mongoc_read_prefs_t *read_prefs) /* {{{ */
{
	const bson_t *tags = mongoc_read_prefs_get_tags(read_prefs);
	mongoc_read_mode_t mode = mongoc_read_prefs_get_mode(read_prefs);

	array_init_size(retval, 3);

	/* Translate the enum mode to its camelCase string form; unknown modes
	 * produce no "mode" key. */
	switch (mode) {
		case MONGOC_READ_PRIMARY: ADD_ASSOC_STRING(retval, "mode", "primary"); break;
		case MONGOC_READ_PRIMARY_PREFERRED: ADD_ASSOC_STRING(retval, "mode", "primaryPreferred"); break;
		case MONGOC_READ_SECONDARY: ADD_ASSOC_STRING(retval, "mode", "secondary"); break;
		case MONGOC_READ_SECONDARY_PREFERRED: ADD_ASSOC_STRING(retval, "mode", "secondaryPreferred"); break;
		case MONGOC_READ_NEAREST: ADD_ASSOC_STRING(retval, "mode", "nearest"); break;
		default: /* Do nothing */
			break;
	}

	if (!bson_empty0(tags)) {
		/* Use PHONGO_TYPEMAP_NATIVE_ARRAY for the root type since tags is an
		 * array; however, inner documents and arrays can use the default. */
		php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
		state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY;

		php_phongo_bson_to_zval_ex(bson_get_data(tags), tags->len, &state);

#if PHP_VERSION_ID >= 70000
		ADD_ASSOC_ZVAL_EX(retval, "tags", &state.zchild);
#else
		ADD_ASSOC_ZVAL_EX(retval, "tags", state.zchild);
#endif
	}

	/* Only report maxStalenessSeconds when it differs from the "no max
	 * staleness" sentinel. */
	if (mongoc_read_prefs_get_max_staleness_seconds(read_prefs) != MONGOC_NO_MAX_STALENESS) {
		ADD_ASSOC_LONG_EX(retval, "maxStalenessSeconds", mongoc_read_prefs_get_max_staleness_seconds(read_prefs));
	}
} /* }}} */
/* Converts a libmongoc write concern into a PHP array. "w" is reported as a
 * tag string, the majority constant, or a number — in that order of
 * precedence — and omitted entirely when left at the default. */
void php_phongo_write_concern_to_zval(zval *retval, const mongoc_write_concern_t *write_concern) /* {{{ */
{
	const char *wtag = mongoc_write_concern_get_wtag(write_concern);
	const int32_t w = mongoc_write_concern_get_w(write_concern);
	const int32_t wtimeout = mongoc_write_concern_get_wtimeout(write_concern);

	array_init_size(retval, 4);

	if (wtag != NULL) {
		ADD_ASSOC_STRING(retval, "w", wtag);
	} else if (mongoc_write_concern_get_wmajority(write_concern)) {
		ADD_ASSOC_STRING(retval, "w", PHONGO_WRITE_CONCERN_W_MAJORITY);
	} else if (w != MONGOC_WRITE_CONCERN_W_DEFAULT) {
		ADD_ASSOC_LONG_EX(retval, "w", w);
	}

	/* "j" is only reported when the journal flag was explicitly set */
	if (mongoc_write_concern_journal_is_set(write_concern)) {
		ADD_ASSOC_BOOL_EX(retval, "j", mongoc_write_concern_get_journal(write_concern));
	}

	if (wtimeout != 0) {
		ADD_ASSOC_LONG_EX(retval, "wtimeout", wtimeout);
	}
} /* }}} */
/* }}} */
/* Parses a MongoDB connection string into a mongoc_uri_t. On failure, throws
 * an InvalidArgument exception and returns NULL. The caller owns the returned
 * URI. NOTE(review): the "options" parameter is unused in this function —
 * confirm whether callers rely on it elsewhere. */
static mongoc_uri_t *php_phongo_make_uri(const char *uri_string, bson_t *options TSRMLS_DC) /* {{{ */
{
	bson_error_t error;
	mongoc_uri_t *uri = mongoc_uri_new_with_error(uri_string, &error);

	MONGOC_DEBUG("Connection string: '%s'", uri_string);

	if (uri == NULL) {
		phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse MongoDB URI: '%s'. %s.", uri_string, error.message);
		return NULL;
	}

	return uri;
} /* }}} */
/* Returns a human-readable name for a BSON type, used when building
 * error messages about URI option values. */
static const char *php_phongo_bson_type_to_string(bson_type_t type) /* {{{ */
{
	switch (type) {
		case BSON_TYPE_EOD:
			return "EOD";
		case BSON_TYPE_DOUBLE:
			return "double";
		case BSON_TYPE_UTF8:
			return "string";
		case BSON_TYPE_DOCUMENT:
			return "document";
		case BSON_TYPE_ARRAY:
			return "array";
		case BSON_TYPE_BINARY:
			return "Binary";
		case BSON_TYPE_UNDEFINED:
			return "undefined";
		case BSON_TYPE_OID:
			return "ObjectId";
		case BSON_TYPE_BOOL:
			return "boolean";
		case BSON_TYPE_DATE_TIME:
			return "UTCDateTime";
		case BSON_TYPE_NULL:
			return "null";
		case BSON_TYPE_REGEX:
			return "Regex";
		case BSON_TYPE_DBPOINTER:
			return "DBPointer";
		case BSON_TYPE_CODE:
			return "Javascript";
		case BSON_TYPE_SYMBOL:
			return "symbol";
		case BSON_TYPE_CODEWSCOPE:
			return "Javascript with scope";
		case BSON_TYPE_INT32:
			return "32-bit integer";
		case BSON_TYPE_TIMESTAMP:
			return "Timestamp";
		case BSON_TYPE_INT64:
			return "64-bit integer";
		case BSON_TYPE_DECIMAL128:
			return "Decimal128";
		case BSON_TYPE_MAXKEY:
			return "MaxKey";
		case BSON_TYPE_MINKEY:
			return "MinKey";
		default:
			return "unknown";
	}
} /* }}} */
/* Throws an InvalidArgument exception reporting that the URI option at the
 * given bson_iter_t has the wrong BSON type, naming the option key, the
 * expected type string, and the actual type. */
#define PHONGO_URI_INVALID_TYPE(iter, expected) \
	phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, \
		"Expected %s for \"%s\" URI option, %s given", \
		(expected), \
		bson_iter_key(&(iter)), \
		php_phongo_bson_type_to_string(bson_iter_type(&(iter))))
/* Applies general URI options (auth credentials, boolean/int32/string URI
 * options, GSSAPI service name, compressors) from the options document to the
 * URI. Read preference, read concern, and write concern options are skipped
 * here and handled by their dedicated php_phongo_apply_*_options_to_uri()
 * helpers. Throws InvalidArgumentException and returns false on the first
 * invalid option; returns true otherwise.
 *
 * Fixes in this revision: stray "+" diff markers that had leaked into the
 * GSSAPI service name and compressors branches were removed (they made the
 * file uncompilable), and the GSSAPI branch now destroys its local
 * `properties` document on all error paths instead of only the last one. */
static bool php_phongo_apply_options_to_uri(mongoc_uri_t *uri, bson_t *options TSRMLS_DC) /* {{{ */
{
	bson_iter_t iter;

	/* Return early if there are no options to apply */
	if (bson_empty0(options) || !bson_iter_init(&iter, options)) {
		return true;
	}

	while (bson_iter_next(&iter)) {
		const char *key = bson_iter_key(&iter);

		/* Skip read preference, read concern, and write concern options, as
		 * those will be processed by other functions. */
		if (!strcasecmp(key, MONGOC_URI_JOURNAL) ||
		    !strcasecmp(key, MONGOC_URI_MAXSTALENESSSECONDS) ||
		    !strcasecmp(key, MONGOC_URI_READCONCERNLEVEL) ||
		    !strcasecmp(key, MONGOC_URI_READPREFERENCE) ||
		    !strcasecmp(key, MONGOC_URI_READPREFERENCETAGS) ||
		    !strcasecmp(key, MONGOC_URI_SAFE) ||
		    !strcasecmp(key, MONGOC_URI_SLAVEOK) ||
		    !strcasecmp(key, MONGOC_URI_W) ||
		    !strcasecmp(key, MONGOC_URI_WTIMEOUTMS)) {
			continue;
		}

		if (mongoc_uri_option_is_bool(key)) {
			/* The option's type is not validated because bson_iter_as_bool() is
			 * used to cast the value to a boolean. Validation may be introduced
			 * in PHPC-990. */
			if (!mongoc_uri_set_option_as_bool(uri, key, bson_iter_as_bool(&iter))) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key);
				return false;
			}

			continue;
		}

		if (mongoc_uri_option_is_int32(key)) {
			if (!BSON_ITER_HOLDS_INT32(&iter)) {
				PHONGO_URI_INVALID_TYPE(iter, "32-bit integer");
				return false;
			}

			if (!mongoc_uri_set_option_as_int32(uri, key, bson_iter_int32(&iter))) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key);
				return false;
			}

			continue;
		}

		if (mongoc_uri_option_is_utf8(key)) {
			if (!BSON_ITER_HOLDS_UTF8(&iter)) {
				PHONGO_URI_INVALID_TYPE(iter, "string");
				return false;
			}

			if (!mongoc_uri_set_option_as_utf8(uri, key, bson_iter_utf8(&iter, NULL))) {
				/* Assignment uses mongoc_uri_set_appname() for the "appname"
				 * option, which validates length in addition to UTF-8 encoding.
				 * For BC, we report the invalid string to the user. */
				if (!strcasecmp(key, MONGOC_URI_APPNAME)) {
					phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Invalid appname value: '%s'", bson_iter_utf8(&iter, NULL));
				} else {
					phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key);
				}
				return false;
			}

			continue;
		}

		if (!strcasecmp(key, "username")) {
			if (!BSON_ITER_HOLDS_UTF8(&iter)) {
				PHONGO_URI_INVALID_TYPE(iter, "string");
				return false;
			}

			if (!mongoc_uri_set_username(uri, bson_iter_utf8(&iter, NULL))) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key);
				return false;
			}

			continue;
		}

		if (!strcasecmp(key, "password")) {
			if (!BSON_ITER_HOLDS_UTF8(&iter)) {
				PHONGO_URI_INVALID_TYPE(iter, "string");
				return false;
			}

			if (!mongoc_uri_set_password(uri, bson_iter_utf8(&iter, NULL))) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key);
				return false;
			}

			continue;
		}

		if (!strcasecmp(key, MONGOC_URI_AUTHMECHANISM)) {
			if (!BSON_ITER_HOLDS_UTF8(&iter)) {
				PHONGO_URI_INVALID_TYPE(iter, "string");
				return false;
			}

			if (!mongoc_uri_set_auth_mechanism(uri, bson_iter_utf8(&iter, NULL))) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key);
				return false;
			}

			continue;
		}

		if (!strcasecmp(key, MONGOC_URI_AUTHSOURCE)) {
			if (!BSON_ITER_HOLDS_UTF8(&iter)) {
				PHONGO_URI_INVALID_TYPE(iter, "string");
				return false;
			}

			if (!mongoc_uri_set_auth_source(uri, bson_iter_utf8(&iter, NULL))) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key);
				return false;
			}

			continue;
		}

		if (!strcasecmp(key, MONGOC_URI_AUTHMECHANISMPROPERTIES)) {
			bson_t properties;
			uint32_t len;
			const uint8_t *data;

			if (!BSON_ITER_HOLDS_DOCUMENT(&iter)) {
				PHONGO_URI_INVALID_TYPE(iter, "array or object");
				return false;
			}

			/* `properties` is a static view into the iterator's buffer, so it
			 * needs no bson_destroy(). */
			bson_iter_document(&iter, &len, &data);

			if (!bson_init_static(&properties, data, len)) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Could not initialize BSON structure for auth mechanism properties");
				return false;
			}

			if (!mongoc_uri_set_mechanism_properties(uri, &properties)) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key);
				return false;
			}

			continue;
		}

		if (!strcasecmp(key, MONGOC_URI_GSSAPISERVICENAME)) {
			bson_t unused, properties = BSON_INITIALIZER;

			/* The standalone gssapiServiceName option must not clobber an
			 * explicit authMechanismProperties document that was already
			 * applied above. */
			if (mongoc_uri_get_mechanism_properties(uri, &unused)) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "authMechanismProperties SERVICE_NAME already set, ignoring \"%s\"", key);
				bson_destroy(&properties);
				return false;
			}

			if (!BSON_ITER_HOLDS_UTF8(&iter)) {
				PHONGO_URI_INVALID_TYPE(iter, "string");
				bson_destroy(&properties);
				return false;
			}

			bson_append_utf8(&properties, "SERVICE_NAME", -1, bson_iter_utf8(&iter, NULL), -1);

			if (!mongoc_uri_set_mechanism_properties(uri, &properties)) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key);
				bson_destroy(&properties);
				return false;
			}

			bson_destroy(&properties);

			continue;
		}

		if (!strcasecmp(key, MONGOC_URI_COMPRESSORS)) {
			if (!BSON_ITER_HOLDS_UTF8(&iter)) {
				PHONGO_URI_INVALID_TYPE(iter, "string");
				return false;
			}

			if (!mongoc_uri_set_compressors(uri, bson_iter_utf8(&iter, NULL))) {
				phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key);
				return false;
			}

			continue;
		}
	}

	return true;
} /* }}} */
/* Applies the "readConcernLevel" option from the options document to the
 * URI's read concern. Throws InvalidArgumentException and returns false if
 * the option has the wrong type; returns true otherwise (including when the
 * option is absent).
 *
 * Fix in this revision: the second, redundant bson_iter_init_find_case()
 * lookup was removed (the early return already guarantees the iterator
 * points at the option), and the read concern is now validated before being
 * copied, so no copy needs to be destroyed on the error path. */
static bool php_phongo_apply_rc_options_to_uri(mongoc_uri_t *uri, bson_t *options TSRMLS_DC) /* {{{ */
{
	bson_iter_t iter;
	mongoc_read_concern_t *new_rc;
	const mongoc_read_concern_t *old_rc;

	if (!(old_rc = mongoc_uri_get_read_concern(uri))) {
		phongo_throw_exception(PHONGO_ERROR_MONGOC_FAILED TSRMLS_CC, "mongoc_uri_t does not have a read concern");
		return false;
	}

	/* Return early if there are no options to apply */
	if (bson_empty0(options)) {
		return true;
	}

	if (!bson_iter_init_find_case(&iter, options, MONGOC_URI_READCONCERNLEVEL)) {
		return true;
	}

	/* The iterator now points at the readConcernLevel option */
	if (!BSON_ITER_HOLDS_UTF8(&iter)) {
		PHONGO_URI_INVALID_TYPE(iter, "string");
		return false;
	}

	new_rc = mongoc_read_concern_copy(old_rc);
	mongoc_read_concern_set_level(new_rc, bson_iter_utf8(&iter, NULL));
	mongoc_uri_set_read_concern(uri, new_rc);
	mongoc_read_concern_destroy(new_rc);

	return true;
} /* }}} */
/* Applies read preference options ("slaveOk", "readPreference",
 * "readPreferenceTags", "maxStalenessSeconds") from the options document to
 * the URI's read preference. Options are layered onto a copy of the URI's
 * existing read preference in a fixed order, then cross-validated (primary
 * mode conflicts with tags and with maxStalenessSeconds). Throws
 * InvalidArgumentException and returns false on invalid input; returns true
 * otherwise. */
static bool php_phongo_apply_rp_options_to_uri(mongoc_uri_t *uri, bson_t *options TSRMLS_DC) /* {{{ */
{
bson_iter_t iter;
mongoc_read_prefs_t *new_rp;
const mongoc_read_prefs_t *old_rp;
if (!(old_rp = mongoc_uri_get_read_prefs_t(uri))) {
phongo_throw_exception(PHONGO_ERROR_MONGOC_FAILED TSRMLS_CC, "mongoc_uri_t does not have a read preference");
return false;
}
/* Return early if there are no options to apply */
if (bson_empty0(options)) {
return true;
}
/* Return early if none of the read preference options are present, so the
 * existing read preference is left untouched. */
if (!bson_iter_init_find_case(&iter, options, MONGOC_URI_SLAVEOK) &&
!bson_iter_init_find_case(&iter, options, MONGOC_URI_READPREFERENCE) &&
!bson_iter_init_find_case(&iter, options, MONGOC_URI_READPREFERENCETAGS) &&
!bson_iter_init_find_case(&iter, options, MONGOC_URI_MAXSTALENESSSECONDS)
) {
return true;
}
/* Work on a copy; the copy must be destroyed on every exit path. */
new_rp = mongoc_read_prefs_copy(old_rp);
/* Legacy slaveOk=true maps to secondaryPreferred; slaveOk=false is a no-op.
 * Note: readPreference (below) is processed after slaveOk and may override
 * the mode set here. */
if (bson_iter_init_find_case(&iter, options, MONGOC_URI_SLAVEOK)) {
if (!BSON_ITER_HOLDS_BOOL(&iter)) {
PHONGO_URI_INVALID_TYPE(iter, "boolean");
mongoc_read_prefs_destroy(new_rp);
return false;
}
if (bson_iter_bool(&iter)) {
mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_SECONDARY_PREFERRED);
}
}
/* readPreference mode string (case-insensitive match). */
if (bson_iter_init_find_case(&iter, options, MONGOC_URI_READPREFERENCE)) {
const char *str;
if (!BSON_ITER_HOLDS_UTF8(&iter)) {
PHONGO_URI_INVALID_TYPE(iter, "string");
mongoc_read_prefs_destroy(new_rp);
return false;
}
str = bson_iter_utf8(&iter, NULL);
if (0 == strcasecmp("primary", str)) {
mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_PRIMARY);
} else if (0 == strcasecmp("primarypreferred", str)) {
mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_PRIMARY_PREFERRED);
} else if (0 == strcasecmp("secondary", str)) {
mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_SECONDARY);
} else if (0 == strcasecmp("secondarypreferred", str)) {
mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_SECONDARY_PREFERRED);
} else if (0 == strcasecmp("nearest", str)) {
mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_NEAREST);
} else {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Unsupported %s value: '%s'", bson_iter_key(&iter), str);
mongoc_read_prefs_destroy(new_rp);
return false;
}
}
/* readPreferenceTags: a BSON array of zero or more tag documents. */
if (bson_iter_init_find_case(&iter, options, MONGOC_URI_READPREFERENCETAGS)) {
bson_t tags;
uint32_t len;
const uint8_t *data;
if (!BSON_ITER_HOLDS_ARRAY(&iter)) {
PHONGO_URI_INVALID_TYPE(iter, "array");
mongoc_read_prefs_destroy(new_rp);
return false;
}
/* `tags` is a static view into the iterator's buffer; no destroy needed. */
bson_iter_array(&iter, &len, &data);
if (!bson_init_static(&tags, data, len)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Could not initialize BSON structure for read preference tags");
mongoc_read_prefs_destroy(new_rp);
return false;
}
if (!php_phongo_read_preference_tags_are_valid(&tags)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Read preference tags must be an array of zero or more documents");
mongoc_read_prefs_destroy(new_rp);
return false;
}
mongoc_read_prefs_set_tags(new_rp, &tags);
}
/* Tags are incompatible with primary mode. */
if (mongoc_read_prefs_get_mode(new_rp) == MONGOC_READ_PRIMARY &&
!bson_empty(mongoc_read_prefs_get_tags(new_rp))) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Primary read preference mode conflicts with tags");
mongoc_read_prefs_destroy(new_rp);
return false;
}
/* Handle maxStalenessSeconds, and make sure it is not combined with primary
 * readPreference */
if (bson_iter_init_find_case(&iter, options, MONGOC_URI_MAXSTALENESSSECONDS)) {
int64_t max_staleness_seconds;
if (!BSON_ITER_HOLDS_INT(&iter)) {
PHONGO_URI_INVALID_TYPE(iter, "integer");
mongoc_read_prefs_destroy(new_rp);
return false;
}
max_staleness_seconds = bson_iter_as_int64(&iter);
/* MONGOC_NO_MAX_STALENESS (the "disabled" sentinel) skips range and
 * conflict checks. */
if (max_staleness_seconds != MONGOC_NO_MAX_STALENESS) {
if (max_staleness_seconds < MONGOC_SMALLEST_MAX_STALENESS_SECONDS) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected maxStalenessSeconds to be >= %d, %" PRId64 " given", MONGOC_SMALLEST_MAX_STALENESS_SECONDS, max_staleness_seconds);
mongoc_read_prefs_destroy(new_rp);
return false;
}
if (max_staleness_seconds > INT32_MAX) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected maxStalenessSeconds to be <= %d, %" PRId64 " given", INT32_MAX, max_staleness_seconds);
mongoc_read_prefs_destroy(new_rp);
return false;
}
if (mongoc_read_prefs_get_mode(new_rp) == MONGOC_READ_PRIMARY) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Primary read preference mode conflicts with maxStalenessSeconds");
mongoc_read_prefs_destroy(new_rp);
return false;
}
}
mongoc_read_prefs_set_max_staleness_seconds(new_rp, max_staleness_seconds);
}
/* This may be redundant in light of the last check (primary with tags), but
 * we'll check anyway in case additional validation is implemented. */
if (!mongoc_read_prefs_is_valid(new_rp)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Read preference is not valid");
mongoc_read_prefs_destroy(new_rp);
return false;
}
mongoc_uri_set_read_prefs_t(uri, new_rp);
mongoc_read_prefs_destroy(new_rp);
return true;
} /* }}} */
/* Applies write concern options ("journal", "safe", "w", "wtimeoutMS") from
 * the options document to the URI's write concern. Options are layered onto a
 * copy of the URI's existing write concern, then cross-validated (journal
 * conflicts with unacknowledged w values). Throws InvalidArgumentException
 * and returns false on invalid input; returns true otherwise. */
static bool php_phongo_apply_wc_options_to_uri(mongoc_uri_t *uri, bson_t *options TSRMLS_DC) /* {{{ */
{
bson_iter_t iter;
int32_t wtimeoutms;
mongoc_write_concern_t *new_wc;
const mongoc_write_concern_t *old_wc;
if (!(old_wc = mongoc_uri_get_write_concern(uri))) {
phongo_throw_exception(PHONGO_ERROR_MONGOC_FAILED TSRMLS_CC, "mongoc_uri_t does not have a write concern");
return false;
}
/* Return early if there are no options to apply */
if (bson_empty0(options)) {
return true;
}
/* Return early if none of the write concern options are present, so the
 * existing write concern is left untouched. */
if (!bson_iter_init_find_case(&iter, options, MONGOC_URI_JOURNAL) &&
!bson_iter_init_find_case(&iter, options, MONGOC_URI_SAFE) &&
!bson_iter_init_find_case(&iter, options, MONGOC_URI_W) &&
!bson_iter_init_find_case(&iter, options, MONGOC_URI_WTIMEOUTMS)) {
return true;
}
/* Start from the URI's current wtimeout; may be overridden below. The copy
 * of the write concern must be destroyed on every exit path. */
wtimeoutms = mongoc_write_concern_get_wtimeout(old_wc);
new_wc = mongoc_write_concern_copy(old_wc);
/* Legacy "safe": true maps to w=1, false to unacknowledged. Processed
 * before "w", which may override the value set here. */
if (bson_iter_init_find_case(&iter, options, MONGOC_URI_SAFE)) {
if (!BSON_ITER_HOLDS_BOOL(&iter)) {
PHONGO_URI_INVALID_TYPE(iter, "boolean");
mongoc_write_concern_destroy(new_wc);
return false;
}
mongoc_write_concern_set_w(new_wc, bson_iter_bool(&iter) ? 1 : MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED);
}
/* wtimeoutMS is only recorded here; whether it is ultimately applied
 * depends on the final w value (see below). */
if (bson_iter_init_find_case(&iter, options, MONGOC_URI_WTIMEOUTMS)) {
if (!BSON_ITER_HOLDS_INT32(&iter)) {
PHONGO_URI_INVALID_TYPE(iter, "32-bit integer");
mongoc_write_concern_destroy(new_wc);
return false;
}
wtimeoutms = bson_iter_int32(&iter);
}
if (bson_iter_init_find_case(&iter, options, MONGOC_URI_JOURNAL)) {
if (!BSON_ITER_HOLDS_BOOL(&iter)) {
PHONGO_URI_INVALID_TYPE(iter, "boolean");
mongoc_write_concern_destroy(new_wc);
return false;
}
mongoc_write_concern_set_journal(new_wc, bson_iter_bool(&iter));
}
/* "w" accepts either a 32-bit integer (>= 0, or the special negative
 * sentinels) or a string ("majority" or a replica set tag name). */
if (bson_iter_init_find_case(&iter, options, MONGOC_URI_W)) {
if (BSON_ITER_HOLDS_INT32(&iter)) {
int32_t value = bson_iter_int32(&iter);
switch (value) {
case MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED:
case MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED:
mongoc_write_concern_set_w(new_wc, value);
break;
default:
if (value > 0) {
mongoc_write_concern_set_w(new_wc, value);
break;
}
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Unsupported w value: %d", value);
mongoc_write_concern_destroy(new_wc);
return false;
}
} else if (BSON_ITER_HOLDS_UTF8(&iter)) {
const char *str = bson_iter_utf8(&iter, NULL);
if (0 == strcasecmp(PHONGO_WRITE_CONCERN_W_MAJORITY, str)) {
mongoc_write_concern_set_wmajority(new_wc, wtimeoutms);
} else {
/* Any other string is treated as a replica set tag name */
mongoc_write_concern_set_wtag(new_wc, str);
}
} else {
PHONGO_URI_INVALID_TYPE(iter, "32-bit integer or string");
mongoc_write_concern_destroy(new_wc);
return false;
}
}
/* Only set wtimeout if it's still applicable; otherwise, clear it. */
if (mongoc_write_concern_get_w(new_wc) > 1 ||
mongoc_write_concern_get_wmajority(new_wc) ||
mongoc_write_concern_get_wtag(new_wc)) {
mongoc_write_concern_set_wtimeout(new_wc, wtimeoutms);
} else {
mongoc_write_concern_set_wtimeout(new_wc, 0);
}
/* Journaling requires an acknowledged write concern. */
if (mongoc_write_concern_get_journal(new_wc)) {
int32_t w = mongoc_write_concern_get_w(new_wc);
if (w == MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED || w == MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Journal conflicts with w value: %d", w);
mongoc_write_concern_destroy(new_wc);
return false;
}
}
/* This may be redundant in light of the last check (unacknowledged w with
journal), but we'll check anyway in case additional validation is
implemented. */
if (!mongoc_write_concern_is_valid(new_wc)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Write concern is not valid");
mongoc_write_concern_destroy(new_wc);
return false;
}
mongoc_uri_set_write_concern(uri, new_wc);
mongoc_write_concern_destroy(new_wc);
return true;
} /* }}} */
#ifdef MONGOC_ENABLE_SSL
/* Fetches a string entry from the driver options array, always returning a
 * buffer the caller owns (released later via str_efree() in
 * php_phongo_free_ssl_opt()). */
static inline char *php_phongo_fetch_ssl_opt_string(zval *zoptions, const char *key, int key_len)
{
	zend_bool must_free;
	int fetched_len;
	char *fetched;

	fetched = php_array_fetchl_string(zoptions, key, key_len, &fetched_len, &must_free);

	/* When php_array_fetchl_string() hands back a copy (must_free set), return
	 * it directly; otherwise duplicate the borrowed buffer. */
	return must_free ? fetched : estrndup(fetched, fetched_len);
}
/* Builds a mongoc_ssl_opt_t from the driver options array. Returns NULL
 * (without throwing) when no options were given, or NULL with an exception
 * thrown when an option is unsupported by the compiled SSL library. The
 * returned struct and its strings must be released with
 * php_phongo_free_ssl_opt(). */
static mongoc_ssl_opt_t *php_phongo_make_ssl_opt(zval *zoptions TSRMLS_DC)
{
mongoc_ssl_opt_t *ssl_opt;
if (!zoptions) {
return NULL;
}
/* Reject options that the compiled SSL library cannot honor. */
#if defined(MONGOC_ENABLE_SSL_SECURE_CHANNEL) || defined(MONGOC_ENABLE_SSL_SECURE_TRANSPORT)
if (php_array_existsc(zoptions, "ca_dir")) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "\"ca_dir\" option is not supported by Secure Channel and Secure Transport");
return NULL;
}
if (php_array_existsc(zoptions, "capath")) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "\"capath\" option is not supported by Secure Channel and Secure Transport");
return NULL;
}
#endif
#if defined(MONGOC_ENABLE_SSL_LIBRESSL) || defined(MONGOC_ENABLE_SSL_SECURE_TRANSPORT)
if (php_array_existsc(zoptions, "crl_file")) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "\"crl_file\" option is not supported by LibreSSL and Secure Transport");
return NULL;
}
#endif
ssl_opt = ecalloc(1, sizeof(mongoc_ssl_opt_t));
/* Check canonical option names first and fall back to SSL context options
 * for backwards compatibility. */
if (php_array_existsc(zoptions, "allow_invalid_hostname")) {
ssl_opt->allow_invalid_hostname = php_array_fetchc_bool(zoptions, "allow_invalid_hostname");
}
/* "allow_self_signed" is the legacy alias for "weak_cert_validation" */
if (php_array_existsc(zoptions, "weak_cert_validation")) {
ssl_opt->weak_cert_validation = php_array_fetchc_bool(zoptions, "weak_cert_validation");
} else if (php_array_existsc(zoptions, "allow_self_signed")) {
ssl_opt->weak_cert_validation = php_array_fetchc_bool(zoptions, "allow_self_signed");
}
/* "local_cert" is the legacy alias for "pem_file" */
if (php_array_existsc(zoptions, "pem_file")) {
ssl_opt->pem_file = php_phongo_fetch_ssl_opt_string(zoptions, ZEND_STRL("pem_file"));
} else if (php_array_existsc(zoptions, "local_cert")) {
ssl_opt->pem_file = php_phongo_fetch_ssl_opt_string(zoptions, ZEND_STRL("local_cert"));
}
/* "passphrase" is the legacy alias for "pem_pwd" */
if (php_array_existsc(zoptions, "pem_pwd")) {
ssl_opt->pem_pwd = php_phongo_fetch_ssl_opt_string(zoptions, ZEND_STRL("pem_pwd"));
} else if (php_array_existsc(zoptions, "passphrase")) {
ssl_opt->pem_pwd = php_phongo_fetch_ssl_opt_string(zoptions, ZEND_STRL("passphrase"));
}
/* "cafile" is the legacy alias for "ca_file" */
if (php_array_existsc(zoptions, "ca_file")) {
ssl_opt->ca_file = php_phongo_fetch_ssl_opt_string(zoptions, ZEND_STRL("ca_file"));
} else if (php_array_existsc(zoptions, "cafile")) {
ssl_opt->ca_file = php_phongo_fetch_ssl_opt_string(zoptions, ZEND_STRL("cafile"));
}
/* "capath" is the legacy alias for "ca_dir" */
if (php_array_existsc(zoptions, "ca_dir")) {
ssl_opt->ca_dir = php_phongo_fetch_ssl_opt_string(zoptions, ZEND_STRL("ca_dir"));
} else if (php_array_existsc(zoptions, "capath")) {
ssl_opt->ca_dir = php_phongo_fetch_ssl_opt_string(zoptions, ZEND_STRL("capath"));
}
if (php_array_existsc(zoptions, "crl_file")) {
ssl_opt->crl_file = php_phongo_fetch_ssl_opt_string(zoptions, ZEND_STRL("crl_file"));
}
return ssl_opt;
}
/* Releases a mongoc_ssl_opt_t produced by php_phongo_make_ssl_opt(): each
 * string fetched via php_phongo_fetch_ssl_opt_string(), then the struct
 * itself. */
static void php_phongo_free_ssl_opt(mongoc_ssl_opt_t *ssl_opt)
{
	if (ssl_opt->crl_file) {
		str_efree(ssl_opt->crl_file);
	}

	if (ssl_opt->ca_dir) {
		str_efree(ssl_opt->ca_dir);
	}

	if (ssl_opt->ca_file) {
		str_efree(ssl_opt->ca_file);
	}

	if (ssl_opt->pem_pwd) {
		str_efree(ssl_opt->pem_pwd);
	}

	if (ssl_opt->pem_file) {
		str_efree(ssl_opt->pem_file);
	}

	efree(ssl_opt);
}
#endif
/* APM callbacks */
/* Invokes the method named `name` on every registered APM subscriber, passing
 * the event object `z_event` as the single argument. Separate iteration code
 * paths are needed for PHP 7 (zend_hash API with macros) and PHP 5 (manual
 * HashPosition iteration). */
static void php_phongo_dispatch_handlers(const char *name, zval *z_event)
{
#if PHP_VERSION_ID >= 70000
zval *value;
ZEND_HASH_FOREACH_VAL(MONGODB_G(subscribers), value) {
/* We can't use the zend_call_method_with_1_params macro here, as it
 * does a sizeof() on the name argument, which does only work with
 * constant names, but not with parameterized ones as it does
 * "sizeof(char*)" in that case. */
zend_call_method(value, NULL, NULL, name, strlen(name), NULL, 1, z_event, NULL TSRMLS_CC);
} ZEND_HASH_FOREACH_END();
#else
HashPosition pos;
TSRMLS_FETCH();
zend_hash_internal_pointer_reset_ex(MONGODB_G(subscribers), &pos);
for (;; zend_hash_move_forward_ex(MONGODB_G(subscribers), &pos)) {
zval **value;
/* Exit the loop once iteration reaches the end of the hash */
if (zend_hash_get_current_data_ex(MONGODB_G(subscribers), (void **) &value, &pos) == FAILURE) {
break;
}
/* We can't use the zend_call_method_with_1_params macro here, as it
 * does a sizeof() on the name argument, which does only work with
 * constant names, but not with parameterized ones as it does
 * "sizeof(char*)" in that case. */
zend_call_method(value, NULL, NULL, name, strlen(name), NULL, 1, z_event, NULL TSRMLS_CC);
}
#endif
}
/* libmongoc APM callback: builds a CommandStartedEvent object from the
 * libmongoc event data and dispatches it to all registered subscribers via
 * their commandStarted() method. */
static void php_phongo_command_started(const mongoc_apm_command_started_t *event)
{
php_phongo_commandstartedevent_t *p_event;
#if PHP_VERSION_ID >= 70000
zval z_event;
#else
zval *z_event = NULL;
#endif
TSRMLS_FETCH();
/* Return early if there are no APM subscribers to notify */
if (!MONGODB_G(subscribers) || zend_hash_num_elements(MONGODB_G(subscribers)) == 0) {
return;
}
#if PHP_VERSION_ID >= 70000
object_init_ex(&z_event, php_phongo_commandstartedevent_ce);
p_event = Z_COMMANDSTARTEDEVENT_OBJ_P(&z_event);
#else
MAKE_STD_ZVAL(z_event);
object_init_ex(z_event, php_phongo_commandstartedevent_ce);
p_event = Z_COMMANDSTARTEDEVENT_OBJ_P(z_event);
#endif
/* Copy event data into the PHP object; the command document and strings are
 * duplicated, since the libmongoc event data does not outlive this call. */
p_event->client = mongoc_apm_command_started_get_context(event);
p_event->command_name = estrdup(mongoc_apm_command_started_get_command_name(event));
p_event->server_id = mongoc_apm_command_started_get_server_id(event);
p_event->operation_id = mongoc_apm_command_started_get_operation_id(event);
p_event->request_id = mongoc_apm_command_started_get_request_id(event);
p_event->command = bson_copy(mongoc_apm_command_started_get_command(event));
p_event->database_name = estrdup(mongoc_apm_command_started_get_database_name(event));
#if PHP_VERSION_ID >= 70000
php_phongo_dispatch_handlers("commandStarted", &z_event);
#else
php_phongo_dispatch_handlers("commandStarted", z_event);
#endif
zval_ptr_dtor(&z_event);
}
/* libmongoc APM callback: builds a CommandSucceededEvent object from the
 * libmongoc event data and dispatches it to all registered subscribers via
 * their commandSucceeded() method. */
static void php_phongo_command_succeeded(const mongoc_apm_command_succeeded_t *event)
{
php_phongo_commandsucceededevent_t *p_event;
#if PHP_VERSION_ID >= 70000
zval z_event;
#else
zval *z_event = NULL;
#endif
TSRMLS_FETCH();
/* Return early if there are no APM subscribers to notify */
if (!MONGODB_G(subscribers) || zend_hash_num_elements(MONGODB_G(subscribers)) == 0) {
return;
}
#if PHP_VERSION_ID >= 70000
object_init_ex(&z_event, php_phongo_commandsucceededevent_ce);
p_event = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(&z_event);
#else
MAKE_STD_ZVAL(z_event);
object_init_ex(z_event, php_phongo_commandsucceededevent_ce);
p_event = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(z_event);
#endif
/* Copy event data into the PHP object; the reply document and command name
 * are duplicated, since the libmongoc event data does not outlive this call. */
p_event->client = mongoc_apm_command_succeeded_get_context(event);
p_event->command_name = estrdup(mongoc_apm_command_succeeded_get_command_name(event));
p_event->server_id = mongoc_apm_command_succeeded_get_server_id(event);
p_event->operation_id = mongoc_apm_command_succeeded_get_operation_id(event);
p_event->request_id = mongoc_apm_command_succeeded_get_request_id(event);
p_event->duration_micros = mongoc_apm_command_succeeded_get_duration(event);
p_event->reply = bson_copy(mongoc_apm_command_succeeded_get_reply(event));
#if PHP_VERSION_ID >= 70000
php_phongo_dispatch_handlers("commandSucceeded", &z_event);
#else
php_phongo_dispatch_handlers("commandSucceeded", z_event);
#endif
zval_ptr_dtor(&z_event);
}
/* libmongoc APM callback: builds a CommandFailedEvent object (including a PHP
 * exception object materialized from the libmongoc error) and dispatches it
 * to all registered subscribers via their commandFailed() method. */
static void php_phongo_command_failed(const mongoc_apm_command_failed_t *event)
{
php_phongo_commandfailedevent_t *p_event;
#if PHP_VERSION_ID >= 70000
zval z_event;
#else
zval *z_event = NULL;
#endif
bson_error_t tmp_error;
zend_class_entry *default_exception_ce;
TSRMLS_FETCH();
/* Base Exception class entry, used below to set the standard "message" and
 * "code" properties on the error object. */
default_exception_ce = zend_exception_get_default(TSRMLS_C);
/* Return early if there are no APM subscribers to notify */
if (!MONGODB_G(subscribers) || zend_hash_num_elements(MONGODB_G(subscribers)) == 0) {
return;
}
#if PHP_VERSION_ID >= 70000
object_init_ex(&z_event, php_phongo_commandfailedevent_ce);
p_event = Z_COMMANDFAILEDEVENT_OBJ_P(&z_event);
#else
MAKE_STD_ZVAL(z_event);
object_init_ex(z_event, php_phongo_commandfailedevent_ce);
p_event = Z_COMMANDFAILEDEVENT_OBJ_P(z_event);
#endif
/* Copy event data into the PHP object; the command name is duplicated,
 * since the libmongoc event data does not outlive this call. */
p_event->client = mongoc_apm_command_failed_get_context(event);
p_event->command_name = estrdup(mongoc_apm_command_failed_get_command_name(event));
p_event->server_id = mongoc_apm_command_failed_get_server_id(event);
p_event->operation_id = mongoc_apm_command_failed_get_operation_id(event);
p_event->request_id = mongoc_apm_command_failed_get_request_id(event);
p_event->duration_micros = mongoc_apm_command_failed_get_duration(event);
/* We need to process and convert the error right here, otherwise
 * debug_info will turn into a recursive loop, and with the wrong trace
 * locations */
mongoc_apm_command_failed_get_error(event, &tmp_error);
{
#if PHP_VERSION_ID < 70000
MAKE_STD_ZVAL(p_event->z_error);
object_init_ex(p_event->z_error, phongo_exception_from_mongoc_domain(tmp_error.domain, tmp_error.code));
zend_update_property_string(default_exception_ce, p_event->z_error, ZEND_STRL("message"), tmp_error.message TSRMLS_CC);
zend_update_property_long(default_exception_ce, p_event->z_error, ZEND_STRL("code"), tmp_error.code TSRMLS_CC);
#else
object_init_ex(&p_event->z_error, phongo_exception_from_mongoc_domain(tmp_error.domain, tmp_error.code));
zend_update_property_string(default_exception_ce, &p_event->z_error, ZEND_STRL("message"), tmp_error.message TSRMLS_CC);
zend_update_property_long(default_exception_ce, &p_event->z_error, ZEND_STRL("code"), tmp_error.code TSRMLS_CC);
#endif
}
#if PHP_VERSION_ID >= 70000
php_phongo_dispatch_handlers("commandFailed", &z_event);
#else
php_phongo_dispatch_handlers("commandFailed", z_event);
#endif
zval_ptr_dtor(&z_event);
}
/* Registers the command monitoring (APM) callbacks on the client. The client
 * itself is passed as the callback context so each handler can recover it.
 * Returns the result of mongoc_client_set_apm_callbacks(). */
int php_phongo_set_monitoring_callbacks(mongoc_client_t *client)
{
	int result;
	mongoc_apm_callbacks_t *apm_callbacks;

	apm_callbacks = mongoc_apm_callbacks_new();
	mongoc_apm_set_command_started_cb(apm_callbacks, php_phongo_command_started);
	mongoc_apm_set_command_succeeded_cb(apm_callbacks, php_phongo_command_succeeded);
	mongoc_apm_set_command_failed_cb(apm_callbacks, php_phongo_command_failed);

	result = mongoc_client_set_apm_callbacks(client, apm_callbacks, client);

	/* Our callbacks struct can be destroyed immediately after registration */
	mongoc_apm_callbacks_destroy(apm_callbacks);

	return result;
}
/* Creates a hash for a client by concatenating the URI string with serialized
* options arrays. On success, a persistent string is returned (i.e. pefree()
* should be used to free it) and hash_len will be set to the string's length.
* On error, an exception will have been thrown and NULL will be returned. */
static char *php_phongo_manager_make_client_hash(const char *uri_string, zval *options, zval *driverOptions, size_t *hash_len TSRMLS_DC)
{
char *hash = NULL;
smart_str var_buf = {0};
php_serialize_data_t var_hash;
#if PHP_VERSION_ID >= 70000
zval args;
/* The hash input is an array of {pid, uri, options, driverOptions}; the PID
 * ensures clients are not shared across forked processes. */
array_init_size(&args, 4);
ADD_ASSOC_LONG_EX(&args, "pid", getpid());
ADD_ASSOC_STRING(&args, "uri", uri_string);
if (options) {
/* The array takes a reference to the caller's zvals */
ADD_ASSOC_ZVAL_EX(&args, "options", options);
Z_ADDREF_P(options);
} else {
ADD_ASSOC_NULL_EX(&args, "options");
}
if (driverOptions) {
ADD_ASSOC_ZVAL_EX(&args, "driverOptions", driverOptions);
Z_ADDREF_P(driverOptions);
} else {
ADD_ASSOC_NULL_EX(&args, "driverOptions");
}
/* Serialize the array into var_buf; serialization may throw (e.g. for
 * unserializable values), in which case no hash is produced. */
PHP_VAR_SERIALIZE_INIT(var_hash);
php_var_serialize(&var_buf, &args, &var_hash);
PHP_VAR_SERIALIZE_DESTROY(var_hash);
if (!EG(exception)) {
*hash_len = ZSTR_LEN(var_buf.s);
/* Persistent copy, since the hash keys a persistent client registry */
hash = pestrndup(ZSTR_VAL(var_buf.s), *hash_len, 1);
}
zval_ptr_dtor(&args);
#else
/* PHP 5 variant of the same logic, using heap-allocated zvals */
zval *args;
MAKE_STD_ZVAL(args);
array_init_size(args, 4);
ADD_ASSOC_LONG_EX(args, "pid", getpid());
ADD_ASSOC_STRING(args, "uri", uri_string);
if (options) {
ADD_ASSOC_ZVAL_EX(args, "options", options);
Z_ADDREF_P(options);
} else {
ADD_ASSOC_NULL_EX(args, "options");
}
if (driverOptions) {
ADD_ASSOC_ZVAL_EX(args, "driverOptions", driverOptions);
Z_ADDREF_P(driverOptions);
} else {
ADD_ASSOC_NULL_EX(args, "driverOptions");
}
PHP_VAR_SERIALIZE_INIT(var_hash);
php_var_serialize(&var_buf, &args, &var_hash TSRMLS_CC);
PHP_VAR_SERIALIZE_DESTROY(var_hash);
if (!EG(exception)) {
*hash_len = var_buf.len;
hash = pestrndup(var_buf.c, *hash_len, 1);
}
zval_ptr_dtor(&args);
#endif
smart_str_free(&var_buf);
return hash;
}
/* Creates a new libmongoc client from the URI, logging the driver, libmongoc,
 * libbson, and PHP versions for debugging. Returns NULL on failure (per
 * mongoc_client_new_from_uri()). */
static mongoc_client_t *php_phongo_make_mongo_client(const mongoc_uri_t *uri TSRMLS_DC) /* {{{ */
{
const char *mongoc_version, *bson_version;
/* Report runtime library versions when linked against system libraries;
 * otherwise note that the bundled copies are in use. */
#ifdef HAVE_SYSTEM_LIBMONGOC
mongoc_version = mongoc_get_version();
#else
mongoc_version = "bundled";
#endif
#ifdef HAVE_SYSTEM_LIBBSON
bson_version = bson_get_version();
#else
bson_version = "bundled";
#endif
MONGOC_DEBUG("Creating Manager, phongo-%s[%s] - mongoc-%s(%s), libbson-%s(%s), php-%s",
PHP_MONGODB_VERSION,
PHP_MONGODB_STABILITY,
MONGOC_VERSION_S,
mongoc_version,
BSON_VERSION_S,
bson_version,
PHP_VERSION
);
return mongoc_client_new_from_uri(uri);
} /* }}} */
/* Stores a client in the module-global persistent client registry under the
 * given hash (produced by php_phongo_manager_make_client_hash()), so later
 * Managers with identical construction arguments can reuse it. The creating
 * process's PID is recorded alongside the client. */
static void php_phongo_persist_client(const char *hash, size_t hash_len, mongoc_client_t *client TSRMLS_DC)
{
/* Persistent allocation: the entry outlives the current request */
php_phongo_pclient_t *pclient = (php_phongo_pclient_t *) pecalloc(1, sizeof(php_phongo_pclient_t), 1);
pclient->pid = (int) getpid();
pclient->client = client;
#if PHP_VERSION_ID >= 70000
zend_hash_str_update_ptr(&MONGODB_G(pclients), hash, hash_len, pclient);
#else
/* PHP 5's zend_hash_update expects the key length to include the NUL */
zend_hash_update(&MONGODB_G(pclients), hash, hash_len + 1, &pclient, sizeof(php_phongo_pclient_t *), NULL);
#endif
}
/* Looks up a previously persisted client by hash in the module-global
 * registry. Returns the client, or NULL if no entry exists for the hash. */
static mongoc_client_t *php_phongo_find_client(const char *hash, size_t hash_len TSRMLS_DC)
{
#if PHP_VERSION_ID >= 70000
php_phongo_pclient_t *pclient;
if ((pclient = zend_hash_str_find_ptr(&MONGODB_G(pclients), hash, hash_len)) != NULL) {
return pclient->client;
}
#else
php_phongo_pclient_t **pclient;
/* PHP 5's zend_hash_find expects the key length to include the NUL */
if (zend_hash_find(&MONGODB_G(pclients), hash, hash_len + 1, (void**) &pclient) == SUCCESS) {
return (*pclient)->client;
}
#endif
return NULL;
}
/* Initializes a Manager: computes a hash from the construction arguments,
 * reuses a persisted client when one exists for that hash, and otherwise
 * parses the URI, applies URI/read-concern/read-preference/write-concern
 * options, creates a new client (with SSL options when applicable), and
 * persists it. On failure an exception will have been thrown and
 * manager->client is left unset; all intermediate resources are released via
 * the goto-cleanup block. */
void phongo_manager_init(php_phongo_manager_t *manager, const char *uri_string, zval *options, zval *driverOptions TSRMLS_DC) /* {{{ */
{
char *hash = NULL;
size_t hash_len = 0;
bson_t bson_options = BSON_INITIALIZER;
mongoc_uri_t *uri = NULL;
#ifdef MONGOC_ENABLE_SSL
mongoc_ssl_opt_t *ssl_opt = NULL;
#endif
if (!(hash = php_phongo_manager_make_client_hash(uri_string, options, driverOptions, &hash_len TSRMLS_CC))) {
/* Exception should already have been thrown and there is nothing to free */
return;
}
/* Reuse an existing client persisted under the same hash, if any */
if ((manager->client = php_phongo_find_client(hash, hash_len TSRMLS_CC))) {
MONGOC_DEBUG("Found client for hash: %s\n", hash);
goto cleanup;
}
/* Convert the PHP options array to BSON for the URI option helpers */
if (options) {
php_phongo_zval_to_bson(options, PHONGO_BSON_NONE, &bson_options, NULL TSRMLS_CC);
}
/* An exception may be thrown during BSON conversion */
if (EG(exception)) {
goto cleanup;
}
if (!(uri = php_phongo_make_uri(uri_string, &bson_options TSRMLS_CC))) {
/* Exception should already have been thrown */
goto cleanup;
}
/* Apply option groups in order: general URI options, then read concern,
 * read preference, and write concern. */
if (!php_phongo_apply_options_to_uri(uri, &bson_options TSRMLS_CC) ||
!php_phongo_apply_rc_options_to_uri(uri, &bson_options TSRMLS_CC) ||
!php_phongo_apply_rp_options_to_uri(uri, &bson_options TSRMLS_CC) ||
!php_phongo_apply_wc_options_to_uri(uri, &bson_options TSRMLS_CC)) {
/* Exception should already have been thrown */
goto cleanup;
}
#ifdef MONGOC_ENABLE_SSL
/* Construct SSL options even if SSL is not enabled so that exceptions can
 * be thrown for unsupported driver options. */
ssl_opt = php_phongo_make_ssl_opt(driverOptions TSRMLS_CC);
/* An exception may be thrown during SSL option creation */
if (EG(exception)) {
goto cleanup;
}
#else
if (mongoc_uri_get_ssl(uri)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Cannot create SSL client. SSL is not enabled in this build.");
goto cleanup;
}
#endif
manager->client = php_phongo_make_mongo_client(uri TSRMLS_CC);
if (!manager->client) {
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to create Manager from URI: '%s'", uri_string);
goto cleanup;
}
#ifdef MONGOC_ENABLE_SSL
/* Only apply SSL options when the URI actually enables SSL */
if (ssl_opt && mongoc_uri_get_ssl(uri)) {
mongoc_client_set_ssl_opts(manager->client, ssl_opt);
}
#endif
MONGOC_DEBUG("Created client hash: %s\n", hash);
php_phongo_persist_client(hash, hash_len, manager->client TSRMLS_CC);
cleanup:
if (hash) {
pefree(hash, 1);
}
bson_destroy(&bson_options);
if (uri) {
mongoc_uri_destroy(uri);
}
#ifdef MONGOC_ENABLE_SSL
if (ssl_opt) {
php_phongo_free_ssl_opt(ssl_opt);
}
#endif
} /* }}} */
/* Initializes the zval as a BSON\UTCDateTime object representing the given
 * number of milliseconds since the Unix epoch. */
void php_phongo_new_utcdatetime_from_epoch(zval *object, int64_t msec_since_epoch TSRMLS_DC) /* {{{ */
{
	php_phongo_utcdatetime_t *utcdatetime;

	object_init_ex(object, php_phongo_utcdatetime_ce);

	utcdatetime = Z_UTCDATETIME_OBJ_P(object);
	utcdatetime->milliseconds = msec_since_epoch;
	utcdatetime->initialized = true;
} /* }}} */
/* Initializes the zval as a BSON\Timestamp object from its increment and
 * timestamp components. */
void php_phongo_new_timestamp_from_increment_and_timestamp(zval *object, uint32_t increment, uint32_t timestamp TSRMLS_DC) /* {{{ */
{
	php_phongo_timestamp_t *ts;

	object_init_ex(object, php_phongo_timestamp_ce);

	ts = Z_TIMESTAMP_OBJ_P(object);
	ts->increment = increment;
	ts->timestamp = timestamp;
	ts->initialized = true;
} /* }}} */
/* Initializes the zval as a BSON\Javascript object without a scope document,
 * by delegating to the scope-aware variant with a NULL scope. */
void php_phongo_new_javascript_from_javascript(int init, zval *object, const char *code, size_t code_len TSRMLS_DC) /* {{{ */
{
php_phongo_new_javascript_from_javascript_and_scope(init, object, code, code_len, NULL TSRMLS_CC);
} /* }}} */
/* Initializes the given zval as a BSON\Javascript object, copying the code
 * string and (if provided) the scope document. When "init" is zero, the zval
 * is assumed to already hold a Javascript object and only its fields are
 * populated. */
void php_phongo_new_javascript_from_javascript_and_scope(int init, zval *object, const char *code, size_t code_len, const bson_t *scope TSRMLS_DC) /* {{{ */
{
	php_phongo_javascript_t *javascript;

	if (init) {
		object_init_ex(object, php_phongo_javascript_ce);
	}

	javascript = Z_JAVASCRIPT_OBJ_P(object);
	javascript->code_len = code_len;
	javascript->code = estrndup(code, code_len);
	/* The scope document is duplicated so the object owns its own copy */
	javascript->scope = scope ? bson_copy(scope) : NULL;
} /* }}} */
/* Initializes the given zval as a BSON\Binary object, copying the payload and
 * recording its BSON subtype. */
void php_phongo_new_binary_from_binary_and_type(zval *object, const char *data, size_t data_len, bson_subtype_t type TSRMLS_DC) /* {{{ */
{
	php_phongo_binary_t *binary;

	object_init_ex(object, php_phongo_binary_ce);
	binary = Z_BINARY_OBJ_P(object);

	binary->data_len = data_len;
	binary->data = estrndup(data, data_len);
	/* BSON subtypes fit in a single byte */
	binary->type = (uint8_t) type;
} /* }}} */
/* Initializes the given zval as a BSON\Decimal128 object holding a copy of
 * the supplied 128-bit decimal value. */
void php_phongo_new_decimal128(zval *object, const bson_decimal128_t *decimal TSRMLS_DC) /* {{{ */
{
	php_phongo_decimal128_t *d128;

	object_init_ex(object, php_phongo_decimal128_ce);
	d128 = Z_DECIMAL128_OBJ_P(object);

	memcpy(&d128->decimal, decimal, sizeof(bson_decimal128_t));
	d128->initialized = true;
} /* }}} */
/* qsort() compare callback for alphabetizing regex flags upon initialization.
 * Returns -1, 0, or 1 per the standard strcmp-style contract. */
static int php_phongo_regex_compare_flags(const void *f1, const void *f2) {
	const char a = *(const char *) f1;
	const char b = *(const char *) f2;

	/* Branch-free three-way comparison; yields exactly -1, 0, or 1 */
	return (a > b) - (a < b);
}
/* Initializes the given zval as a BSON\Regex object, copying the
 * NUL-terminated pattern and flags strings. */
void php_phongo_new_regex_from_regex_and_options(zval *object, const char *pattern, const char *flags TSRMLS_DC) /* {{{ */
{
	php_phongo_regex_t *regex;

	object_init_ex(object, php_phongo_regex_ce);
	regex = Z_REGEX_OBJ_P(object);

	regex->pattern_len = strlen(pattern);
	regex->flags_len = strlen(flags);
	regex->pattern = estrndup(pattern, regex->pattern_len);
	regex->flags = estrndup(flags, regex->flags_len);

	/* Ensure flags are alphabetized upon initialization. This may be removed
	 * once CDRIVER-1883 is implemented. */
	qsort((void *) regex->flags, regex->flags_len, 1, php_phongo_regex_compare_flags);
} /* }}} */
+void php_phongo_new_symbol(zval *object, const char *symbol, size_t symbol_len TSRMLS_DC) /* {{{ */
+{
+ php_phongo_symbol_t *intern;
+
+ object_init_ex(object, php_phongo_symbol_ce);
+
+ intern = Z_SYMBOL_OBJ_P(object);
+ intern->symbol = estrndup(symbol, symbol_len);
+ intern->symbol_len = symbol_len;
+} /* }}} */
+
+void php_phongo_new_dbpointer(zval *object, const char *ref, size_t ref_len, const bson_oid_t *oid TSRMLS_DC) /* {{{ */
+{
+ php_phongo_dbpointer_t *intern;
+
+ object_init_ex(object, php_phongo_dbpointer_ce);
+
+ intern = Z_DBPOINTER_OBJ_P(object);
+ intern->ref = estrndup(ref, ref_len);
+ intern->ref_len = ref_len;
+ bson_oid_to_string(oid, intern->id);
+} /* }}} */
+
/* {{{ Memory allocation wrappers */
/* bson_mem_vtable_t malloc hook: allocates persistent memory via PHP's
 * allocator (persistent=1) so libbson data may outlive a single request. */
static void* php_phongo_malloc(size_t num_bytes) /* {{{ */
{
return pemalloc(num_bytes, 1);
} /* }}} */
/* bson_mem_vtable_t calloc hook: zero-initialized persistent allocation via
 * PHP's allocator. */
static void* php_phongo_calloc(size_t num_members, size_t num_bytes) /* {{{ */
{
return pecalloc(num_members, num_bytes, 1);
} /* }}} */
/* bson_mem_vtable_t realloc hook: resizes a persistent allocation made by the
 * hooks above. Brace placement matches the sibling wrappers. */
static void* php_phongo_realloc(void *mem, size_t num_bytes) /* {{{ */
{
	return perealloc(mem, num_bytes, 1);
} /* }}} */
/* bson_mem_vtable_t free hook: releases persistent memory allocated by the
 * hooks above. NULL pointers are ignored. */
static void php_phongo_free(void *mem) /* {{{ */
{
	if (!mem) {
		return;
	}

	pefree(mem, 1);
} /* }}} */
/* }}} */
/* {{{ M[INIT|SHUTDOWN] R[INIT|SHUTDOWN] G[INIT|SHUTDOWN] MINFO INI */
/* INI update handler for the mongodb.debug setting.
 *
 * Falsy values ("", "0", "off", "no", "false") disable libmongoc trace
 * logging entirely. "stderr"/"stdout" log to those streams; truthy values
 * ("1", "on", "yes", "true") log to a temporary file in the default temp
 * directory; any other value is treated as a directory in which to create
 * the log file. */
ZEND_INI_MH(OnUpdateDebug)
{
void ***ctx = NULL;
char *tmp_dir = NULL;
TSRMLS_SET_CTX(ctx);
/* Close any previously open log files (but never stderr/stdout, which we do
 * not own) */
if (MONGODB_G(debug_fd)) {
if (MONGODB_G(debug_fd) != stderr && MONGODB_G(debug_fd) != stdout) {
fclose(MONGODB_G(debug_fd));
}
MONGODB_G(debug_fd) = NULL;
}
/* Falsy value: disable tracing and unhook the log handler */
if (!new_value || (new_value && !ZSTR_VAL(new_value)[0])
|| strcasecmp("0", ZSTR_VAL(new_value)) == 0
|| strcasecmp("off", ZSTR_VAL(new_value)) == 0
|| strcasecmp("no", ZSTR_VAL(new_value)) == 0
|| strcasecmp("false", ZSTR_VAL(new_value)) == 0
) {
mongoc_log_trace_disable();
mongoc_log_set_handler(NULL, NULL);
/* OnUpdateString persists the new INI value; PHP 5 passes an explicit
 * string length, PHP 7 uses zend_string */
#if PHP_VERSION_ID >= 70000
return OnUpdateString(entry, new_value, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
#else
return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
#endif
}
if (strcasecmp(ZSTR_VAL(new_value), "stderr") == 0) {
MONGODB_G(debug_fd) = stderr;
} else if (strcasecmp(ZSTR_VAL(new_value), "stdout") == 0) {
MONGODB_G(debug_fd) = stdout;
} else if (
strcasecmp("1", ZSTR_VAL(new_value)) == 0
|| strcasecmp("on", ZSTR_VAL(new_value)) == 0
|| strcasecmp("yes", ZSTR_VAL(new_value)) == 0
|| strcasecmp("true", ZSTR_VAL(new_value)) == 0
) {
/* NULL directory lets php_open_temporary_fd() use the system temp dir */
tmp_dir = NULL;
} else {
tmp_dir = ZSTR_VAL(new_value);
}
/* No stream selected above: open a "PHONGO-<timestamp>" file for appending */
if (!MONGODB_G(debug_fd)) {
time_t t;
int fd = -1;
char *prefix;
int len;
phongo_char *filename;
time(&t);
len = spprintf(&prefix, 0, "PHONGO-%ld", t);
fd = php_open_temporary_fd(tmp_dir, prefix, &filename TSRMLS_CC);
if (fd != -1) {
const char *path = ZSTR_VAL(filename);
MONGODB_G(debug_fd) = VCWD_FOPEN(path, "a");
}
/* NOTE(review): if php_open_temporary_fd() failed, `filename` may be
 * uninitialized when passed to efree(), and close(-1) is invoked —
 * confirm whether the failure path needs guarding. */
efree(filename);
efree(prefix);
close(fd);
}
mongoc_log_trace_enable();
mongoc_log_set_handler(php_phongo_log, ctx);
#if PHP_VERSION_ID >= 70000
return OnUpdateString(entry, new_value, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
#else
return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC);
#endif
}
/* {{{ INI entries */
/* Registers the mongodb.debug INI setting, routed through OnUpdateDebug. */
PHP_INI_BEGIN()
#if PHP_VERSION_ID >= 70000
STD_PHP_INI_ENTRY(PHONGO_DEBUG_INI, PHONGO_DEBUG_INI_DEFAULT, PHP_INI_ALL, OnUpdateDebug, debug, zend_mongodb_globals, mongodb_globals)
#else
/* PHP 5: manual INI entry initializer equivalent to the STD_PHP_INI_ENTRY
 * expansion above */
{ 0, PHP_INI_ALL, (char *)PHONGO_DEBUG_INI, sizeof(PHONGO_DEBUG_INI), OnUpdateDebug, (void *) XtOffsetOf(zend_mongodb_globals, debug), (void *) &mglo, NULL, (char *)PHONGO_DEBUG_INI_DEFAULT, sizeof(PHONGO_DEBUG_INI_DEFAULT)-1, NULL, 0, 0, 0, NULL },
#endif
PHP_INI_END()
/* }}} */
/* Frees a persistent client entry. The wrapped mongoc_client_t is only
 * destroyed when it was created by the current process: clients inherited
 * across fork() are deliberately leaked so we do not shut down sockets that
 * may still be in use by our parent process (see: CDRIVER-2049). We are
 * already in MSHUTDOWN at this point, so the leak is bounded. */
static inline void php_phongo_pclient_destroy(php_phongo_pclient_t *pclient)
{
	if (pclient->pid == getpid()) {
		mongoc_client_destroy(pclient->client);
	}

	/* The wrapper struct itself is always persistent memory we own */
	pefree(pclient, 1);
}
#if PHP_VERSION_ID >= 70000
/* HashTable destructor for persistent clients (PHP 7: the table stores the
 * struct pointer inside a zval). */
static void php_phongo_pclient_dtor(zval *zv)
{
php_phongo_pclient_destroy((php_phongo_pclient_t *) Z_PTR_P(zv));
}
#else
/* HashTable destructor for persistent clients (PHP 5: the table hands the
 * destructor a pointer to the stored pointer). */
static void php_phongo_pclient_dtor(void *pp)
{
php_phongo_pclient_destroy(*((php_phongo_pclient_t **) pp));
}
#endif
/* {{{ PHP_RINIT_FUNCTION */
/* Per-request startup: lazily (re)creates the APM subscriber table. */
PHP_RINIT_FUNCTION(mongodb)
{
/* Initialize HashTable for APM subscribers, which is initialized to NULL in
 * GINIT and destroyed and reset to NULL in RSHUTDOWN. The ZVAL_PTR_DTOR
 * destructor releases each stored subscriber reference. */
if (MONGODB_G(subscribers) == NULL) {
ALLOC_HASHTABLE(MONGODB_G(subscribers));
zend_hash_init(MONGODB_G(subscribers), 0, NULL, ZVAL_PTR_DTOR, 0);
}
return SUCCESS;
}
/* }}} */
/* {{{ PHP_GINIT_FUNCTION */
/* Module-globals constructor: zeroes the globals, records the libbson
 * allocator vtable (installed later in MINIT), and creates the persistent
 * client table. */
PHP_GINIT_FUNCTION(mongodb)
{
/* Route libbson allocations through PHP's persistent allocator wrappers */
bson_mem_vtable_t bsonMemVTable = {
php_phongo_malloc,
php_phongo_calloc,
php_phongo_realloc,
php_phongo_free,
};
#if PHP_VERSION_ID >= 70000
#if defined(COMPILE_DL_MONGODB) && defined(ZTS)
ZEND_TSRMLS_CACHE_UPDATE();
#endif
#endif
memset(mongodb_globals, 0, sizeof(zend_mongodb_globals));
mongodb_globals->bsonMemVTable = bsonMemVTable;
/* Initialize HashTable for persistent clients (persistent=1: survives
 * requests; entries are destroyed via php_phongo_pclient_dtor in MSHUTDOWN) */
zend_hash_init_ex(&mongodb_globals->pclients, 0, NULL, php_phongo_pclient_dtor, 1, 0);
}
/* }}} */
/* Looks up an already-registered class by its (lowercased) name in the
 * compiler's class table. Returns the class entry, or NULL if the class is
 * not defined (e.g. the providing extension was not loaded). */
static zend_class_entry *php_phongo_fetch_internal_class(const char *class_name, size_t class_name_len TSRMLS_DC)
{
#if PHP_VERSION_ID >= 70000
zend_class_entry *pce;
if ((pce = zend_hash_str_find_ptr(CG(class_table), class_name, class_name_len))) {
return pce;
}
#else
zend_class_entry **pce;
/* PHP 5 hash keys include the trailing NUL byte, hence len + 1 */
if (zend_hash_find(CG(class_table), class_name, class_name_len + 1, (void **) &pce) == SUCCESS) {
return *pce;
}
#endif
return NULL;
}
/* {{{ PHP_MINIT_FUNCTION */
/* Module startup: initializes libmongoc/libbson, registers INI entries, all
 * driver classes and exceptions, and the extension's constants. */
PHP_MINIT_FUNCTION(mongodb)
{
char *php_version_string;
(void)type; /* We don't care if we are loaded via dl() or extension= */
REGISTER_INI_ENTRIES();
/* Initialize libmongoc */
mongoc_init();
/* Set handshake options: report "PHP <version>" as the platform string.
 * NOTE(review): malloc() result is not checked before use — confirm whether
 * OOM during module startup should be handled. */
php_version_string = malloc(4 + sizeof(PHP_VERSION) + 1);
snprintf(php_version_string, 4 + sizeof(PHP_VERSION) + 1, "PHP %s", PHP_VERSION);
mongoc_handshake_data_append("ext-mongodb:PHP", PHP_MONGODB_VERSION, php_version_string);
free(php_version_string);
/* Initialize libbson with the persistent-memory vtable set up in GINIT */
bson_mem_set_vtable(&MONGODB_G(bsonMemVTable));
/* Prep default object handlers to be used when we register the classes */
memcpy(&phongo_std_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
phongo_std_object_handlers.clone_obj = NULL;
/*
phongo_std_object_handlers.get_debug_info = NULL;
phongo_std_object_handlers.compare_objects = NULL;
phongo_std_object_handlers.cast_object = NULL;
phongo_std_object_handlers.count_elements = NULL;
phongo_std_object_handlers.get_closure = NULL;
*/
/* Initialize zend_class_entry dependencies.
 *
 * Although DateTimeImmutable was introduced in PHP 5.5.0,
 * php_date_get_immutable_ce() is not available in PHP versions before
 * 5.5.24 and 5.6.8.
 *
 * Although JsonSerializable was introduced in PHP 5.4.0,
 * php_json_serializable_ce is not exported in PHP versions before 5.4.26
 * and 5.5.10. For later PHP versions, looking up the class manually also
 * helps with distros that disable LTDL_LAZY for dlopen() (e.g. Fedora).
 */
php_phongo_date_immutable_ce = php_phongo_fetch_internal_class(ZEND_STRL("datetimeimmutable") TSRMLS_CC);
php_phongo_json_serializable_ce = php_phongo_fetch_internal_class(ZEND_STRL("jsonserializable") TSRMLS_CC);
if (php_phongo_json_serializable_ce == NULL) {
zend_error(E_ERROR, "JsonSerializable class is not defined. Please ensure that the 'json' module is loaded before the 'mongodb' module.");
return FAILURE;
}
/* Register base BSON classes first */
php_phongo_type_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_serializable_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_unserializable_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_binary_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_decimal128_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_javascript_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_maxkey_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_minkey_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_objectid_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_regex_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_timestamp_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_utcdatetime_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_binary_init_ce(INIT_FUNC_ARGS_PASSTHRU);
/* NOTE(review): the '+'-prefixed lines below are unified-diff residue from
 * the imported patch view and will not compile as-is. */
+	php_phongo_dbpointer_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_decimal128_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_javascript_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_maxkey_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_minkey_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_objectid_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_persistable_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_regex_init_ce(INIT_FUNC_ARGS_PASSTHRU);
+	php_phongo_symbol_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_timestamp_init_ce(INIT_FUNC_ARGS_PASSTHRU);
+	php_phongo_undefined_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_utcdatetime_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_bulkwrite_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_command_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_cursor_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_cursorid_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_manager_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_query_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_readconcern_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_readpreference_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_server_init_ce(INIT_FUNC_ARGS_PASSTHRU);
+	php_phongo_session_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_writeconcern_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_writeconcernerror_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_writeerror_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_writeresult_init_ce(INIT_FUNC_ARGS_PASSTHRU);
/* Register base exception classes first */
php_phongo_exception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_runtimeexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_connectionexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_writeexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_authenticationexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_bulkwriteexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_connectiontimeoutexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_executiontimeoutexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_invalidargumentexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_logicexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_sslconnectionexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_unexpectedvalueexception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
/* Register base APM classes first */
php_phongo_subscriber_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_commandsubscriber_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_commandfailedevent_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_commandstartedevent_init_ce(INIT_FUNC_ARGS_PASSTHRU);
php_phongo_commandsucceededevent_init_ce(INIT_FUNC_ARGS_PASSTHRU);
REGISTER_STRING_CONSTANT("MONGODB_VERSION", (char *)PHP_MONGODB_VERSION, CONST_CS | CONST_PERSISTENT);
REGISTER_STRING_CONSTANT("MONGODB_STABILITY", (char *)PHP_MONGODB_STABILITY, CONST_CS | CONST_PERSISTENT);
return SUCCESS;
}
/* }}} */
/* {{{ PHP_MSHUTDOWN_FUNCTION */
/* Module shutdown: tears down persistent clients, then libbson/libmongoc. */
PHP_MSHUTDOWN_FUNCTION(mongodb)
{
(void)type; /* We don't care if we are loaded via dl() or extension= */
/* Destroy HashTable for persistent clients. The HashTable destructor will
 * destroy any mongoc_client_t objects that were created by this process. */
zend_hash_destroy(&MONGODB_G(pclients));
/* Restore libbson's default allocators (installed in MINIT) */
bson_mem_restore_vtable();
/* Cleanup after libmongoc */
mongoc_cleanup();
UNREGISTER_INI_ENTRIES();
return SUCCESS;
}
/* }}} */
/* {{{ PHP_RSHUTDOWN_FUNCTION */
/* Per-request shutdown: tears down the APM subscriber table created in RINIT
 * and resets it to NULL so the next request can lazily recreate it. */
PHP_RSHUTDOWN_FUNCTION(mongodb)
{
	if (!MONGODB_G(subscribers)) {
		return SUCCESS;
	}

	zend_hash_destroy(MONGODB_G(subscribers));
	FREE_HASHTABLE(MONGODB_G(subscribers));
	MONGODB_G(subscribers) = NULL;

	return SUCCESS;
}
/* }}} */
/* {{{ PHP_GSHUTDOWN_FUNCTION */
/* Module-globals destructor: clears the INI-backed debug string and closes
 * any debug log stream that is still open. */
PHP_GSHUTDOWN_FUNCTION(mongodb)
{
	mongodb_globals->debug = NULL;

	if (!mongodb_globals->debug_fd) {
		return;
	}

	fclose(mongodb_globals->debug_fd);
	mongodb_globals->debug_fd = NULL;
}
/* }}} */
/* {{{ PHP_MINFO_FUNCTION */
/* phpinfo() output: reports the extension version plus the libbson/libmongoc
 * versions and the SSL/crypto/SASL features they were built with. */
PHP_MINFO_FUNCTION(mongodb)
{
php_info_print_table_start();
php_info_print_table_header(2, "MongoDB support", "enabled");
php_info_print_table_row(2, "MongoDB extension version", PHP_MONGODB_VERSION);
php_info_print_table_row(2, "MongoDB extension stability", PHP_MONGODB_STABILITY);
/* System libbson: header and runtime versions may differ */
#ifdef HAVE_SYSTEM_LIBBSON
php_info_print_table_row(2, "libbson headers version", BSON_VERSION_S);
php_info_print_table_row(2, "libbson library version", bson_get_version());
#else
php_info_print_table_row(2, "libbson bundled version", BSON_VERSION_S);
#endif
#ifdef HAVE_SYSTEM_LIBMONGOC
php_info_print_table_row(2, "libmongoc headers version", MONGOC_VERSION_S);
php_info_print_table_row(2, "libmongoc library version", mongoc_get_version());
#else
/* Bundled libraries, buildtime = runtime */
php_info_print_table_row(2, "libmongoc bundled version", MONGOC_VERSION_S);
#endif
#ifdef MONGOC_ENABLE_SSL
php_info_print_table_row(2, "libmongoc SSL", "enabled");
# if defined(MONGOC_ENABLE_SSL_OPENSSL)
php_info_print_table_row(2, "libmongoc SSL library", "OpenSSL");
# elif defined(MONGOC_ENABLE_SSL_LIBRESSL)
php_info_print_table_row(2, "libmongoc SSL library", "LibreSSL");
# elif defined(MONGOC_ENABLE_SSL_SECURE_TRANSPORT)
php_info_print_table_row(2, "libmongoc SSL library", "Secure Transport");
# elif defined(MONGOC_ENABLE_SSL_SECURE_CHANNEL)
php_info_print_table_row(2, "libmongoc SSL library", "Secure Channel");
# else
php_info_print_table_row(2, "libmongoc SSL library", "unknown");
# endif
#else
php_info_print_table_row(2, "libmongoc SSL", "disabled");
#endif
#ifdef MONGOC_ENABLE_CRYPTO
php_info_print_table_row(2, "libmongoc crypto", "enabled");
# if defined(MONGOC_ENABLE_CRYPTO_LIBCRYPTO)
php_info_print_table_row(2, "libmongoc crypto library", "libcrypto");
# elif defined(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO)
php_info_print_table_row(2, "libmongoc crypto library", "Common Crypto");
# elif defined(MONGOC_ENABLE_CRYPTO_CNG)
php_info_print_table_row(2, "libmongoc crypto library", "CNG");
# else
php_info_print_table_row(2, "libmongoc crypto library", "unknown");
# endif
# ifdef MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE
php_info_print_table_row(2, "libmongoc crypto system profile", "enabled");
# else
php_info_print_table_row(2, "libmongoc crypto system profile", "disabled");
# endif
#else
php_info_print_table_row(2, "libmongoc crypto", "disabled");
#endif
#ifdef MONGOC_ENABLE_SASL
php_info_print_table_row(2, "libmongoc SASL", "enabled");
#else
php_info_print_table_row(2, "libmongoc SASL", "disabled");
#endif
/* NOTE(review): the '+'-prefixed lines below are unified-diff residue from
 * the imported patch view and will not compile as-is. */
+#ifdef MONGOC_ENABLE_COMPRESSION
+	php_info_print_table_row(2, "libmongoc compression", "enabled");
+# ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY
+	php_info_print_table_row(2, "libmongoc compression snappy", "enabled");
+# else
+	php_info_print_table_row(2, "libmongoc compression snappy", "disabled");
+# endif
+# ifdef MONGOC_ENABLE_COMPRESSION_ZLIB
+	php_info_print_table_row(2, "libmongoc compression zlib", "enabled");
+# else
+	php_info_print_table_row(2, "libmongoc compression zlib", "disabled");
+# endif
+#else
+	php_info_print_table_row(2, "libmongoc compression", "disabled");
+#endif
+
php_info_print_table_end();
DISPLAY_INI_ENTRIES();
}
/* }}} */
/* }}} */
/* {{{ Shared function entries for disabling constructors and unserialize() */
/* Disabled constructor shared by classes that may only be instantiated
 * internally by the driver; always throws a RuntimeException. */
PHP_FUNCTION(MongoDB_disabled___construct) /* {{{ */
{
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Accessing private constructor");
} /* }}} */
/* Disabled __wakeup handler shared by driver classes: rejects any attempt to
 * unserialize() them by throwing a RuntimeException. */
PHP_FUNCTION(MongoDB_disabled___wakeup) /* {{{ */
{
if (zend_parse_parameters_none() == FAILURE) {
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "%s", "MongoDB\\Driver objects cannot be serialized");
} /* }}} */
/* }}} */
/* {{{ mongodb_functions[]
 */
/* Argument info for the namespaced BSON conversion helpers */
ZEND_BEGIN_ARG_INFO_EX(ai_bson_fromPHP, 0, 0, 1)
ZEND_ARG_INFO(0, value)
ZEND_END_ARG_INFO();
ZEND_BEGIN_ARG_INFO_EX(ai_bson_toPHP, 0, 0, 1)
ZEND_ARG_INFO(0, bson)
ZEND_ARG_ARRAY_INFO(0, typemap, 0)
ZEND_END_ARG_INFO();
/* Shared by toJSON and both extended-JSON variants (one "bson" argument) */
ZEND_BEGIN_ARG_INFO_EX(ai_bson_toJSON, 0, 0, 1)
ZEND_ARG_INFO(0, bson)
ZEND_END_ARG_INFO();
ZEND_BEGIN_ARG_INFO_EX(ai_bson_fromJSON, 0, 0, 1)
ZEND_ARG_INFO(0, json)
ZEND_END_ARG_INFO();
/* Shared by addSubscriber and removeSubscriber */
ZEND_BEGIN_ARG_INFO_EX(ai_mongodb_driver_monitoring_subscriber, 0, 0, 1)
ZEND_ARG_OBJ_INFO(0, subscriber, MongoDB\\Driver\\Monitoring\\Subscriber, 0)
ZEND_END_ARG_INFO();
/* Namespaced functions exported by the extension */
static const zend_function_entry mongodb_functions[] = {
ZEND_NS_NAMED_FE("MongoDB\\BSON", fromPHP, PHP_FN(MongoDB_BSON_fromPHP), ai_bson_fromPHP)
ZEND_NS_NAMED_FE("MongoDB\\BSON", toPHP, PHP_FN(MongoDB_BSON_toPHP), ai_bson_toPHP)
ZEND_NS_NAMED_FE("MongoDB\\BSON", toJSON, PHP_FN(MongoDB_BSON_toJSON), ai_bson_toJSON)
ZEND_NS_NAMED_FE("MongoDB\\BSON", toCanonicalExtendedJSON, PHP_FN(MongoDB_BSON_toCanonicalExtendedJSON), ai_bson_toJSON)
ZEND_NS_NAMED_FE("MongoDB\\BSON", toRelaxedExtendedJSON, PHP_FN(MongoDB_BSON_toRelaxedExtendedJSON), ai_bson_toJSON)
ZEND_NS_NAMED_FE("MongoDB\\BSON", fromJSON, PHP_FN(MongoDB_BSON_fromJSON), ai_bson_fromJSON)
ZEND_NS_NAMED_FE("MongoDB\\Driver\\Monitoring", addSubscriber, PHP_FN(MongoDB_Driver_Monitoring_addSubscriber), ai_mongodb_driver_monitoring_subscriber)
ZEND_NS_NAMED_FE("MongoDB\\Driver\\Monitoring", removeSubscriber, PHP_FN(MongoDB_Driver_Monitoring_removeSubscriber), ai_mongodb_driver_monitoring_subscriber)
PHP_FE_END
};
/* }}} */
/* Extensions that must be loaded before this one (MINIT looks up classes
 * registered by the date and json extensions). */
static const zend_module_dep mongodb_deps[] = {
ZEND_MOD_REQUIRED("date")
ZEND_MOD_REQUIRED("json")
ZEND_MOD_REQUIRED("spl")
ZEND_MOD_REQUIRED("standard")
ZEND_MOD_END
};
/* {{{ mongodb_module_entry
 */
zend_module_entry mongodb_module_entry = {
STANDARD_MODULE_HEADER_EX,
NULL, /* ini_entry (INI entries are registered in MINIT instead) */
mongodb_deps, /* module load-order dependencies */
"mongodb",
mongodb_functions,
PHP_MINIT(mongodb),
PHP_MSHUTDOWN(mongodb),
PHP_RINIT(mongodb),
PHP_RSHUTDOWN(mongodb),
PHP_MINFO(mongodb),
PHP_MONGODB_VERSION,
PHP_MODULE_GLOBALS(mongodb),
PHP_GINIT(mongodb),
PHP_GSHUTDOWN(mongodb),
NULL, /* post_deactivate_func */
STANDARD_MODULE_PROPERTIES_EX
};
/* }}} */
#ifdef COMPILE_DL_MONGODB
ZEND_GET_MODULE(mongodb)
#endif
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/php_phongo.h b/mongodb-1.4.2/php_phongo.h
similarity index 79%
rename from mongodb-1.3.4/php_phongo.h
rename to mongodb-1.4.2/php_phongo.h
index f57b7827..66e94876 100644
--- a/mongodb-1.3.4/php_phongo.h
+++ b/mongodb-1.4.2/php_phongo.h
@@ -1,194 +1,220 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PHONGO_H
#define PHONGO_H
/* External libs */
#include "bson.h"
#include "mongoc.h"
+#include "phongo_compat.h"
+#include "php_phongo_classes.h"
+
#define phpext_mongodb_ptr &mongodb_module_entry
extern zend_module_entry mongodb_module_entry;
/* FIXME: Its annoying to bump version. Move into phongo_version.h.in */
-#define PHP_MONGODB_VERSION "1.3.4"
+#define PHP_MONGODB_VERSION "1.4.2"
#define PHP_MONGODB_STABILITY "stable"
/* Structure for persisted libmongoc clients. The PID is included to ensure that
* processes do not destroy clients created by other processes (relevant for
* forking). We avoid using pid_t for Windows compatibility. */
typedef struct {
mongoc_client_t *client;
int pid;
} php_phongo_pclient_t;
ZEND_BEGIN_MODULE_GLOBALS(mongodb)
char *debug;
FILE *debug_fd;
bson_mem_vtable_t bsonMemVTable;
HashTable pclients;
HashTable *subscribers;
ZEND_END_MODULE_GLOBALS(mongodb)
#if PHP_VERSION_ID >= 70000
# define MONGODB_G(v) ZEND_MODULE_GLOBALS_ACCESSOR(mongodb, v)
# if defined(ZTS) && defined(COMPILE_DL_MONGODB)
ZEND_TSRMLS_CACHE_EXTERN()
# endif
#else
# ifdef ZTS
# define MONGODB_G(v) TSRMG(mongodb_globals_id, zend_mongodb_globals *, v)
# define mglo mongodb_globals_id
# else
# define MONGODB_G(v) (mongodb_globals.v)
# define mglo mongodb_globals
# endif
#endif
#define PHONGO_WRITE_CONCERN_W_MAJORITY "majority"
-#include "php_phongo_classes.h"
-
/* This enum is necessary since mongoc_server_description_type_t is private and
* we need to translate strings returned by mongoc_server_description_type() to
* Server integer constants. */
typedef enum {
PHONGO_SERVER_UNKNOWN = 0,
PHONGO_SERVER_STANDALONE = 1,
PHONGO_SERVER_MONGOS = 2,
PHONGO_SERVER_POSSIBLE_PRIMARY = 3,
PHONGO_SERVER_RS_PRIMARY = 4,
PHONGO_SERVER_RS_SECONDARY = 5,
PHONGO_SERVER_RS_ARBITER = 6,
PHONGO_SERVER_RS_OTHER = 7,
PHONGO_SERVER_RS_GHOST = 8,
PHONGO_SERVER_DESCRIPTION_TYPES = 9,
} php_phongo_server_description_type_t;
typedef struct {
php_phongo_server_description_type_t type;
const char *name;
} php_phongo_server_description_type_map_t;
extern php_phongo_server_description_type_map_t php_phongo_server_description_type_map[];
typedef enum {
PHONGO_ERROR_INVALID_ARGUMENT = 1,
PHONGO_ERROR_RUNTIME = 2,
PHONGO_ERROR_UNEXPECTED_VALUE = 8,
PHONGO_ERROR_MONGOC_FAILED = 3,
PHONGO_ERROR_WRITE_FAILED = 5,
PHONGO_ERROR_CONNECTION_FAILED = 7,
PHONGO_ERROR_LOGIC = 9
} php_phongo_error_domain_t;
zend_class_entry* phongo_exception_from_mongoc_domain(uint32_t /* mongoc_error_domain_t */ domain, uint32_t /* mongoc_error_code_t */ code);
zend_class_entry* phongo_exception_from_phongo_domain(php_phongo_error_domain_t domain);
void phongo_throw_exception(php_phongo_error_domain_t domain TSRMLS_DC, const char *format, ...)
#if PHP_VERSION_ID < 70000
# ifndef PHP_WIN32
# ifdef ZTS
__attribute__ ((format(printf, 3, 4)))
# else
__attribute__ ((format(printf, 2, 3)))
# endif
# endif
#endif
;
void phongo_throw_exception_from_bson_error_t(bson_error_t *error TSRMLS_DC);
+/* This enum is used for processing options in phongo_execute_parse_options and
+ * selecting a libmongoc function to use in phongo_execute_command. The values
+ * are important, as READ and WRITE are also used as a bit field to determine
+ * whether readPreference, readConcern, and writeConcern options are parsed. */
+typedef enum {
+ PHONGO_OPTION_READ_CONCERN = 0x01,
+ PHONGO_OPTION_READ_PREFERENCE = 0x02,
+ PHONGO_OPTION_WRITE_CONCERN = 0x04,
+ PHONGO_COMMAND_RAW = 0x07,
+ PHONGO_COMMAND_READ = 0x03,
+ PHONGO_COMMAND_WRITE = 0x04,
+ PHONGO_COMMAND_READ_WRITE = 0x05,
+} php_phongo_command_type_t;
+
zend_object_handlers *phongo_get_std_object_handlers(void);
-void phongo_server_init (zval *return_value, mongoc_client_t *client, int server_id TSRMLS_DC);
+void phongo_server_init (zval *return_value, mongoc_client_t *client, uint32_t server_id TSRMLS_DC);
+void phongo_session_init (zval *return_value, mongoc_client_session_t *client_session TSRMLS_DC);
void phongo_readconcern_init (zval *return_value, const mongoc_read_concern_t *read_concern TSRMLS_DC);
void phongo_readpreference_init (zval *return_value, const mongoc_read_prefs_t *read_prefs TSRMLS_DC);
void phongo_writeconcern_init (zval *return_value, const mongoc_write_concern_t *write_concern TSRMLS_DC);
-mongoc_bulk_operation_t* phongo_bulkwrite_init (zend_bool ordered);
-bool phongo_execute_write (mongoc_client_t *client, const char *namespace, php_phongo_bulkwrite_t *bulk_write, const mongoc_write_concern_t *write_concern, int server_id, zval *return_value, int return_value_used TSRMLS_DC);
-int phongo_execute_command (mongoc_client_t *client, const char *db, zval *zcommand, zval *zreadPreference, int server_id, zval *return_value, int return_value_used TSRMLS_DC);
-int phongo_execute_query (mongoc_client_t *client, const char *namespace, zval *zquery, zval *zreadPreference, int server_id, zval *return_value, int return_value_used TSRMLS_DC);
+bool phongo_execute_bulk_write (mongoc_client_t *client, const char *namespace, php_phongo_bulkwrite_t *bulk_write, zval *zwriteConcern, uint32_t server_id, zval *return_value, int return_value_used TSRMLS_DC);
+int phongo_execute_command (mongoc_client_t *client, php_phongo_command_type_t type, const char *db, zval *zcommand, zval *zreadPreference, uint32_t server_id, zval *return_value, int return_value_used TSRMLS_DC);
+int phongo_execute_query (mongoc_client_t *client, const char *namespace, zval *zquery, zval *zreadPreference, uint32_t server_id, zval *return_value, int return_value_used TSRMLS_DC);
+
+bool phongo_cursor_advance_and_check_for_error(mongoc_cursor_t *cursor TSRMLS_DC);
const mongoc_read_concern_t* phongo_read_concern_from_zval (zval *zread_concern TSRMLS_DC);
const mongoc_read_prefs_t* phongo_read_preference_from_zval(zval *zread_preference TSRMLS_DC);
const mongoc_write_concern_t* phongo_write_concern_from_zval (zval *zwrite_concern TSRMLS_DC);
php_phongo_server_description_type_t php_phongo_server_description_type(mongoc_server_description_t *sd);
+bool phongo_parse_read_preference(zval *options, zval **zreadPreference TSRMLS_DC);
+
+zval* php_phongo_prep_legacy_option(zval *options, const char *key, bool *allocated TSRMLS_DC);
+void php_phongo_prep_legacy_option_free(zval *options TSRMLS_DC);
+
void php_phongo_read_preference_prep_tagsets(zval *tagSets TSRMLS_DC);
bool php_phongo_read_preference_tags_are_valid(const bson_t *tags);
void php_phongo_server_to_zval(zval *retval, mongoc_server_description_t *sd);
void php_phongo_read_concern_to_zval(zval *retval, const mongoc_read_concern_t *read_concern);
void php_phongo_read_preference_to_zval(zval *retval, const mongoc_read_prefs_t *read_prefs);
void php_phongo_write_concern_to_zval(zval *retval, const mongoc_write_concern_t *write_concern);
void php_phongo_cursor_to_zval(zval *retval, const mongoc_cursor_t *cursor);
void phongo_manager_init(php_phongo_manager_t *manager, const char *uri_string, zval *options, zval *driverOptions TSRMLS_DC);
int php_phongo_set_monitoring_callbacks(mongoc_client_t *client);
void php_phongo_objectid_new_from_oid(zval *object, const bson_oid_t *oid TSRMLS_DC);
void php_phongo_cursor_id_new_from_id(zval *object, int64_t cursorid TSRMLS_DC);
void php_phongo_new_utcdatetime_from_epoch(zval *object, int64_t msec_since_epoch TSRMLS_DC);
void php_phongo_new_timestamp_from_increment_and_timestamp(zval *object, uint32_t increment, uint32_t timestamp TSRMLS_DC);
void php_phongo_new_javascript_from_javascript(int init, zval *object, const char *code, size_t code_len TSRMLS_DC);
void php_phongo_new_javascript_from_javascript_and_scope(int init, zval *object, const char *code, size_t code_len, const bson_t *scope TSRMLS_DC);
void php_phongo_new_binary_from_binary_and_type(zval *object, const char *data, size_t data_len, bson_subtype_t type TSRMLS_DC);
void php_phongo_new_decimal128(zval *object, const bson_decimal128_t *decimal TSRMLS_DC);
void php_phongo_new_regex_from_regex_and_options(zval *object, const char *pattern, const char *flags TSRMLS_DC);
+void php_phongo_new_symbol(zval *object, const char *symbol, size_t symbol_len TSRMLS_DC);
+void php_phongo_new_dbpointer(zval *object, const char *namespace, size_t namespace_len, const bson_oid_t *oid TSRMLS_DC);
zend_bool phongo_writeerror_init(zval *return_value, bson_t *bson TSRMLS_DC);
zend_bool phongo_writeconcernerror_init(zval *return_value, bson_t *bson TSRMLS_DC);
#if PHP_VERSION_ID >= 70000
#define PHONGO_CE_FINAL(ce) do { \
ce->ce_flags |= ZEND_ACC_FINAL; \
} while(0);
#else
#define PHONGO_CE_FINAL(ce) do { \
ce->ce_flags |= ZEND_ACC_FINAL_CLASS; \
} while(0);
#endif
#define PHONGO_CE_DISABLE_SERIALIZATION(ce) do { \
ce->serialize = zend_class_serialize_deny; \
ce->unserialize = zend_class_unserialize_deny; \
} while(0);
#define PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, size) do { \
if (is_debug) { \
ALLOC_HASHTABLE(props); \
zend_hash_init((props), (size), NULL, ZVAL_PTR_DTOR, 0); \
} else if ((intern)->properties) { \
- zend_hash_clean((intern)->properties); \
(props) = (intern)->properties; \
} else { \
ALLOC_HASHTABLE(props); \
zend_hash_init((props), (size), NULL, ZVAL_PTR_DTOR, 0); \
(intern)->properties = (props); \
} \
} while(0);
+#define PHONGO_ZVAL_CLASS_OR_TYPE_NAME(zv) (Z_TYPE(zv) == IS_OBJECT ? ZSTR_VAL(Z_OBJCE(zv)->name) : zend_get_type_by_const(Z_TYPE(zv)))
+#define PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(zvp) PHONGO_ZVAL_CLASS_OR_TYPE_NAME(*(zvp))
+
#endif /* PHONGO_H */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/php_phongo_classes.h b/mongodb-1.4.2/php_phongo_classes.h
similarity index 89%
rename from mongodb-1.3.4/php_phongo_classes.h
rename to mongodb-1.4.2/php_phongo_classes.h
index 7e77f2b9..db52a0cb 100644
--- a/mongodb-1.3.4/php_phongo_classes.h
+++ b/mongodb-1.4.2/php_phongo_classes.h
@@ -1,346 +1,382 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PHONGO_CLASSES_H
#define PHONGO_CLASSES_H
#include "php_phongo_structs.h"
/* Export zend_class_entry dependencies, which are initialized in MINIT */
extern zend_class_entry *php_phongo_date_immutable_ce;
extern zend_class_entry *php_phongo_json_serializable_ce;
#if PHP_VERSION_ID >= 70000
static inline php_phongo_bulkwrite_t* php_bulkwrite_fetch_object(zend_object *obj) {
return (php_phongo_bulkwrite_t *)((char *)obj - XtOffsetOf(php_phongo_bulkwrite_t, std));
}
static inline php_phongo_command_t* php_command_fetch_object(zend_object *obj) {
return (php_phongo_command_t *)((char *)obj - XtOffsetOf(php_phongo_command_t, std));
}
static inline php_phongo_cursor_t* php_cursor_fetch_object(zend_object *obj) {
return (php_phongo_cursor_t *)((char *)obj - XtOffsetOf(php_phongo_cursor_t, std));
}
static inline php_phongo_cursorid_t* php_cursorid_fetch_object(zend_object *obj) {
return (php_phongo_cursorid_t *)((char *)obj - XtOffsetOf(php_phongo_cursorid_t, std));
}
static inline php_phongo_manager_t* php_manager_fetch_object(zend_object *obj) {
return (php_phongo_manager_t *)((char *)obj - XtOffsetOf(php_phongo_manager_t, std));
}
static inline php_phongo_query_t* php_query_fetch_object(zend_object *obj) {
return (php_phongo_query_t *)((char *)obj - XtOffsetOf(php_phongo_query_t, std));
}
static inline php_phongo_readconcern_t* php_readconcern_fetch_object(zend_object *obj) {
return (php_phongo_readconcern_t *)((char *)obj - XtOffsetOf(php_phongo_readconcern_t, std));
}
static inline php_phongo_readpreference_t* php_readpreference_fetch_object(zend_object *obj) {
return (php_phongo_readpreference_t *)((char *)obj - XtOffsetOf(php_phongo_readpreference_t, std));
}
static inline php_phongo_server_t* php_server_fetch_object(zend_object *obj) {
return (php_phongo_server_t *)((char *)obj - XtOffsetOf(php_phongo_server_t, std));
}
+static inline php_phongo_session_t* php_session_fetch_object(zend_object *obj) {
+ return (php_phongo_session_t *)((char *)obj - XtOffsetOf(php_phongo_session_t, std));
+}
static inline php_phongo_writeconcern_t* php_writeconcern_fetch_object(zend_object *obj) {
return (php_phongo_writeconcern_t *)((char *)obj - XtOffsetOf(php_phongo_writeconcern_t, std));
}
static inline php_phongo_writeconcernerror_t* php_writeconcernerror_fetch_object(zend_object *obj) {
return (php_phongo_writeconcernerror_t *)((char *)obj - XtOffsetOf(php_phongo_writeconcernerror_t, std));
}
static inline php_phongo_writeerror_t* php_writeerror_fetch_object(zend_object *obj) {
return (php_phongo_writeerror_t *)((char *)obj - XtOffsetOf(php_phongo_writeerror_t, std));
}
static inline php_phongo_writeresult_t* php_writeresult_fetch_object(zend_object *obj) {
return (php_phongo_writeresult_t *)((char *)obj - XtOffsetOf(php_phongo_writeresult_t, std));
}
static inline php_phongo_binary_t* php_binary_fetch_object(zend_object *obj) {
return (php_phongo_binary_t *)((char *)obj - XtOffsetOf(php_phongo_binary_t, std));
}
+static inline php_phongo_dbpointer_t* php_dbpointer_fetch_object(zend_object *obj) {
+ return (php_phongo_dbpointer_t *)((char *)obj - XtOffsetOf(php_phongo_dbpointer_t, std));
+}
static inline php_phongo_decimal128_t* php_decimal128_fetch_object(zend_object *obj) {
return (php_phongo_decimal128_t *)((char *)obj - XtOffsetOf(php_phongo_decimal128_t, std));
}
static inline php_phongo_javascript_t* php_javascript_fetch_object(zend_object *obj) {
return (php_phongo_javascript_t *)((char *)obj - XtOffsetOf(php_phongo_javascript_t, std));
}
static inline php_phongo_maxkey_t* php_maxkey_fetch_object(zend_object *obj) {
return (php_phongo_maxkey_t *)((char *)obj - XtOffsetOf(php_phongo_maxkey_t, std));
}
static inline php_phongo_minkey_t* php_minkey_fetch_object(zend_object *obj) {
return (php_phongo_minkey_t *)((char *)obj - XtOffsetOf(php_phongo_minkey_t, std));
}
static inline php_phongo_objectid_t* php_objectid_fetch_object(zend_object *obj) {
return (php_phongo_objectid_t *)((char *)obj - XtOffsetOf(php_phongo_objectid_t, std));
}
static inline php_phongo_regex_t* php_regex_fetch_object(zend_object *obj) {
return (php_phongo_regex_t *)((char *)obj - XtOffsetOf(php_phongo_regex_t, std));
}
+static inline php_phongo_symbol_t* php_symbol_fetch_object(zend_object *obj) {
+ return (php_phongo_symbol_t *)((char *)obj - XtOffsetOf(php_phongo_symbol_t, std));
+}
static inline php_phongo_timestamp_t* php_timestamp_fetch_object(zend_object *obj) {
return (php_phongo_timestamp_t *)((char *)obj - XtOffsetOf(php_phongo_timestamp_t, std));
}
+static inline php_phongo_undefined_t* php_undefined_fetch_object(zend_object *obj) {
+ return (php_phongo_undefined_t *)((char *)obj - XtOffsetOf(php_phongo_undefined_t, std));
+}
static inline php_phongo_utcdatetime_t* php_utcdatetime_fetch_object(zend_object *obj) {
return (php_phongo_utcdatetime_t *)((char *)obj - XtOffsetOf(php_phongo_utcdatetime_t, std));
}
static inline php_phongo_commandfailedevent_t* php_commandfailedevent_fetch_object(zend_object *obj) {
return (php_phongo_commandfailedevent_t *)((char *)obj - XtOffsetOf(php_phongo_commandfailedevent_t, std));
}
static inline php_phongo_commandstartedevent_t* php_commandstartedevent_fetch_object(zend_object *obj) {
return (php_phongo_commandstartedevent_t *)((char *)obj - XtOffsetOf(php_phongo_commandstartedevent_t, std));
}
static inline php_phongo_commandsucceededevent_t* php_commandsucceededevent_fetch_object(zend_object *obj) {
return (php_phongo_commandsucceededevent_t *)((char *)obj - XtOffsetOf(php_phongo_commandsucceededevent_t, std));
}
# define Z_COMMAND_OBJ_P(zv) (php_command_fetch_object(Z_OBJ_P(zv)))
# define Z_CURSOR_OBJ_P(zv) (php_cursor_fetch_object(Z_OBJ_P(zv)))
# define Z_CURSORID_OBJ_P(zv) (php_cursorid_fetch_object(Z_OBJ_P(zv)))
# define Z_MANAGER_OBJ_P(zv) (php_manager_fetch_object(Z_OBJ_P(zv)))
# define Z_QUERY_OBJ_P(zv) (php_query_fetch_object(Z_OBJ_P(zv)))
# define Z_READCONCERN_OBJ_P(zv) (php_readconcern_fetch_object(Z_OBJ_P(zv)))
# define Z_READPREFERENCE_OBJ_P(zv) (php_readpreference_fetch_object(Z_OBJ_P(zv)))
# define Z_SERVER_OBJ_P(zv) (php_server_fetch_object(Z_OBJ_P(zv)))
+# define Z_SESSION_OBJ_P(zv) (php_session_fetch_object(Z_OBJ_P(zv)))
# define Z_BULKWRITE_OBJ_P(zv) (php_bulkwrite_fetch_object(Z_OBJ_P(zv)))
# define Z_WRITECONCERN_OBJ_P(zv) (php_writeconcern_fetch_object(Z_OBJ_P(zv)))
# define Z_WRITECONCERNERROR_OBJ_P(zv) (php_writeconcernerror_fetch_object(Z_OBJ_P(zv)))
# define Z_WRITEERROR_OBJ_P(zv) (php_writeerror_fetch_object(Z_OBJ_P(zv)))
# define Z_WRITERESULT_OBJ_P(zv) (php_writeresult_fetch_object(Z_OBJ_P(zv)))
# define Z_BINARY_OBJ_P(zv) (php_binary_fetch_object(Z_OBJ_P(zv)))
+# define Z_DBPOINTER_OBJ_P(zv) (php_dbpointer_fetch_object(Z_OBJ_P(zv)))
# define Z_DECIMAL128_OBJ_P(zv) (php_decimal128_fetch_object(Z_OBJ_P(zv)))
# define Z_JAVASCRIPT_OBJ_P(zv) (php_javascript_fetch_object(Z_OBJ_P(zv)))
# define Z_MAXKEY_OBJ_P(zv) (php_maxkey_fetch_object(Z_OBJ_P(zv)))
# define Z_MINKEY_OBJ_P(zv) (php_minkey_fetch_object(Z_OBJ_P(zv)))
# define Z_OBJECTID_OBJ_P(zv) (php_objectid_fetch_object(Z_OBJ_P(zv)))
# define Z_REGEX_OBJ_P(zv) (php_regex_fetch_object(Z_OBJ_P(zv)))
+# define Z_SYMBOL_OBJ_P(zv) (php_symbol_fetch_object(Z_OBJ_P(zv)))
# define Z_TIMESTAMP_OBJ_P(zv) (php_timestamp_fetch_object(Z_OBJ_P(zv)))
+# define Z_UNDEFINED_OBJ_P(zv) (php_undefined_fetch_object(Z_OBJ_P(zv)))
# define Z_UTCDATETIME_OBJ_P(zv) (php_utcdatetime_fetch_object(Z_OBJ_P(zv)))
# define Z_COMMANDFAILEDEVENT_OBJ_P(zv) (php_commandfailedevent_fetch_object(Z_OBJ_P(zv)))
# define Z_COMMANDSTARTEDEVENT_OBJ_P(zv) (php_commandstartedevent_fetch_object(Z_OBJ_P(zv)))
# define Z_COMMANDSUCCEEDEDEVENT_OBJ_P(zv) (php_commandsucceededevent_fetch_object(Z_OBJ_P(zv)))
# define Z_OBJ_COMMAND(zo) (php_command_fetch_object(zo))
# define Z_OBJ_CURSOR(zo) (php_cursor_fetch_object(zo))
# define Z_OBJ_CURSORID(zo) (php_cursorid_fetch_object(zo))
# define Z_OBJ_MANAGER(zo) (php_manager_fetch_object(zo))
# define Z_OBJ_QUERY(zo) (php_query_fetch_object(zo))
# define Z_OBJ_READCONCERN(zo) (php_readconcern_fetch_object(zo))
# define Z_OBJ_READPREFERENCE(zo) (php_readpreference_fetch_object(zo))
# define Z_OBJ_SERVER(zo) (php_server_fetch_object(zo))
+# define Z_OBJ_SESSION(zo) (php_session_fetch_object(zo))
# define Z_OBJ_BULKWRITE(zo) (php_bulkwrite_fetch_object(zo))
# define Z_OBJ_WRITECONCERN(zo) (php_writeconcern_fetch_object(zo))
# define Z_OBJ_WRITECONCERNERROR(zo) (php_writeconcernerror_fetch_object(zo))
# define Z_OBJ_WRITEERROR(zo) (php_writeerror_fetch_object(zo))
# define Z_OBJ_WRITERESULT(zo) (php_writeresult_fetch_object(zo))
# define Z_OBJ_BINARY(zo) (php_binary_fetch_object(zo))
+# define Z_OBJ_DBPOINTER(zo) (php_dbpointer_fetch_object(zo))
# define Z_OBJ_DECIMAL128(zo) (php_decimal128_fetch_object(zo))
# define Z_OBJ_JAVASCRIPT(zo) (php_javascript_fetch_object(zo))
# define Z_OBJ_MAXKEY(zo) (php_maxkey_fetch_object(zo))
# define Z_OBJ_MINKEY(zo) (php_minkey_fetch_object(zo))
# define Z_OBJ_OBJECTID(zo) (php_objectid_fetch_object(zo))
# define Z_OBJ_REGEX(zo) (php_regex_fetch_object(zo))
+# define Z_OBJ_SYMBOL(zo) (php_symbol_fetch_object(zo))
# define Z_OBJ_TIMESTAMP(zo) (php_timestamp_fetch_object(zo))
+# define Z_OBJ_UNDEFINED(zo) (php_undefined_fetch_object(zo))
# define Z_OBJ_UTCDATETIME(zo) (php_utcdatetime_fetch_object(zo))
# define Z_OBJ_COMMANDFAILEDEVENT(zo) (php_commandfailedevent_fetch_object(zo))
# define Z_OBJ_COMMANDSTARTEDEVENT(zo) (php_commandstartedevent_fetch_object(zo))
# define Z_OBJ_COMMANDSUCCEEDEDEVENT(zo) (php_commandsucceededevent_fetch_object(zo))
#else
# define Z_COMMAND_OBJ_P(zv) ((php_phongo_command_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_CURSOR_OBJ_P(zv) ((php_phongo_cursor_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_CURSORID_OBJ_P(zv) ((php_phongo_cursorid_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_MANAGER_OBJ_P(zv) ((php_phongo_manager_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_QUERY_OBJ_P(zv) ((php_phongo_query_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_READCONCERN_OBJ_P(zv) ((php_phongo_readconcern_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_READPREFERENCE_OBJ_P(zv) ((php_phongo_readpreference_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_SERVER_OBJ_P(zv) ((php_phongo_server_t *)zend_object_store_get_object(zv TSRMLS_CC))
+# define Z_SESSION_OBJ_P(zv) ((php_phongo_session_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_BULKWRITE_OBJ_P(zv) ((php_phongo_bulkwrite_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_WRITECONCERN_OBJ_P(zv) ((php_phongo_writeconcern_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_WRITECONCERNERROR_OBJ_P(zv) ((php_phongo_writeconcernerror_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_WRITEERROR_OBJ_P(zv) ((php_phongo_writeerror_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_WRITERESULT_OBJ_P(zv) ((php_phongo_writeresult_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_BINARY_OBJ_P(zv) ((php_phongo_binary_t *)zend_object_store_get_object(zv TSRMLS_CC))
+# define Z_DBPOINTER_OBJ_P(zv) ((php_phongo_dbpointer_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_DECIMAL128_OBJ_P(zv) ((php_phongo_decimal128_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_JAVASCRIPT_OBJ_P(zv) ((php_phongo_javascript_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_MAXKEY_OBJ_P(zv) ((php_phongo_maxkey_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_MINKEY_OBJ_P(zv) ((php_phongo_minkey_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_OBJECTID_OBJ_P(zv) ((php_phongo_objectid_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_REGEX_OBJ_P(zv) ((php_phongo_regex_t *)zend_object_store_get_object(zv TSRMLS_CC))
+# define Z_SYMBOL_OBJ_P(zv) ((php_phongo_symbol_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_TIMESTAMP_OBJ_P(zv) ((php_phongo_timestamp_t *)zend_object_store_get_object(zv TSRMLS_CC))
+# define Z_UNDEFINED_OBJ_P(zv) ((php_phongo_undefined_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_UTCDATETIME_OBJ_P(zv) ((php_phongo_utcdatetime_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_COMMANDFAILEDEVENT_OBJ_P(zv) ((php_phongo_commandfailedevent_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_COMMANDSTARTEDEVENT_OBJ_P(zv) ((php_phongo_commandstartedevent_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_COMMANDSUCCEEDEDEVENT_OBJ_P(zv) ((php_phongo_commandsucceededevent_t *)zend_object_store_get_object(zv TSRMLS_CC))
# define Z_OBJ_COMMAND(zo) ((php_phongo_command_t *)zo)
# define Z_OBJ_CURSOR(zo) ((php_phongo_cursor_t *)zo)
# define Z_OBJ_CURSORID(zo) ((php_phongo_cursorid_t *)zo)
# define Z_OBJ_MANAGER(zo) ((php_phongo_manager_t *)zo)
# define Z_OBJ_QUERY(zo) ((php_phongo_query_t *)zo)
# define Z_OBJ_READCONCERN(zo) ((php_phongo_readconcern_t *)zo)
# define Z_OBJ_READPREFERENCE(zo) ((php_phongo_readpreference_t *)zo)
# define Z_OBJ_SERVER(zo) ((php_phongo_server_t *)zo)
+# define Z_OBJ_SESSION(zo) ((php_phongo_session_t *)zo)
# define Z_OBJ_BULKWRITE(zo) ((php_phongo_bulkwrite_t *)zo)
# define Z_OBJ_WRITECONCERN(zo) ((php_phongo_writeconcern_t *)zo)
# define Z_OBJ_WRITECONCERNERROR(zo) ((php_phongo_writeconcernerror_t *)zo)
# define Z_OBJ_WRITEERROR(zo) ((php_phongo_writeerror_t *)zo)
# define Z_OBJ_WRITERESULT(zo) ((php_phongo_writeresult_t *)zo)
# define Z_OBJ_BINARY(zo) ((php_phongo_binary_t *)zo)
+# define Z_OBJ_DBPOINTER(zo) ((php_phongo_dbpointer_t *)zo)
# define Z_OBJ_DECIMAL128(zo) ((php_phongo_decimal128_t *)zo)
# define Z_OBJ_JAVASCRIPT(zo) ((php_phongo_javascript_t *)zo)
# define Z_OBJ_MAXKEY(zo) ((php_phongo_maxkey_t *)zo)
# define Z_OBJ_MINKEY(zo) ((php_phongo_minkey_t *)zo)
# define Z_OBJ_OBJECTID(zo) ((php_phongo_objectid_t *)zo)
# define Z_OBJ_REGEX(zo) ((php_phongo_regex_t *)zo)
+# define Z_OBJ_SYMBOL(zo) ((php_phongo_symbol_t *)zo)
# define Z_OBJ_TIMESTAMP(zo) ((php_phongo_timestamp_t *)zo)
+# define Z_OBJ_UNDEFINED(zo) ((php_phongo_undefined_t *)zo)
# define Z_OBJ_UTCDATETIME(zo) ((php_phongo_utcdatetime_t *)zo)
# define Z_OBJ_COMMANDFAILEDEVENT(zo) ((php_phongo_commandfailedevent_t *)zo)
# define Z_OBJ_COMMANDSTARTEDEVENT(zo) ((php_phongo_commandstartedevent_t *)zo)
# define Z_OBJ_COMMANDSUCCEEDEDEVENT(zo) ((php_phongo_commandsucceededevent_t *)zo)
#endif
typedef struct {
zend_object_iterator intern;
php_phongo_cursor_t *cursor;
} php_phongo_cursor_iterator;
extern zend_class_entry *php_phongo_command_ce;
extern zend_class_entry *php_phongo_cursor_ce;
extern zend_class_entry *php_phongo_cursorid_ce;
extern zend_class_entry *php_phongo_manager_ce;
extern zend_class_entry *php_phongo_query_ce;
extern zend_class_entry *php_phongo_readconcern_ce;
extern zend_class_entry *php_phongo_readpreference_ce;
extern zend_class_entry *php_phongo_server_ce;
+extern zend_class_entry *php_phongo_session_ce;
extern zend_class_entry *php_phongo_bulkwrite_ce;
extern zend_class_entry *php_phongo_writeconcern_ce;
extern zend_class_entry *php_phongo_writeconcernerror_ce;
extern zend_class_entry *php_phongo_writeerror_ce;
extern zend_class_entry *php_phongo_writeresult_ce;
extern zend_class_entry *php_phongo_exception_ce;
extern zend_class_entry *php_phongo_logicexception_ce;
extern zend_class_entry *php_phongo_runtimeexception_ce;
extern zend_class_entry *php_phongo_unexpectedvalueexception_ce;
extern zend_class_entry *php_phongo_invalidargumentexception_ce;
extern zend_class_entry *php_phongo_connectionexception_ce;
extern zend_class_entry *php_phongo_authenticationexception_ce;
extern zend_class_entry *php_phongo_sslconnectionexception_ce;
extern zend_class_entry *php_phongo_executiontimeoutexception_ce;
extern zend_class_entry *php_phongo_connectiontimeoutexception_ce;
extern zend_class_entry *php_phongo_writeexception_ce;
extern zend_class_entry *php_phongo_bulkwriteexception_ce;
extern zend_class_entry *php_phongo_type_ce;
extern zend_class_entry *php_phongo_persistable_ce;
extern zend_class_entry *php_phongo_unserializable_ce;
extern zend_class_entry *php_phongo_serializable_ce;
extern zend_class_entry *php_phongo_binary_ce;
+extern zend_class_entry *php_phongo_dbpointer_ce;
extern zend_class_entry *php_phongo_decimal128_ce;
extern zend_class_entry *php_phongo_javascript_ce;
extern zend_class_entry *php_phongo_maxkey_ce;
extern zend_class_entry *php_phongo_minkey_ce;
extern zend_class_entry *php_phongo_objectid_ce;
extern zend_class_entry *php_phongo_regex_ce;
+extern zend_class_entry *php_phongo_symbol_ce;
extern zend_class_entry *php_phongo_timestamp_ce;
+extern zend_class_entry *php_phongo_undefined_ce;
extern zend_class_entry *php_phongo_utcdatetime_ce;
extern zend_class_entry *php_phongo_binary_interface_ce;
extern zend_class_entry *php_phongo_decimal128_interface_ce;
extern zend_class_entry *php_phongo_javascript_interface_ce;
extern zend_class_entry *php_phongo_maxkey_interface_ce;
extern zend_class_entry *php_phongo_minkey_interface_ce;
extern zend_class_entry *php_phongo_objectid_interface_ce;
extern zend_class_entry *php_phongo_regex_interface_ce;
extern zend_class_entry *php_phongo_timestamp_interface_ce;
extern zend_class_entry *php_phongo_utcdatetime_interface_ce;
extern zend_class_entry *php_phongo_commandfailedevent_ce;
extern zend_class_entry *php_phongo_commandstartedevent_ce;
extern zend_class_entry *php_phongo_commandsubscriber_ce;
extern zend_class_entry *php_phongo_commandsucceededevent_ce;
extern zend_class_entry *php_phongo_subscriber_ce;
extern void php_phongo_binary_init_ce(INIT_FUNC_ARGS);
+extern void php_phongo_dbpointer_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_decimal128_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_javascript_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_maxkey_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_minkey_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_objectid_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_persistable_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_regex_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_serializable_init_ce(INIT_FUNC_ARGS);
+extern void php_phongo_symbol_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_timestamp_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_type_init_ce(INIT_FUNC_ARGS);
-extern void php_phongo_utcdatetime_init_ce(INIT_FUNC_ARGS);
+extern void php_phongo_undefined_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_unserializable_init_ce(INIT_FUNC_ARGS);
+extern void php_phongo_utcdatetime_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_binary_interface_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_decimal128_interface_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_javascript_interface_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_maxkey_interface_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_minkey_interface_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_objectid_interface_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_regex_interface_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_timestamp_interface_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_utcdatetime_interface_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_bulkwrite_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_command_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_cursor_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_cursorid_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_manager_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_query_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_readconcern_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_readpreference_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_server_init_ce(INIT_FUNC_ARGS);
+extern void php_phongo_session_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_writeconcern_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_writeconcernerror_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_writeerror_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_writeresult_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_authenticationexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_bulkwriteexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_connectionexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_connectiontimeoutexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_exception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_executiontimeoutexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_invalidargumentexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_logicexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_runtimeexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_sslconnectionexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_unexpectedvalueexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_writeexception_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_commandfailedevent_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_commandstartedevent_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_commandsubscriber_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_commandsucceededevent_init_ce(INIT_FUNC_ARGS);
extern void php_phongo_subscriber_init_ce(INIT_FUNC_ARGS);
/* Shared function entries for disabling constructors and unserialize() */
PHP_FUNCTION(MongoDB_disabled___construct);
PHP_FUNCTION(MongoDB_disabled___wakeup);
#endif /* PHONGO_CLASSES_H */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/php_phongo_structs.h b/mongodb-1.4.2/php_phongo_structs.h
similarity index 88%
rename from mongodb-1.3.4/php_phongo_structs.h
rename to mongodb-1.4.2/php_phongo_structs.h
index 52ab8f60..d96a3953 100644
--- a/mongodb-1.3.4/php_phongo_structs.h
+++ b/mongodb-1.4.2/php_phongo_structs.h
@@ -1,260 +1,291 @@
/*
* Copyright 2015-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PHONGO_STRUCTS_H
#define PHONGO_STRUCTS_H
#include <php.h>
#include "php_bson.h"
#if PHP_VERSION_ID >= 70000
# define PHONGO_ZEND_OBJECT_PRE
# define PHONGO_ZEND_OBJECT_POST zend_object std;
# define PHONGO_STRUCT_ZVAL zval
#else
# define PHONGO_ZEND_OBJECT_PRE zend_object std;
# define PHONGO_ZEND_OBJECT_POST
# define PHONGO_STRUCT_ZVAL zval*
#endif
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_bulk_operation_t *bulk;
size_t num_ops;
bool ordered;
int bypass;
char *database;
char *collection;
bool executed;
PHONGO_ZEND_OBJECT_POST
} php_phongo_bulkwrite_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
- bson_t *bson;
+ bson_t *bson;
+ uint32_t max_await_time_ms;
+ uint32_t batch_size;
PHONGO_ZEND_OBJECT_POST
} php_phongo_command_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_cursor_t *cursor;
mongoc_client_t *client;
- int server_id;
+ uint32_t server_id;
+ bool advanced;
php_phongo_bson_state visitor_data;
int got_iterator;
long current;
char *database;
char *collection;
PHONGO_STRUCT_ZVAL query;
PHONGO_STRUCT_ZVAL command;
PHONGO_STRUCT_ZVAL read_preference;
PHONGO_ZEND_OBJECT_POST
} php_phongo_cursor_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
int64_t id;
PHONGO_ZEND_OBJECT_POST
} php_phongo_cursorid_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_client_t *client;
PHONGO_ZEND_OBJECT_POST
} php_phongo_manager_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
bson_t *filter;
bson_t *opts;
mongoc_read_concern_t *read_concern;
uint32_t max_await_time_ms;
PHONGO_ZEND_OBJECT_POST
} php_phongo_query_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_read_concern_t *read_concern;
PHONGO_ZEND_OBJECT_POST
} php_phongo_readconcern_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_read_prefs_t *read_preference;
PHONGO_ZEND_OBJECT_POST
} php_phongo_readpreference_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_client_t *client;
- int server_id;
+ uint32_t server_id;
PHONGO_ZEND_OBJECT_POST
} php_phongo_server_t;
+typedef struct {
+ PHONGO_ZEND_OBJECT_PRE
+ mongoc_client_session_t *client_session;
+ PHONGO_ZEND_OBJECT_POST
+} php_phongo_session_t;
+
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_write_concern_t *write_concern;
PHONGO_ZEND_OBJECT_POST
} php_phongo_writeconcern_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
int code;
char *message;
PHONGO_STRUCT_ZVAL info;
PHONGO_ZEND_OBJECT_POST
} php_phongo_writeconcernerror_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
int code;
char *message;
PHONGO_STRUCT_ZVAL info;
uint32_t index;
PHONGO_ZEND_OBJECT_POST
} php_phongo_writeerror_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_write_concern_t *write_concern;
bson_t *reply;
mongoc_client_t *client;
- int server_id;
+ uint32_t server_id;
PHONGO_ZEND_OBJECT_POST
} php_phongo_writeresult_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
char *data;
int data_len;
uint8_t type;
HashTable *properties;
PHONGO_ZEND_OBJECT_POST
} php_phongo_binary_t;
+typedef struct {
+ PHONGO_ZEND_OBJECT_PRE
+ char *ref;
+ size_t ref_len;
+ char id[25];
+ HashTable *properties;
+ PHONGO_ZEND_OBJECT_POST
+} php_phongo_dbpointer_t;
+
typedef struct {
PHONGO_ZEND_OBJECT_PRE
bool initialized;
bson_decimal128_t decimal;
HashTable *properties;
PHONGO_ZEND_OBJECT_POST
} php_phongo_decimal128_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
char *code;
size_t code_len;
bson_t *scope;
HashTable *properties;
PHONGO_ZEND_OBJECT_POST
} php_phongo_javascript_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
PHONGO_ZEND_OBJECT_POST
} php_phongo_maxkey_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
PHONGO_ZEND_OBJECT_POST
} php_phongo_minkey_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
bool initialized;
char oid[25];
HashTable *properties;
PHONGO_ZEND_OBJECT_POST
} php_phongo_objectid_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
char *pattern;
int pattern_len;
char *flags;
int flags_len;
HashTable *properties;
PHONGO_ZEND_OBJECT_POST
} php_phongo_regex_t;
+typedef struct {
+ PHONGO_ZEND_OBJECT_PRE
+ char *symbol;
+ size_t symbol_len;
+ HashTable *properties;
+ PHONGO_ZEND_OBJECT_POST
+} php_phongo_symbol_t;
+
typedef struct {
PHONGO_ZEND_OBJECT_PRE
bool initialized;
uint32_t increment;
uint32_t timestamp;
HashTable *properties;
PHONGO_ZEND_OBJECT_POST
} php_phongo_timestamp_t;
+typedef struct {
+ PHONGO_ZEND_OBJECT_PRE
+ PHONGO_ZEND_OBJECT_POST
+} php_phongo_undefined_t;
+
typedef struct {
PHONGO_ZEND_OBJECT_PRE
bool initialized;
int64_t milliseconds;
HashTable *properties;
PHONGO_ZEND_OBJECT_POST
} php_phongo_utcdatetime_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_client_t *client;
char *command_name;
uint32_t server_id;
uint64_t operation_id;
uint64_t request_id;
uint64_t duration_micros;
PHONGO_STRUCT_ZVAL z_error;
PHONGO_ZEND_OBJECT_POST
} php_phongo_commandfailedevent_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_client_t *client;
char *command_name;
uint32_t server_id;
uint64_t operation_id;
uint64_t request_id;
bson_t *command;
char *database_name;
PHONGO_ZEND_OBJECT_POST
} php_phongo_commandstartedevent_t;
typedef struct {
PHONGO_ZEND_OBJECT_PRE
mongoc_client_t *client;
char *command_name;
uint32_t server_id;
uint64_t operation_id;
uint64_t request_id;
uint64_t duration_micros;
bson_t *reply;
PHONGO_ZEND_OBJECT_POST
} php_phongo_commandsucceededevent_t;
#undef PHONGO_ZEND_OBJECT_PRE
#undef PHONGO_ZEND_OBJECT_POST
#undef PHONGO_STRUCT_ZVAL
#endif /* PHONGO_STRUCTS */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckHost.m4 b/mongodb-1.4.2/scripts/build/autotools/CheckHost.m4
similarity index 80%
copy from mongodb-1.3.4/src/libmongoc/build/autotools/CheckHost.m4
copy to mongodb-1.4.2/scripts/build/autotools/CheckHost.m4
index b48dc349..a5b7fa83 100644
--- a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckHost.m4
+++ b/mongodb-1.4.2/scripts/build/autotools/CheckHost.m4
@@ -1,54 +1,54 @@
AC_CANONICAL_HOST
os_win32=no
os_netbsd=no
os_freebsd=no
os_openbsd=no
os_hpux=no
os_linux=no
os_solaris=no
os_darwin=no
os_gnu=no
case "$host" in
*-mingw*|*-*-cygwin*)
os_win32=yes
TARGET_OS=windows
;;
*-*-*netbsd*)
os_netbsd=yes
- TARGET_OS=unix
	TARGET_OS=unix
;;
*-*-*freebsd*)
os_freebsd=yes
- TARGET_OS=unix
+ TARGET_OS=unix
;;
*-*-*openbsd*)
os_openbsd=yes
- TARGET_OS=unix
+ TARGET_OS=unix
;;
*-*-hpux*)
os_hpux=yes
- TARGET_OS=unix
+ TARGET_OS=unix
;;
*-*-linux*)
os_linux=yes
os_gnu=yes
- TARGET_OS=unix
+ TARGET_OS=unix
;;
*-*-solaris*)
os_solaris=yes
- TARGET_OS=unix
+ TARGET_OS=unix
;;
*-*-darwin*)
os_darwin=yes
- TARGET_OS=unix
+ TARGET_OS=unix
;;
gnu*|k*bsd*-gnu*)
os_gnu=yes
- TARGET_OS=unix
+ TARGET_OS=unix
;;
*)
AC_MSG_WARN([*** Please add $host to configure.ac checks!])
;;
esac
diff --git a/mongodb-1.4.2/scripts/build/autotools/CheckSSL.m4 b/mongodb-1.4.2/scripts/build/autotools/CheckSSL.m4
new file mode 100644
index 00000000..e45bdd94
--- /dev/null
+++ b/mongodb-1.4.2/scripts/build/autotools/CheckSSL.m4
@@ -0,0 +1,218 @@
+PHP_ARG_WITH([mongodb-ssl],
+ [whether to enable crypto and TLS],
+ [AS_HELP_STRING([--with-mongodb-ssl=@<:@auto/no/openssl/libressl/darwin@:>@],
+ [MongoDB: Enable TLS connections and SCRAM-SHA-1 authentication [default=auto]])],
+ [auto],
+ [no])
+
+PHP_ARG_WITH([openssl-dir],
+ [deprecated option for OpenSSL library path],
+ [AC_HELP_STRING([--with-openssl-dir=@<:@auto/DIR@:>@],
+ [MongoDB: OpenSSL library path (deprecated for pkg-config) [default=auto]])],
+ [auto],
+ [no])
+
+AS_IF([test "$PHP_MONGODB_SSL" = "openssl" -o "$PHP_MONGODB_SSL" = "auto"],[
+ found_openssl="no"
+
+ PKG_CHECK_MODULES([PHP_MONGODB_SSL],[openssl],[
+ PHP_EVAL_INCLINE([$PHP_MONGODB_SSL_CFLAGS])
+ PHP_EVAL_LIBLINE([$PHP_MONGODB_SSL_LIBS],[MONGODB_SHARED_LIBADD])
+ PHP_MONGODB_SSL="openssl"
+ found_openssl="yes"
+
+ old_CFLAGS="$CFLAGS"
+ CFLAGS="$PHP_MONGODB_SSL_CFLAGS $CFLAGS"
+
+ AC_CHECK_DECLS([ASN1_STRING_get0_data],
+ [have_ASN1_STRING_get0_data="yes"],
+ [have_ASN1_STRING_get0_data="no"],
+ [[#include <openssl/asn1.h>]])
+
+ CFLAGS="$old_CFLAGS"
+ ],[
+ unset OPENSSL_INCDIR
+ unset OPENSSL_LIBDIR
+
+ dnl Use a list of directories from PHP_SETUP_OPENSSL by default.
+ dnl Support documented "auto" and older, undocumented "yes" options
+ if test "$PHP_OPENSSL_DIR" = "auto" -o "$PHP_OPENSSL_DIR" = "yes"; then
+ PHP_OPENSSL_DIR="/usr/local/ssl /usr/local /usr /usr/local/openssl"
+ fi
+
+ for i in $PHP_OPENSSL_DIR; do
+ if test -r $i/include/openssl/evp.h; then
+ OPENSSL_INCDIR="$i/include"
+ fi
+ if test -r $i/$PHP_LIBDIR/libssl.a -o -r $i/$PHP_LIBDIR/libssl.$SHLIB_SUFFIX_NAME; then
+ OPENSSL_LIBDIR="$i/$PHP_LIBDIR"
+ fi
+ test -n "$OPENSSL_INCDIR" && test -n "$OPENSSL_LIBDIR" && break
+ done
+
+ if test -n "$OPENSSL_LIBDIR"; then
+ OPENSSL_LIBDIR_LDFLAG="-L$OPENSSL_LIBDIR"
+ fi
+
+ PHP_CHECK_LIBRARY([crypto],
+ [EVP_DigestInit_ex],
+ [have_crypto_lib="yes"],
+ [have_crypto_lib="no"],
+ [$OPENSSL_LIBDIR_LDFLAG])
+ PHP_CHECK_LIBRARY([ssl],
+ [SSL_library_init],
+ [have_ssl_lib="yes"],
+ [have_ssl_lib="no"],
+ [$OPENSSL_LIBDIR_LDFLAG -lcrypto])
+
+ if test "$have_ssl_lib" = "yes" -a "$have_crypto_lib" = "yes"; then
+ PHP_ADD_LIBRARY([ssl],,[MONGODB_SHARED_LIBADD])
+ PHP_ADD_LIBRARY([crypto],,[MONGODB_SHARED_LIBADD])
+
+ if test -n "$OPENSSL_LIBDIR"; then
+ PHP_ADD_LIBPATH([$OPENSSL_LIBDIR],[MONGODB_SHARED_LIBADD])
+ fi
+
+ if test -n "$OPENSSL_INCDIR"; then
+ PHP_ADD_INCLUDE($OPENSSL_INCDIR)
+ fi
+
+ old_CFLAGS="$CFLAGS"
+ CFLAGS="-I$OPENSSL_INCDIR $CFLAGS"
+
+ AC_CHECK_DECLS([ASN1_STRING_get0_data],
+ [have_ASN1_STRING_get0_data="yes"],
+ [have_ASN1_STRING_get0_data="no"],
+ [[#include <openssl/asn1.h>]])
+
+ CFLAGS="$old_CFLAGS"
+
+ PHP_MONGODB_SSL="openssl"
+ found_openssl="yes"
+ fi
+ ])
+
+ if test "$PHP_MONGODB_SSL" = "openssl" -a "$found_openssl" != "yes"; then
+ AC_MSG_ERROR([OpenSSL libraries and development headers could not be found])
+ fi
+])
+
+AS_IF([test "$PHP_MONGODB_SSL" = "libressl" -o "$PHP_MONGODB_SSL" = "auto"],[
+ found_libressl="no"
+
+ PKG_CHECK_MODULES([PHP_MONGODB_SSL],[libtls libcrypto],[
+ PHP_EVAL_INCLINE([$PHP_MONGODB_SSL_CFLAGS])
+ PHP_EVAL_LIBLINE([$PHP_MONGODB_SSL_LIBS],[MONGODB_SHARED_LIBADD])
+ PHP_MONGODB_SSL="libressl"
+ found_libressl="yes"
+ ],[
+ PHP_CHECK_LIBRARY([crypto],
+ [EVP_DigestInit_ex],
+ [have_crypto_lib="yes"],
+ [have_crypto_lib="no"])
+ PHP_CHECK_LIBRARY([tls],
+ [tls_init],
+ [have_ssl_lib="yes"],
+ [have_ssl_lib="no"],
+ [-lcrypto])
+
+ if test "$have_ssl_lib" = "yes" -a "$have_crypto_lib" = "yes"; then
+ PHP_ADD_LIBRARY([tls],,[MONGODB_SHARED_LIBADD])
+ PHP_ADD_LIBRARY([crypto],,[MONGODB_SHARED_LIBADD])
+ PHP_MONGODB_SSL="libressl"
+ found_libressl="yes"
+ fi
+ ])
+
+ if test "$PHP_MONGODB_SSL" = "libressl" -a "$found_libressl" != "yes"; then
+ AC_MSG_ERROR([LibreSSL libraries and development headers could not be found])
+ fi
+])
+
+AS_IF([test "$PHP_MONGODB_SSL" = "darwin" -o \( "$PHP_MONGODB_SSL" = "auto" -a "$os_darwin" = "yes" \)],[
+ if test "$os_darwin" = "no"; then
+ AC_MSG_ERROR([Darwin SSL is only supported on macOS])
+ fi
+ dnl PHP_FRAMEWORKS is only used for SAPI builds, so use MONGODB_SHARED_LIBADD for shared builds
+ if test "$ext_shared" = "yes"; then
+ MONGODB_SHARED_LIBADD="-framework Security -framework CoreFoundation $MONGODB_SHARED_LIBADD"
+ else
+ PHP_ADD_FRAMEWORK([Security])
+ PHP_ADD_FRAMEWORK([CoreFoundation])
+ fi
+ PHP_MONGODB_SSL="darwin"
+])
+
+AS_IF([test "$PHP_MONGODB_SSL" = "auto"],[
+ PHP_MONGODB_SSL="no"
+])
+
+AC_MSG_CHECKING([which TLS library to use])
+AC_MSG_RESULT([$PHP_MONGODB_SSL])
+
+dnl Disable Windows SSL and crypto
+AC_SUBST(MONGOC_ENABLE_SSL_SECURE_CHANNEL, 0)
+AC_SUBST(MONGOC_ENABLE_CRYPTO_CNG, 0)
+
+if test "$PHP_MONGODB_SSL" = "openssl" -o "$PHP_MONGODB_SSL" = "libressl" -o "$PHP_MONGODB_SSL" = "darwin"; then
+ AC_SUBST(MONGOC_ENABLE_SSL, 1)
+ AC_SUBST(MONGOC_ENABLE_CRYPTO, 1)
+ if test "$PHP_MONGODB_SSL" = "darwin"; then
+ AC_SUBST(MONGOC_ENABLE_SSL_OPENSSL, 0)
+ AC_SUBST(MONGOC_ENABLE_SSL_LIBRESSL, 0)
+ AC_SUBST(MONGOC_ENABLE_SSL_SECURE_TRANSPORT, 1)
+ AC_SUBST(MONGOC_ENABLE_CRYPTO_LIBCRYPTO, 0)
+ AC_SUBST(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO, 1)
+ elif test "$PHP_MONGODB_SSL" = "openssl"; then
+ AC_SUBST(MONGOC_ENABLE_SSL_OPENSSL, 1)
+ AC_SUBST(MONGOC_ENABLE_SSL_LIBRESSL, 0)
+ AC_SUBST(MONGOC_ENABLE_SSL_SECURE_TRANSPORT, 0)
+ AC_SUBST(MONGOC_ENABLE_CRYPTO_LIBCRYPTO, 1)
+ AC_SUBST(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO, 0)
+ elif test "$PHP_MONGODB_SSL" = "libressl"; then
+ AC_SUBST(MONGOC_ENABLE_SSL_OPENSSL, 0)
+ AC_SUBST(MONGOC_ENABLE_SSL_LIBRESSL, 1)
+ AC_SUBST(MONGOC_ENABLE_SSL_SECURE_TRANSPORT, 0)
+ AC_SUBST(MONGOC_ENABLE_CRYPTO_LIBCRYPTO, 1)
+ AC_SUBST(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO, 0)
+ fi
+else
+ AC_SUBST(MONGOC_ENABLE_SSL, 0)
+ AC_SUBST(MONGOC_ENABLE_SSL_LIBRESSL, 0)
+ AC_SUBST(MONGOC_ENABLE_SSL_OPENSSL, 0)
+ AC_SUBST(MONGOC_ENABLE_SSL_SECURE_TRANSPORT, 0)
+ AC_SUBST(MONGOC_ENABLE_CRYPTO, 0)
+ AC_SUBST(MONGOC_ENABLE_CRYPTO_LIBCRYPTO, 0)
+ AC_SUBST(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO, 0)
+fi
+
+if test "x$have_ASN1_STRING_get0_data" = "xyes"; then
+ AC_SUBST(MONGOC_HAVE_ASN1_STRING_GET0_DATA, 1)
+else
+ AC_SUBST(MONGOC_HAVE_ASN1_STRING_GET0_DATA, 0)
+fi
+
+PHP_ARG_ENABLE([mongodb-crypto-system-profile],
+ [whether to use system crypto profile],
+ [AC_HELP_STRING([--enable-mongodb-crypto-system-profile],
+ [MongoDB: Use system crypto profile (OpenSSL only) [default=no]])],
+ [no],
+ [no])
+
+PHP_ARG_ENABLE([system-ciphers],
+ [deprecated option for whether to use system crypto profile],
+ AC_HELP_STRING([--enable-system-ciphers],
+ [MongoDB: whether to use system crypto profile (deprecated for --enable-mongodb-crypto-system-profile) [default=no]]),
+ [no],
+ [no])
+
+dnl Also consider the deprecated --enable-system-ciphers option
+if test "$PHP_MONGODB_CRYPTO_SYSTEM_PROFILE" = "yes" -o "$PHP_SYSTEM_CIPHERS" = "yes"; then
+ if test "$PHP_MONGODB_SSL" = "openssl"; then
+ AC_SUBST(MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE, 1)
+ else
+ AC_MSG_ERROR([System crypto profile is only available with OpenSSL])
+ fi
+else
+ AC_SUBST(MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE, 0)
+fi
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/m4/pkg.m4 b/mongodb-1.4.2/scripts/build/autotools/m4/pkg.m4
similarity index 100%
copy from mongodb-1.3.4/src/libbson/build/autotools/m4/pkg.m4
copy to mongodb-1.4.2/scripts/build/autotools/m4/pkg.m4
diff --git a/mongodb-1.3.4/scripts/centos/essentials.sh b/mongodb-1.4.2/scripts/centos/essentials.sh
similarity index 100%
rename from mongodb-1.3.4/scripts/centos/essentials.sh
rename to mongodb-1.4.2/scripts/centos/essentials.sh
diff --git a/mongodb-1.3.4/scripts/centos/ldap/Domain.ldif b/mongodb-1.4.2/scripts/centos/ldap/Domain.ldif
similarity index 100%
rename from mongodb-1.3.4/scripts/centos/ldap/Domain.ldif
rename to mongodb-1.4.2/scripts/centos/ldap/Domain.ldif
diff --git a/mongodb-1.3.4/scripts/centos/ldap/Users.ldif b/mongodb-1.4.2/scripts/centos/ldap/Users.ldif
similarity index 100%
rename from mongodb-1.3.4/scripts/centos/ldap/Users.ldif
rename to mongodb-1.4.2/scripts/centos/ldap/Users.ldif
diff --git a/mongodb-1.3.4/scripts/centos/ldap/basics.ldif b/mongodb-1.4.2/scripts/centos/ldap/basics.ldif
similarity index 100%
rename from mongodb-1.3.4/scripts/centos/ldap/basics.ldif
rename to mongodb-1.4.2/scripts/centos/ldap/basics.ldif
diff --git a/mongodb-1.3.4/scripts/centos/ldap/install.sh b/mongodb-1.4.2/scripts/centos/ldap/install.sh
similarity index 100%
rename from mongodb-1.3.4/scripts/centos/ldap/install.sh
rename to mongodb-1.4.2/scripts/centos/ldap/install.sh
diff --git a/mongodb-1.3.4/scripts/centos/ldap/ldapconfig.py b/mongodb-1.4.2/scripts/centos/ldap/ldapconfig.py
similarity index 100%
rename from mongodb-1.3.4/scripts/centos/ldap/ldapconfig.py
rename to mongodb-1.4.2/scripts/centos/ldap/ldapconfig.py
diff --git a/mongodb-1.3.4/scripts/centos/ldap/mongod.ldif b/mongodb-1.4.2/scripts/centos/ldap/mongod.ldif
similarity index 100%
rename from mongodb-1.3.4/scripts/centos/ldap/mongod.ldif
rename to mongodb-1.4.2/scripts/centos/ldap/mongod.ldif
diff --git a/mongodb-1.3.4/scripts/centos/ldap/pw.ldif b/mongodb-1.4.2/scripts/centos/ldap/pw.ldif
similarity index 100%
rename from mongodb-1.3.4/scripts/centos/ldap/pw.ldif
rename to mongodb-1.4.2/scripts/centos/ldap/pw.ldif
diff --git a/mongodb-1.3.4/scripts/centos/ldap/saslauthd.conf b/mongodb-1.4.2/scripts/centos/ldap/saslauthd.conf
similarity index 100%
rename from mongodb-1.3.4/scripts/centos/ldap/saslauthd.conf
rename to mongodb-1.4.2/scripts/centos/ldap/saslauthd.conf
diff --git a/mongodb-1.3.4/scripts/centos/ldap/users b/mongodb-1.4.2/scripts/centos/ldap/users
similarity index 100%
rename from mongodb-1.3.4/scripts/centos/ldap/users
rename to mongodb-1.4.2/scripts/centos/ldap/users
diff --git a/mongodb-1.3.4/scripts/convert-bson-corpus-tests.php b/mongodb-1.4.2/scripts/convert-bson-corpus-tests.php
similarity index 94%
rename from mongodb-1.3.4/scripts/convert-bson-corpus-tests.php
rename to mongodb-1.4.2/scripts/convert-bson-corpus-tests.php
index 7f72410b..d33b9843 100644
--- a/mongodb-1.3.4/scripts/convert-bson-corpus-tests.php
+++ b/mongodb-1.4.2/scripts/convert-bson-corpus-tests.php
@@ -1,300 +1,303 @@
<?php
require_once __DIR__ . '/../tests/utils/tools.php';
$expectedFailures = [
'Double type: 1.23456789012345677E+18' => 'Variation in double\'s string representation (SPEC-850)',
'Double type: -1.23456789012345677E+18' => 'Variation in double\'s string representation (SPEC-850)',
'Int64 type: -1' => 'PHP encodes integers as 32-bit if range allows',
'Int64 type: 0' => 'PHP encodes integers as 32-bit if range allows',
'Int64 type: 1' => 'PHP encodes integers as 32-bit if range allows',
'Javascript Code with Scope: bad scope doc (field has bad string length)' => 'Depends on PHPC-889',
'Javascript Code with Scope: Unicode and embedded null in code string, empty scope' => 'Embedded null in code string is not supported in libbson (CDRIVER-1879)',
'Multiple types within the same document: All BSON types' => 'PHP encodes integers as 32-bit if range allows',
'Top-level document validity: Bad $date (number, not string or hash)' => 'Legacy extended JSON $date syntax uses numbers (CDRIVER-2223)',
];
+$for64bitOnly = [
+ 'Int64 type: MinValue' => "Can't represent 64-bit ints on a 32-bit platform",
+ 'Int64 type: MaxValue' => "Can't represent 64-bit ints on a 32-bit platform",
+];
+
$outputPath = realpath(__DIR__ . '/../tests') . '/bson-corpus/';
if ( ! is_dir($outputPath) && ! mkdir($outputPath, 0755, true)) {
printf("Error creating output path: %s\n", $outputPath);
}
foreach (array_slice($argv, 1) as $inputFile) {
if ( ! is_readable($inputFile) || ! is_file($inputFile)) {
printf("Error reading %s\n", $inputFile);
continue;
}
$test = json_decode(file_get_contents($inputFile), true);
if (json_last_error() !== JSON_ERROR_NONE) {
printf("Error decoding %s: %s\n", $inputFile, json_last_error_msg());
continue;
}
if ( ! isset($test['description'])) {
printf("Skipping test file without \"description\" field: %s\n", $inputFile);
continue;
}
- if ( ! empty($test['deprecated'])) {
- printf("Skipping deprecated test file: %s\n", $inputFile);
- continue;
- }
-
if ( ! empty($test['valid'])) {
foreach ($test['valid'] as $i => $case) {
$outputFile = sprintf('%s-valid-%03d.phpt', pathinfo($inputFile, PATHINFO_FILENAME), $i + 1);
try {
- $output = renderPhpt(getParamsForValid($test, $case), $expectedFailures);
+ $output = renderPhpt(getParamsForValid($test, $case), $expectedFailures, $for64bitOnly);
} catch (Exception $e) {
printf("Error processing valid[%d] in %s: %s\n", $i, $inputFile, $e->getMessage());
continue;
}
if (false === file_put_contents($outputPath . '/' . $outputFile, $output)) {
printf("Error writing valid[%d] in %s\n", $i, $inputFile);
continue;
}
}
}
if ( ! empty($test['decodeErrors'])) {
foreach ($test['decodeErrors'] as $i => $case) {
$outputFile = sprintf('%s-decodeError-%03d.phpt', pathinfo($inputFile, PATHINFO_FILENAME), $i + 1);
try {
- $output = renderPhpt(getParamsForDecodeError($test, $case), $expectedFailures);
+ $output = renderPhpt(getParamsForDecodeError($test, $case), $expectedFailures, $for64bitOnly);
} catch (Exception $e) {
printf("Error processing decodeErrors[%d] in %s: %s\n", $i, $inputFile, $e->getMessage());
continue;
}
if (false === file_put_contents($outputPath . '/' . $outputFile, $output)) {
printf("Error writing decodeErrors[%d] in %s\n", $i, $inputFile);
continue;
}
}
}
if ( ! empty($test['parseErrors'])) {
foreach ($test['parseErrors'] as $i => $case) {
$outputFile = sprintf('%s-parseError-%03d.phpt', pathinfo($inputFile, PATHINFO_FILENAME), $i + 1);
try {
- $output = renderPhpt(getParamsForParseError($test, $case), $expectedFailures);
+ $output = renderPhpt(getParamsForParseError($test, $case), $expectedFailures, $for64bitOnly);
} catch (Exception $e) {
printf("Error processing parseErrors[%d] in %s: %s\n", $i, $inputFile, $e->getMessage());
continue;
}
if (false === file_put_contents($outputPath . '/' . $outputFile, $output)) {
printf("Error writing parseErrors[%d] in %s\n", $i, $inputFile);
continue;
}
}
}
}
function getParamsForValid(array $test, array $case)
{
foreach (['description', 'canonical_bson', 'canonical_extjson'] as $field) {
if (!isset($case[$field])) {
throw new InvalidArgumentException(sprintf('Missing "%s" field', $field));
}
}
$code = '';
$expect = '';
$lossy = isset($case['lossy']) ? (boolean) $case['lossy'] : false;
$canonicalBson = $case['canonical_bson'];
$expectedCanonicalBson = strtolower($canonicalBson);
$code .= sprintf('$canonicalBson = hex2bin(%s);', var_export($canonicalBson, true)) . "\n";
if (isset($case['degenerate_bson'])) {
$degenerateBson = $case['degenerate_bson'];
$expectedDegenerateBson = strtolower($degenerateBson);
$code .= sprintf('$degenerateBson = hex2bin(%s);', var_export($degenerateBson, true)) . "\n";
}
if (isset($case['converted_bson'])) {
$convertedBson = $case['converted_bson'];
$expectedConvertedBson = strtolower($convertedBson);
$code .= sprintf('$convertedBson = hex2bin(%s);', var_export($convertedBson, true)) . "\n";
}
$canonicalExtJson = $case['canonical_extjson'];
$expectedCanonicalExtJson = json_canonicalize($canonicalExtJson);
$code .= sprintf('$canonicalExtJson = %s;', var_export($canonicalExtJson, true)) . "\n";
if (isset($case['relaxed_extjson'])) {
$relaxedExtJson = $case['relaxed_extjson'];
$expectedRelaxedExtJson = json_canonicalize($relaxedExtJson);
$code .= sprintf('$relaxedExtJson = %s;', var_export($relaxedExtJson, true)) . "\n";
}
if (isset($case['degenerate_extjson'])) {
$degenerateExtJson = $case['degenerate_extjson'];
$expectedDegenerateExtJson = json_canonicalize($degenerateExtJson);
$code .= sprintf('$degenerateExtJson = %s;', var_export($degenerateExtJson, true)) . "\n";
}
if (isset($case['converted_extjson'])) {
$convertedExtJson = $case['converted_extjson'];
$expectedConvertedExtJson = json_canonicalize($convertedExtJson);
$code .= sprintf('$convertedExtJson = %s;', var_export($convertedExtJson, true)) . "\n";
}
$code .= "\n// Canonical BSON -> Native -> Canonical BSON \n";
$code .= 'echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";' . "\n";
$expect .= $expectedCanonicalBson . "\n";
$code .= "\n// Canonical BSON -> Canonical extJSON \n";
$code .= 'echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";' . "\n";;
$expect .= $expectedCanonicalExtJson . "\n";
if (isset($relaxedExtJson)) {
$code .= "\n// Canonical BSON -> Relaxed extJSON \n";
$code .= 'echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n";' . "\n";;
$expect .= $expectedRelaxedExtJson . "\n";
}
if (!$lossy) {
$code .= "\n// Canonical extJSON -> Canonical BSON \n";
$code .= 'echo bin2hex(fromJSON($canonicalExtJson)), "\n";' . "\n";
$expect .= $expectedCanonicalBson . "\n";
}
if (isset($degenerateBson)) {
$code .= "\n// Degenerate BSON -> Native -> Canonical BSON \n";
$code .= 'echo bin2hex(fromPHP(toPHP($degenerateBson))), "\n";' . "\n";
$expect .= $expectedCanonicalBson . "\n";
$code .= "\n// Degenerate BSON -> Canonical extJSON \n";
$code .= 'echo json_canonicalize(toCanonicalExtendedJSON($degenerateBson)), "\n";' . "\n";;
$expect .= $expectedCanonicalExtJson . "\n";
if (isset($relaxedExtJson)) {
$code .= "\n// Degenerate BSON -> Relaxed extJSON \n";
$code .= 'echo json_canonicalize(toRelaxedExtendedJSON($degenerateBson)), "\n";' . "\n";;
$expect .= $expectedRelaxedExtJson . "\n";
}
}
if (isset($degenerateExtJson) && !$lossy) {
$code .= "\n// Degenerate extJSON -> Canonical BSON \n";
$code .= 'echo bin2hex(fromJSON($degenerateExtJson)), "\n";' . "\n";
$expect .= $expectedCanonicalBson . "\n";
}
if (isset($relaxedExtJson)) {
$code .= "\n// Relaxed extJSON -> BSON -> Relaxed extJSON \n";
$code .= 'echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n";' . "\n";
$expect .= $expectedRelaxedExtJson . "\n";
}
return [
'%NAME%' => sprintf('%s: %s', trim($test['description']), trim($case['description'])),
'%CODE%' => trim($code),
'%EXPECT%' => trim($expect),
];
}
function getParamsForDecodeError(array $test, array $case)
{
foreach (['description', 'bson'] as $field) {
if (!isset($case[$field])) {
throw new InvalidArgumentException(sprintf('Missing "%s" field', $field));
}
}
$code = sprintf('$bson = hex2bin(%s);', var_export($case['bson'], true)) . "\n\n";
$code .= "throws(function() use (\$bson) {\n";
$code .= " var_dump(toPHP(\$bson));\n";
$code .= "}, 'MongoDB\Driver\Exception\UnexpectedValueException');";
/* We do not test for the exception message, since that may differ based on
* the nature of the decoding error. */
$expect = "OK: Got MongoDB\Driver\Exception\UnexpectedValueException";
return [
'%NAME%' => sprintf('%s: %s', trim($test['description']), trim($case['description'])),
'%CODE%' => trim($code),
'%EXPECT%' => trim($expect),
];
}
function getParamsForParseError(array $test, array $case)
{
foreach (['description', 'string'] as $field) {
if (!isset($case[$field])) {
throw new InvalidArgumentException(sprintf('Missing "%s" field', $field));
}
}
$code = '';
$expect = '';
switch ($test['bson_type']) {
case '0x00': // Top-level document
$code = "throws(function() {\n";
$code .= sprintf(" fromJSON(%s);\n", var_export($case['string'], true));
$code .= "}, 'MongoDB\Driver\Exception\UnexpectedValueException');";
/* We do not test for the exception message, since that may differ
* based on the nature of the parse error. */
$expect = "OK: Got MongoDB\Driver\Exception\UnexpectedValueException";
break;
case '0x13': // Decimal128
$code = "throws(function() {\n";
$code .= sprintf(" new MongoDB\BSON\Decimal128(%s);\n", var_export($case['string'], true));
$code .= "}, 'MongoDB\Driver\Exception\InvalidArgumentException');";
/* We do not test for the exception message, since that may differ
* based on the nature of the parse error. */
$expect = "OK: Got MongoDB\Driver\Exception\InvalidArgumentException";
break;
default:
throw new UnexpectedValueException(sprintf("Parse errors not supported for BSON type: %s", $test['bson_type']));
}
return [
'%NAME%' => sprintf('%s: %s', trim($test['description']), trim($case['description'])),
'%CODE%' => trim($code),
'%EXPECT%' => trim($expect),
];
}
-function renderPhpt(array $params, array $expectedFailures)
+function renderPhpt(array $params, array $expectedFailures, array $for64bitOnly)
{
$params['%XFAIL%'] = isset($expectedFailures[$params['%NAME%']])
? "--XFAIL--\n" . $expectedFailures[$params['%NAME%']] . "\n"
: '';
+ $params['%SKIPIF%'] = isset($for64bitOnly[$params['%NAME%']])
+ ? "--SKIPIF--\n" . "<?php if (PHP_INT_SIZE !== 8) { die(\"skip {$for64bitOnly[$params['%NAME%']]}\"); } ?>" . "\n"
+ : '';
$template = <<< 'TEMPLATE'
--TEST--
%NAME%
-%XFAIL%--DESCRIPTION--
+%XFAIL%%SKIPIF%--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
%CODE%
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
%EXPECT%
===DONE===
TEMPLATE;
return str_replace(array_keys($params), array_values($params), $template);
}
diff --git a/mongodb-1.3.4/scripts/convert-mo-tests.php b/mongodb-1.4.2/scripts/convert-mo-tests.php
similarity index 100%
rename from mongodb-1.3.4/scripts/convert-mo-tests.php
rename to mongodb-1.4.2/scripts/convert-mo-tests.php
diff --git a/mongodb-1.3.4/scripts/freebsd/essentials.sh b/mongodb-1.4.2/scripts/freebsd/essentials.sh
similarity index 100%
rename from mongodb-1.3.4/scripts/freebsd/essentials.sh
rename to mongodb-1.4.2/scripts/freebsd/essentials.sh
diff --git a/mongodb-1.3.4/scripts/freebsd/phongo.sh b/mongodb-1.4.2/scripts/freebsd/phongo.sh
similarity index 100%
rename from mongodb-1.3.4/scripts/freebsd/phongo.sh
rename to mongodb-1.4.2/scripts/freebsd/phongo.sh
diff --git a/mongodb-1.3.4/scripts/list-servers.php b/mongodb-1.4.2/scripts/list-servers.php
similarity index 100%
rename from mongodb-1.3.4/scripts/list-servers.php
rename to mongodb-1.4.2/scripts/list-servers.php
diff --git a/mongodb-1.3.4/scripts/presets/replicaset-30.json b/mongodb-1.4.2/scripts/presets/replicaset-30.json
similarity index 94%
rename from mongodb-1.3.4/scripts/presets/replicaset-30.json
rename to mongodb-1.4.2/scripts/presets/replicaset-30.json
index 86a47c4d..65c5d1f2 100644
--- a/mongodb-1.3.4/scripts/presets/replicaset-30.json
+++ b/mongodb-1.4.2/scripts/presets/replicaset-30.json
@@ -1,74 +1,71 @@
{
"id": "REPLICASET_30",
"name": "mongod",
"members": [
{
"procParams": {
"dbpath": "/tmp/REPLICASET/3100/",
"ipv6": true,
"logappend": true,
"logpath": "/tmp/REPLICASET/3100/mongod.log",
- "nohttpinterface": true,
"journal": true,
"noprealloc": true,
"nssize": 1,
"port": 3100,
"smallfiles": true,
"setParameter": {"enableTestCommands": 1}
},
"rsParams": {
"priority": 99,
"tags": {
"ordinal": "one",
"dc": "pa"
}
},
"server_id": "RS-30-one"
},
{
"procParams": {
"dbpath": "/tmp/REPLICASET/3101/",
"ipv6": true,
"logappend": true,
"logpath": "/tmp/REPLICASET/3101/mongod.log",
- "nohttpinterface": true,
"journal": true,
"noprealloc": true,
"nssize": 1,
"port": 3101,
"smallfiles": true,
"setParameter": {"enableTestCommands": 1}
},
"rsParams": {
"priority": 1.1,
"tags": {
"ordinal": "two",
"dc": "nyc"
}
},
"server_id": "RS-30-two"
},
{
"procParams": {
"dbpath": "/tmp/REPLICASET/3102/",
"ipv6": true,
"logappend": true,
"logpath": "/tmp/REPLICASET/3102/mongod.log",
- "nohttpinterface": true,
"journal": true,
"noprealloc": true,
"nssize": 1,
"port": 3102,
"smallfiles": true,
"setParameter": {"enableTestCommands": 1}
},
"rsParams": {
"arbiterOnly": true
},
"server_id": "RS-30-arbiter"
}
],
"version": "30-release"
}
diff --git a/mongodb-1.3.4/scripts/presets/replicaset.json b/mongodb-1.4.2/scripts/presets/replicaset-dns.json
similarity index 51%
copy from mongodb-1.3.4/scripts/presets/replicaset.json
copy to mongodb-1.4.2/scripts/presets/replicaset-dns.json
index 765d9718..9850d726 100644
--- a/mongodb-1.3.4/scripts/presets/replicaset.json
+++ b/mongodb-1.4.2/scripts/presets/replicaset-dns.json
@@ -1,73 +1,62 @@
{
- "id": "REPLICASET",
+ "id": "REPLICASET_DNS",
"name": "mongod",
"members": [
{
"procParams": {
- "dbpath": "/tmp/REPLICASET/3000/",
+ "dbpath": "/tmp/REPLICASET/27017/",
"ipv6": true,
"logappend": true,
- "logpath": "/tmp/REPLICASET/3000/mongod.log",
- "nohttpinterface": true,
+ "logpath": "/tmp/REPLICASET/27017/mongod.log",
"journal": true,
"noprealloc": true,
"nssize": 1,
- "port": 3000,
+ "port": 27017,
"smallfiles": true,
"setParameter": {"enableTestCommands": 1}
},
"rsParams": {
- "priority": 99,
- "tags": {
- "ordinal": "one",
- "dc": "pa"
- }
+ "priority": 1
},
- "server_id": "RS-one"
+ "server_id": "DNS-one"
},
{
"procParams": {
- "dbpath": "/tmp/REPLICASET/3001/",
+ "dbpath": "/tmp/REPLICASET/27018/",
"ipv6": true,
"logappend": true,
- "logpath": "/tmp/REPLICASET/3001/mongod.log",
- "nohttpinterface": true,
+ "logpath": "/tmp/REPLICASET/27018/mongod.log",
"journal": true,
"noprealloc": true,
"nssize": 1,
- "port": 3001,
+ "port": 27018,
"smallfiles": true,
"setParameter": {"enableTestCommands": 1}
},
"rsParams": {
- "priority": 1.1,
- "tags": {
- "ordinal": "two",
- "dc": "nyc"
- }
+ "priority": 1
},
- "server_id": "RS-two"
+ "server_id": "DNS-two"
},
{
"procParams": {
- "dbpath": "/tmp/REPLICASET/3002/",
+ "dbpath": "/tmp/REPLICASET/27019/",
"ipv6": true,
"logappend": true,
- "logpath": "/tmp/REPLICASET/3002/mongod.log",
- "nohttpinterface": true,
+ "logpath": "/tmp/REPLICASET/27019/mongod.log",
"journal": true,
"noprealloc": true,
"nssize": 1,
- "port": 3002,
+ "port": 27019,
"smallfiles": true,
"setParameter": {"enableTestCommands": 1}
},
"rsParams": {
- "arbiterOnly": true
+ "priority": 1
},
- "server_id": "RS-arbiter"
+ "server_id": "DNS-three"
}
]
}
diff --git a/mongodb-1.3.4/scripts/presets/replicaset.json b/mongodb-1.4.2/scripts/presets/replicaset.json
similarity index 93%
rename from mongodb-1.3.4/scripts/presets/replicaset.json
rename to mongodb-1.4.2/scripts/presets/replicaset.json
index 765d9718..88299b41 100644
--- a/mongodb-1.3.4/scripts/presets/replicaset.json
+++ b/mongodb-1.4.2/scripts/presets/replicaset.json
@@ -1,73 +1,70 @@
{
"id": "REPLICASET",
"name": "mongod",
"members": [
{
"procParams": {
"dbpath": "/tmp/REPLICASET/3000/",
"ipv6": true,
"logappend": true,
"logpath": "/tmp/REPLICASET/3000/mongod.log",
- "nohttpinterface": true,
"journal": true,
"noprealloc": true,
"nssize": 1,
"port": 3000,
"smallfiles": true,
"setParameter": {"enableTestCommands": 1}
},
"rsParams": {
"priority": 99,
"tags": {
"ordinal": "one",
"dc": "pa"
}
},
"server_id": "RS-one"
},
{
"procParams": {
"dbpath": "/tmp/REPLICASET/3001/",
"ipv6": true,
"logappend": true,
"logpath": "/tmp/REPLICASET/3001/mongod.log",
- "nohttpinterface": true,
"journal": true,
"noprealloc": true,
"nssize": 1,
"port": 3001,
"smallfiles": true,
"setParameter": {"enableTestCommands": 1}
},
"rsParams": {
"priority": 1.1,
"tags": {
"ordinal": "two",
"dc": "nyc"
}
},
"server_id": "RS-two"
},
{
"procParams": {
"dbpath": "/tmp/REPLICASET/3002/",
"ipv6": true,
"logappend": true,
"logpath": "/tmp/REPLICASET/3002/mongod.log",
- "nohttpinterface": true,
"journal": true,
"noprealloc": true,
"nssize": 1,
"port": 3002,
"smallfiles": true,
"setParameter": {"enableTestCommands": 1}
},
"rsParams": {
"arbiterOnly": true
},
"server_id": "RS-arbiter"
}
]
}
diff --git a/mongodb-1.3.4/scripts/presets/standalone-30.json b/mongodb-1.4.2/scripts/presets/standalone-30.json
similarity index 100%
rename from mongodb-1.3.4/scripts/presets/standalone-30.json
rename to mongodb-1.4.2/scripts/presets/standalone-30.json
diff --git a/mongodb-1.3.4/scripts/presets/standalone-auth.json b/mongodb-1.4.2/scripts/presets/standalone-auth.json
similarity index 100%
rename from mongodb-1.3.4/scripts/presets/standalone-auth.json
rename to mongodb-1.4.2/scripts/presets/standalone-auth.json
diff --git a/mongodb-1.3.4/scripts/presets/standalone-plain.json b/mongodb-1.4.2/scripts/presets/standalone-plain.json
similarity index 100%
rename from mongodb-1.3.4/scripts/presets/standalone-plain.json
rename to mongodb-1.4.2/scripts/presets/standalone-plain.json
diff --git a/mongodb-1.3.4/scripts/presets/standalone-ssl.json b/mongodb-1.4.2/scripts/presets/standalone-ssl.json
similarity index 93%
rename from mongodb-1.3.4/scripts/presets/standalone-ssl.json
rename to mongodb-1.4.2/scripts/presets/standalone-ssl.json
index cca1f2f7..54534b4e 100644
--- a/mongodb-1.3.4/scripts/presets/standalone-ssl.json
+++ b/mongodb-1.4.2/scripts/presets/standalone-ssl.json
@@ -1,20 +1,20 @@
{
"name": "mongod",
"id" : "STANDALONE_SSL",
"procParams": {
"dbpath": "/tmp/standalone-ssl/",
"ipv6": true,
"logappend": true,
"logpath": "/tmp/standalone-ssl/m.log",
"journal": true,
"port": 2100,
"setParameter": {"enableTestCommands": 1}
},
"sslParams": {
- "sslMode": "requireSSL",
+ "sslMode": "requireSSL",
"sslCAFile": "/phongo/scripts/ssl/ca.pem",
"sslPEMKeyFile": "/phongo/scripts/ssl/server.pem",
"sslWeakCertificateValidation": true
}
}
diff --git a/mongodb-1.3.4/scripts/presets/standalone-x509.json b/mongodb-1.4.2/scripts/presets/standalone-x509.json
similarity index 95%
rename from mongodb-1.3.4/scripts/presets/standalone-x509.json
rename to mongodb-1.4.2/scripts/presets/standalone-x509.json
index bbbe9874..20cf0365 100644
--- a/mongodb-1.3.4/scripts/presets/standalone-x509.json
+++ b/mongodb-1.4.2/scripts/presets/standalone-x509.json
@@ -1,22 +1,22 @@
{
"name": "mongod",
"id" : "STANDALONE_X509",
"authSource": "$external",
"login": "C=US,ST=New York,L=New York City,O=MongoDB,OU=KernelUser,CN=client",
"procParams": {
"dbpath": "/tmp/standalone-x509/",
"ipv6": true,
"logappend": true,
"logpath": "/tmp/standalone-x509/m.log",
"journal": true,
"port": 2300,
"setParameter": {"enableTestCommands": 1, "authenticationMechanisms": "MONGODB-X509"}
},
"sslParams": {
- "sslMode": "requireSSL",
+ "sslMode": "requireSSL",
"sslCAFile": "/phongo/scripts/ssl/ca.pem",
"sslPEMKeyFile": "/phongo/scripts/ssl/server.pem",
"sslWeakCertificateValidation": true
}
}
diff --git a/mongodb-1.3.4/scripts/presets/standalone.json b/mongodb-1.4.2/scripts/presets/standalone.json
similarity index 100%
rename from mongodb-1.3.4/scripts/presets/standalone.json
rename to mongodb-1.4.2/scripts/presets/standalone.json
diff --git a/mongodb-1.3.4/scripts/run-tests-on.sh b/mongodb-1.4.2/scripts/run-tests-on.sh
similarity index 100%
rename from mongodb-1.3.4/scripts/run-tests-on.sh
rename to mongodb-1.4.2/scripts/run-tests-on.sh
diff --git a/mongodb-1.3.4/scripts/ssl/ca.pem b/mongodb-1.4.2/scripts/ssl/ca.pem
similarity index 100%
rename from mongodb-1.3.4/scripts/ssl/ca.pem
rename to mongodb-1.4.2/scripts/ssl/ca.pem
diff --git a/mongodb-1.3.4/scripts/ssl/client.pem b/mongodb-1.4.2/scripts/ssl/client.pem
similarity index 100%
rename from mongodb-1.3.4/scripts/ssl/client.pem
rename to mongodb-1.4.2/scripts/ssl/client.pem
diff --git a/mongodb-1.3.4/scripts/ssl/crl.pem b/mongodb-1.4.2/scripts/ssl/crl.pem
similarity index 100%
rename from mongodb-1.3.4/scripts/ssl/crl.pem
rename to mongodb-1.4.2/scripts/ssl/crl.pem
diff --git a/mongodb-1.3.4/scripts/ssl/server.pem b/mongodb-1.4.2/scripts/ssl/server.pem
similarity index 100%
rename from mongodb-1.3.4/scripts/ssl/server.pem
rename to mongodb-1.4.2/scripts/ssl/server.pem
diff --git a/mongodb-1.3.4/scripts/start-servers.php b/mongodb-1.4.2/scripts/start-servers.php
similarity index 94%
rename from mongodb-1.3.4/scripts/start-servers.php
rename to mongodb-1.4.2/scripts/start-servers.php
index 634373ac..54082238 100644
--- a/mongodb-1.3.4/scripts/start-servers.php
+++ b/mongodb-1.4.2/scripts/start-servers.php
@@ -1,151 +1,148 @@
<?php
require __DIR__ . "/" . "../tests/utils/tools.php";
$SERVERS = array();
$FILENAME = sys_get_temp_dir() . "/PHONGO-SERVERS.json";
ini_set("default_socket_timeout", 60000);
function lap() {
static $then = 0;
static $now;
$now = microtime(true);
$ret = $now - $then;
$then = $now;
return $ret;
}
$PRESETS = [
"standalone" => [
"scripts/presets/standalone.json",
- "scripts/presets/standalone-24.json",
- "scripts/presets/standalone-26.json",
"scripts/presets/standalone-30.json",
"scripts/presets/standalone-ssl.json",
"scripts/presets/standalone-auth.json",
"scripts/presets/standalone-x509.json",
"scripts/presets/standalone-plain.json",
],
"replicasets" => [
"scripts/presets/replicaset.json",
"scripts/presets/replicaset-30.json",
+ "scripts/presets/replicaset-dns.json",
],
];
function make_ctx($preset, $method = "POST") {
$opts = [
"http" => [
"timeout" => 60,
"method" => $method,
"header" => "Accept: application/json\r\n" .
"Content-type: application/x-www-form-urlencoded",
"content" => json_encode(array("preset" => $preset)),
"ignore_errors" => true,
],
];
$ctx = stream_context_create($opts);
return $ctx;
}
function failed($result) {
echo "\n\n";
echo join("\n", $result);
printf("Last operation took: %.2f secs\n", lap());
exit();
}
function mo_http_request($uri, $context) {
global $http_response_header;
$result = file_get_contents($uri, false, $context);
if ($result === false) {
failed($http_response_header);
}
return $result;
}
printf("Cleaning out previous processes, if any ");
lap();
/* Remove all pre-existing ReplicaSets */
$replicasets = mo_http_request(getMOUri() . "/replica_sets", make_ctx(getMOPresetBase(), "GET"));
$replicasets = json_decode($replicasets, true);
foreach($replicasets["replica_sets"] as $replicaset) {
$uri = getMOUri() . "/replica_sets/" . $replicaset["id"];
mo_http_request($uri, make_ctx(getMOPresetBase(), "DELETE"));
echo ".";
}
echo " ";
/* Remove all pre-existing servers */
$servers = mo_http_request(getMOUri() . "/servers", make_ctx(getMOPresetBase(), "GET"));
$servers = json_decode($servers, true);
foreach($servers["servers"] as $server) {
$uri = getMOUri() . "/servers/" . $server["id"];
mo_http_request($uri, make_ctx(getMOPresetBase(), "DELETE"));
echo ".";
}
printf("\t(took: %.2f secs)\n", lap());
foreach($PRESETS["standalone"] as $preset) {
lap();
$json = json_decode(file_get_contents($preset), true);
printf("Starting %-20s ... ", $json["id"]);
$result = mo_http_request(getMOUri() . "/servers", make_ctx(getMOPresetBase() . $preset));
$decode = json_decode($result, true);
if (!isset($decode["id"])) {
failed($decode);
}
$SERVERS[$decode["id"]] = isset($decode["mongodb_auth_uri"]) ? $decode["mongodb_auth_uri"] : $decode["mongodb_uri"];
printf("'%s'\t(took: %.2f secs)\n", $SERVERS[$decode["id"]], lap());
}
echo "---\n";
foreach($PRESETS["replicasets"] as $preset) {
lap();
$json = json_decode(file_get_contents($preset), true);
printf("Starting %-20s ... ", $json["id"]);
$result = mo_http_request(getMOUri() . "/replica_sets", make_ctx(getMOPresetBase() . $preset));
$decode = json_decode($result, true);
if (!isset($decode["id"])) {
failed($decode);
}
$SERVERS[$decode["id"]] = isset($decode["mongodb_auth_uri"]) ? $decode["mongodb_auth_uri"] : $decode["mongodb_uri"];
printf("'%s'\t(took: %.2f secs)\n", $SERVERS[$decode["id"]], lap());
}
file_put_contents($FILENAME, json_encode($SERVERS, JSON_PRETTY_PRINT));
/*
wget --body-data='' --method='GET' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers
wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-AUTH
wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE
-wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-24
wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-26
wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/RS-two
wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/RS-arbiter
wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-PLAIN
wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-X509
wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/RS-one
wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-SSL
wget --body-data='' --method='GET' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/replica_sets
wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/replica_sets/REPLICASET
wget --body-data='' --method='GET' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/
wget --body-data='' --method='GET' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers
wget --body-data='' --method='GET' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/replica_sets
wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers
-wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-24.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers
wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-26.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers
wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-ssl.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers
wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-auth.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers
wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-x509.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers
wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-plain.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers
wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/replicaset.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/replica_sets
*/
diff --git a/mongodb-1.3.4/scripts/ubuntu/essentials.sh b/mongodb-1.4.2/scripts/ubuntu/essentials.sh
similarity index 100%
rename from mongodb-1.3.4/scripts/ubuntu/essentials.sh
rename to mongodb-1.4.2/scripts/ubuntu/essentials.sh
diff --git a/mongodb-1.3.4/scripts/ubuntu/ldap/install.sh b/mongodb-1.4.2/scripts/ubuntu/ldap/install.sh
similarity index 100%
rename from mongodb-1.3.4/scripts/ubuntu/ldap/install.sh
rename to mongodb-1.4.2/scripts/ubuntu/ldap/install.sh
diff --git a/mongodb-1.3.4/scripts/ubuntu/ldap/saslauthd.conf b/mongodb-1.4.2/scripts/ubuntu/ldap/saslauthd.conf
similarity index 100%
rename from mongodb-1.3.4/scripts/ubuntu/ldap/saslauthd.conf
rename to mongodb-1.4.2/scripts/ubuntu/ldap/saslauthd.conf
diff --git a/mongodb-1.4.2/scripts/ubuntu/mongo-orchestration.sh b/mongodb-1.4.2/scripts/ubuntu/mongo-orchestration.sh
new file mode 100644
index 00000000..8077a737
--- /dev/null
+++ b/mongodb-1.4.2/scripts/ubuntu/mongo-orchestration.sh
@@ -0,0 +1,28 @@
+# 3.0
+apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
+
+# 3.6
+apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5
+
+echo 'deb http://repo.mongodb.com/apt/ubuntu trusty/mongodb-enterprise/3.0 multiverse' | tee /etc/apt/sources.list.d/mongodb-enterprise-3.0.list
+echo 'deb http://repo.mongodb.com/apt/ubuntu trusty/mongodb-enterprise/3.6 multiverse' | tee /etc/apt/sources.list.d/mongodb-enterprise-3.6.list
+
+apt-get update
+
+apt-get install -y libsnmp30 libgsasl7 libcurl4-openssl-dev
+
+apt-get download mongodb-enterprise-server=3.0.15
+apt-get download mongodb-enterprise-server=3.6.1
+apt-get download mongodb-enterprise-mongos=3.6.1
+dpkg -x mongodb-enterprise-server_3.0.15_amd64.deb 3.0
+dpkg -x mongodb-enterprise-server_3.6.1_amd64.deb 3.6
+dpkg -x mongodb-enterprise-mongos_3.6.1_amd64.deb 3.6
+
+# Python stuff for mongo-orchestration
+apt-get install -y python python-dev
+python get-pip.py
+
+pip install --upgrade mongo-orchestration
+
+# Launch mongo-orchestration
+mongo-orchestration -f mongo-orchestration-config.json -b 192.168.112.10 --enable-majority-read-concern start
diff --git a/mongodb-1.3.4/scripts/ubuntu/phongo.sh b/mongodb-1.4.2/scripts/ubuntu/phongo.sh
similarity index 100%
rename from mongodb-1.3.4/scripts/ubuntu/phongo.sh
rename to mongodb-1.4.2/scripts/ubuntu/phongo.sh
diff --git a/mongodb-1.3.4/scripts/vmware/kernel.sh b/mongodb-1.4.2/scripts/vmware/kernel.sh
similarity index 100%
rename from mongodb-1.3.4/scripts/vmware/kernel.sh
rename to mongodb-1.4.2/scripts/vmware/kernel.sh
diff --git a/mongodb-1.3.4/src/BSON/Binary.c b/mongodb-1.4.2/src/BSON/Binary.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/Binary.c
rename to mongodb-1.4.2/src/BSON/Binary.c
diff --git a/mongodb-1.3.4/src/BSON/BinaryInterface.c b/mongodb-1.4.2/src/BSON/BinaryInterface.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/BinaryInterface.c
rename to mongodb-1.4.2/src/BSON/BinaryInterface.c
diff --git a/mongodb-1.4.2/src/BSON/DBPointer.c b/mongodb-1.4.2/src/BSON/DBPointer.c
new file mode 100644
index 00000000..db812577
--- /dev/null
+++ b/mongodb-1.4.2/src/BSON/DBPointer.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright 2014-2017 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <php.h>
+#include <Zend/zend_interfaces.h>
+#include <ext/standard/php_var.h>
+#if PHP_VERSION_ID >= 70000
+# include <zend_smart_str.h>
+#else
+# include <ext/standard/php_smart_str.h>
+#endif
+
+#include "phongo_compat.h"
+#include "php_phongo.h"
+#include "php_bson.h"
+
+zend_class_entry *php_phongo_dbpointer_ce;
+
+/* Initialize the object and return whether it was successful. An exception will
+ * be thrown on error. */
+static bool php_phongo_dbpointer_init(php_phongo_dbpointer_t *intern, const char *ref, phongo_zpp_char_len ref_len, const char *id, phongo_zpp_char_len id_len TSRMLS_DC) /* {{{ */
+{
+ if (strlen(ref) != ref_len) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Ref cannot contain null bytes");
+ return false;
+ }
+
+ if (!bson_oid_is_valid(id, id_len)) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing ObjectId string: %s", id);
+ return false;
+ }
+
+ intern->ref = estrndup(ref, ref_len);
+ intern->ref_len = ref_len;
+
+ strncpy(intern->id, id, sizeof(intern->id));
+
+ return true;
+} /* }}} */
+
+/* Initialize the object from a HashTable and return whether it was successful.
+ * An exception will be thrown on error. */
+static bool php_phongo_dbpointer_init_from_hash(php_phongo_dbpointer_t *intern, HashTable *props TSRMLS_DC) /* {{{ */
+{
+#if PHP_VERSION_ID >= 70000
+ zval *ref, *id;
+
+ if ((ref = zend_hash_str_find(props, "ref", sizeof("ref")-1)) && Z_TYPE_P(ref) == IS_STRING &&
+ (id = zend_hash_str_find(props, "id", sizeof("id")-1)) && Z_TYPE_P(id) == IS_STRING) {
+ return php_phongo_dbpointer_init(intern, Z_STRVAL_P(ref), Z_STRLEN_P(ref), Z_STRVAL_P(id), Z_STRLEN_P(id) TSRMLS_CC);
+ }
+#else
+ zval **ref, **id;
+
+ if (zend_hash_find(props, "ref", sizeof("ref"), (void**) &ref) == SUCCESS && Z_TYPE_PP(ref) == IS_STRING &&
+ zend_hash_find(props, "id", sizeof("id"), (void**) &id) == SUCCESS && Z_TYPE_PP(id) == IS_STRING) {
+
+ return php_phongo_dbpointer_init(intern, Z_STRVAL_PP(ref), Z_STRLEN_PP(ref), Z_STRVAL_PP(id), Z_STRLEN_PP(id) TSRMLS_CC);
+ }
+#endif
+
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"ref\" and \"id\" string fields", ZSTR_VAL(php_phongo_dbpointer_ce->name));
+ return false;
+} /* }}} */
+
+/* {{{ proto string MongoDB\BSON\DBPointer::__toString()
+ Return the DBPointer's namespace string and ObjectId. */
+static PHP_METHOD(DBPointer, __toString)
+{
+ php_phongo_dbpointer_t *intern;
+ char *retval;
+ int retval_len;
+
+ if (zend_parse_parameters_none() == FAILURE) {
+ return;
+ }
+
+ intern = Z_DBPOINTER_OBJ_P(getThis());
+
+ retval_len = spprintf(&retval, 0, "[%s/%s]", intern->ref, intern->id);
+ PHONGO_RETVAL_STRINGL(retval, retval_len);
+ efree(retval);
+} /* }}} */
+
+/* {{{ proto array MongoDB\BSON\Symbol::jsonSerialize()
+*/
+static PHP_METHOD(DBPointer, jsonSerialize)
+{
+ php_phongo_dbpointer_t *intern;
+#if PHP_VERSION_ID >= 70000
+ zval zdb_pointer, zoid;
+#else
+ zval *zdb_pointer, *zoid;
+#endif
+
+ if (zend_parse_parameters_none() == FAILURE) {
+ return;
+ }
+
+ intern = Z_DBPOINTER_OBJ_P(getThis());
+
+#if PHP_VERSION_ID >= 70000
+ array_init_size(&zdb_pointer, 2);
+ array_init_size(&zoid, 1);
+ ADD_ASSOC_STRINGL(&zdb_pointer, "$ref", intern->ref, intern->ref_len);
+ ADD_ASSOC_STRING(&zoid, "$oid", intern->id);
+ ADD_ASSOC_ZVAL(&zdb_pointer, "$id", &zoid);
+
+ array_init_size(return_value, 1);
+ ADD_ASSOC_ZVAL(return_value, "$dbPointer", &zdb_pointer);
+#else
+ ALLOC_INIT_ZVAL(zdb_pointer);
+ ALLOC_INIT_ZVAL(zoid);
+ array_init_size(zdb_pointer, 2);
+ array_init_size(zoid, 1);
+ ADD_ASSOC_STRINGL(zdb_pointer, "$ref", intern->ref, intern->ref_len);
+ ADD_ASSOC_STRING(zoid, "$oid", intern->id);
+ ADD_ASSOC_ZVAL(zdb_pointer, "$id", zoid);
+
+ array_init_size(return_value, 1);
+ ADD_ASSOC_ZVAL(return_value, "$dbPointer", zdb_pointer);
+#endif
+} /* }}} */
+
+/* {{{ proto string MongoDB\BSON\DBPointer::serialize()
+*/
+static PHP_METHOD(DBPointer, serialize)
+{
+ php_phongo_dbpointer_t *intern;
+#if PHP_VERSION_ID >= 70000
+ zval retval;
+#else
+ zval *retval;
+#endif
+
+ php_serialize_data_t var_hash;
+ smart_str buf = { 0 };
+
+ intern = Z_DBPOINTER_OBJ_P(getThis());
+
+ if (zend_parse_parameters_none() == FAILURE) {
+ return;
+ }
+
+#if PHP_VERSION_ID >= 70000
+ array_init_size(&retval, 2);
+ ADD_ASSOC_STRINGL(&retval, "ref", intern->ref, intern->ref_len);
+ ADD_ASSOC_STRING(&retval, "id", intern->id);
+#else
+ ALLOC_INIT_ZVAL(retval);
+ array_init_size(retval, 2);
+ ADD_ASSOC_STRINGL(retval, "ref", intern->ref, intern->ref_len);
+ ADD_ASSOC_STRING(retval, "id", intern->id);
+#endif
+
+ PHP_VAR_SERIALIZE_INIT(var_hash);
+ php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC);
+ smart_str_0(&buf);
+ PHP_VAR_SERIALIZE_DESTROY(var_hash);
+
+ PHONGO_RETVAL_SMART_STR(buf);
+
+ smart_str_free(&buf);
+ zval_ptr_dtor(&retval);
+} /* }}} */
+
+/* {{{ proto void MongoDB\BSON\DBPointer::unserialize(string $serialized)
+*/
+static PHP_METHOD(DBPointer, unserialize)
+{
+ php_phongo_dbpointer_t *intern;
+ zend_error_handling error_handling;
+ char *serialized;
+ phongo_zpp_char_len serialized_len;
+#if PHP_VERSION_ID >= 70000
+ zval props;
+#else
+ zval *props;
+#endif
+ php_unserialize_data_t var_hash;
+
+ intern = Z_DBPOINTER_OBJ_P(getThis());
+
+ zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) {
+ zend_restore_error_handling(&error_handling TSRMLS_CC);
+ return;
+ }
+ zend_restore_error_handling(&error_handling TSRMLS_CC);
+
+#if PHP_VERSION_ID < 70000
+ ALLOC_INIT_ZVAL(props);
+#endif
+ PHP_VAR_UNSERIALIZE_INIT(var_hash);
+ if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char *) serialized + serialized_len, &var_hash TSRMLS_CC)) {
+ zval_ptr_dtor(&props);
+ phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_dbpointer_ce->name));
+
+ PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
+ return;
+ }
+ PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
+
+#if PHP_VERSION_ID >= 70000
+ php_phongo_dbpointer_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC);
+#else
+ php_phongo_dbpointer_init_from_hash(intern, HASH_OF(props) TSRMLS_CC);
+#endif
+ zval_ptr_dtor(&props);
+} /* }}} */
+
+/* {{{ MongoDB\BSON\DBPointer function entries */
+ZEND_BEGIN_ARG_INFO_EX(ai_DBPointer_unserialize, 0, 0, 1)
+ ZEND_ARG_INFO(0, serialized)
+ZEND_END_ARG_INFO()
+
+ZEND_BEGIN_ARG_INFO_EX(ai_DBPointer_void, 0, 0, 0)
+ZEND_END_ARG_INFO()
+
+static zend_function_entry php_phongo_dbpointer_me[] = {
+ /* __set_state intentionally missing */
+ PHP_ME(DBPointer, __toString, ai_DBPointer_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(DBPointer, jsonSerialize, ai_DBPointer_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(DBPointer, serialize, ai_DBPointer_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(DBPointer, unserialize, ai_DBPointer_unserialize, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_DBPointer_void, ZEND_ACC_PRIVATE|ZEND_ACC_FINAL)
+ PHP_FE_END
+};
+/* }}} */
+
+/* {{{ MongoDB\BSON\DBPointer object handlers */
+static zend_object_handlers php_phongo_handler_dbpointer;
+
+static void php_phongo_dbpointer_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
+{
+ php_phongo_dbpointer_t *intern = Z_OBJ_DBPOINTER(object);
+
+ zend_object_std_dtor(&intern->std TSRMLS_CC);
+
+ if (intern->ref) {
+ efree(intern->ref);
+ }
+
+ if (intern->properties) {
+ zend_hash_destroy(intern->properties);
+ FREE_HASHTABLE(intern->properties);
+ }
+
+#if PHP_VERSION_ID < 70000
+ efree(intern);
+#endif
+} /* }}} */
+
+phongo_create_object_retval php_phongo_dbpointer_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
+{
+ php_phongo_dbpointer_t *intern = NULL;
+
+ intern = PHONGO_ALLOC_OBJECT_T(php_phongo_dbpointer_t, class_type);
+ zend_object_std_init(&intern->std, class_type TSRMLS_CC);
+ object_properties_init(&intern->std, class_type);
+
+#if PHP_VERSION_ID >= 70000
+ intern->std.handlers = &php_phongo_handler_dbpointer;
+
+ return &intern->std;
+#else
+ {
+ zend_object_value retval;
+ retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_dbpointer_free_object, NULL TSRMLS_CC);
+ retval.handlers = &php_phongo_handler_dbpointer;
+
+ return retval;
+ }
+#endif
+} /* }}} */
+
+static int php_phongo_dbpointer_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */
+{
+ php_phongo_dbpointer_t *intern1, *intern2;
+ int retval;
+
+ intern1 = Z_DBPOINTER_OBJ_P(o1);
+ intern2 = Z_DBPOINTER_OBJ_P(o2);
+
+ retval = strcmp(intern1->ref, intern2->ref);
+
+ if (retval != 0) {
+ return retval;
+ }
+
+ return strcmp(intern1->id, intern2->id);
+} /* }}} */
+
+static HashTable *php_phongo_dbpointer_get_gc(zval *object, phongo_get_gc_table table, int *n TSRMLS_DC) /* {{{ */
+{
+ *table = NULL;
+ *n = 0;
+
+ return Z_DBPOINTER_OBJ_P(object)->properties;
+} /* }}} */
+
+HashTable *php_phongo_dbpointer_get_properties_hash(zval *object, bool is_debug TSRMLS_DC) /* {{{ */
+{
+ php_phongo_dbpointer_t *intern;
+ HashTable *props;
+
+ intern = Z_DBPOINTER_OBJ_P(object);
+
+ PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2);
+
+ if (!intern->ref) {
+ return props;
+ }
+
+#if PHP_VERSION_ID >= 70000
+ {
+ zval ref, id;
+
+ ZVAL_STRING(&ref, intern->ref);
+ ZVAL_STRING(&id, intern->id);
+ zend_hash_str_update(props, "ref", sizeof("ref")-1, &ref);
+ zend_hash_str_update(props, "id", sizeof("id")-1, &id);
+ }
+#else
+ {
+ zval *ref, *id;
+
+ MAKE_STD_ZVAL(ref);
+ ZVAL_STRING(ref, intern->ref, 1);
+ MAKE_STD_ZVAL(id);
+ ZVAL_STRING(id, intern->id, 1);
+ zend_hash_update(props, "ref", sizeof("ref"), &ref, sizeof(ref), NULL);
+ zend_hash_update(props, "id", sizeof("id"), &id, sizeof(id), NULL);
+ }
+#endif
+
+ return props;
+} /* }}} */
+
+static HashTable *php_phongo_dbpointer_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
+{
+ *is_temp = 1;
+ return php_phongo_dbpointer_get_properties_hash(object, true TSRMLS_CC);
+} /* }}} */
+
+static HashTable *php_phongo_dbpointer_get_properties(zval *object TSRMLS_DC) /* {{{ */
+{
+ return php_phongo_dbpointer_get_properties_hash(object, false TSRMLS_CC);
+} /* }}} */
+/* }}} */
+
+void php_phongo_dbpointer_init_ce(INIT_FUNC_ARGS) /* {{{ */
+{
+ zend_class_entry ce;
+
+ INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "DBPointer", php_phongo_dbpointer_me);
+ php_phongo_dbpointer_ce = zend_register_internal_class(&ce TSRMLS_CC);
+ php_phongo_dbpointer_ce->create_object = php_phongo_dbpointer_create_object;
+ PHONGO_CE_FINAL(php_phongo_dbpointer_ce);
+
+ zend_class_implements(php_phongo_dbpointer_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce);
+ zend_class_implements(php_phongo_dbpointer_ce TSRMLS_CC, 1, php_phongo_type_ce);
+ zend_class_implements(php_phongo_dbpointer_ce TSRMLS_CC, 1, zend_ce_serializable);
+
+ memcpy(&php_phongo_handler_dbpointer, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
+ php_phongo_handler_dbpointer.compare_objects = php_phongo_dbpointer_compare_objects;
+ php_phongo_handler_dbpointer.get_debug_info = php_phongo_dbpointer_get_debug_info;
+ php_phongo_handler_dbpointer.get_gc = php_phongo_dbpointer_get_gc;
+ php_phongo_handler_dbpointer.get_properties = php_phongo_dbpointer_get_properties;
+#if PHP_VERSION_ID >= 70000
+ php_phongo_handler_dbpointer.free_obj = php_phongo_dbpointer_free_object;
+ php_phongo_handler_dbpointer.offset = XtOffsetOf(php_phongo_dbpointer_t, std);
+#endif
+} /* }}} */
+
+/*
+ * Local variables:
+ * tab-width: 4
+ * c-basic-offset: 4
+ * End:
+ * vim600: noet sw=4 ts=4 fdm=marker
+ * vim<600: noet sw=4 ts=4
+ */
diff --git a/mongodb-1.3.4/src/BSON/Decimal128.c b/mongodb-1.4.2/src/BSON/Decimal128.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/Decimal128.c
rename to mongodb-1.4.2/src/BSON/Decimal128.c
diff --git a/mongodb-1.3.4/src/BSON/Decimal128Interface.c b/mongodb-1.4.2/src/BSON/Decimal128Interface.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/Decimal128Interface.c
rename to mongodb-1.4.2/src/BSON/Decimal128Interface.c
diff --git a/mongodb-1.3.4/src/BSON/Javascript.c b/mongodb-1.4.2/src/BSON/Javascript.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/Javascript.c
rename to mongodb-1.4.2/src/BSON/Javascript.c
diff --git a/mongodb-1.3.4/src/BSON/JavascriptInterface.c b/mongodb-1.4.2/src/BSON/JavascriptInterface.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/JavascriptInterface.c
rename to mongodb-1.4.2/src/BSON/JavascriptInterface.c
diff --git a/mongodb-1.3.4/src/BSON/MaxKey.c b/mongodb-1.4.2/src/BSON/MaxKey.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/MaxKey.c
rename to mongodb-1.4.2/src/BSON/MaxKey.c
diff --git a/mongodb-1.3.4/src/BSON/MaxKeyInterface.c b/mongodb-1.4.2/src/BSON/MaxKeyInterface.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/MaxKeyInterface.c
rename to mongodb-1.4.2/src/BSON/MaxKeyInterface.c
diff --git a/mongodb-1.3.4/src/BSON/MinKey.c b/mongodb-1.4.2/src/BSON/MinKey.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/MinKey.c
rename to mongodb-1.4.2/src/BSON/MinKey.c
diff --git a/mongodb-1.3.4/src/BSON/MinKeyInterface.c b/mongodb-1.4.2/src/BSON/MinKeyInterface.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/MinKeyInterface.c
rename to mongodb-1.4.2/src/BSON/MinKeyInterface.c
diff --git a/mongodb-1.3.4/src/BSON/ObjectId.c b/mongodb-1.4.2/src/BSON/ObjectId.c
similarity index 99%
rename from mongodb-1.3.4/src/BSON/ObjectId.c
rename to mongodb-1.4.2/src/BSON/ObjectId.c
index 2e85c8a5..c791ba61 100644
--- a/mongodb-1.3.4/src/BSON/ObjectId.c
+++ b/mongodb-1.4.2/src/BSON/ObjectId.c
@@ -1,440 +1,440 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include <ext/standard/php_var.h>
#if PHP_VERSION_ID >= 70000
# include <zend_smart_str.h>
#else
# include <ext/standard/php_smart_str.h>
#endif
#include "phongo_compat.h"
#include "php_phongo.h"
zend_class_entry *php_phongo_objectid_ce;
/* Initialize the object with a generated value and return whether it was
* successful. */
static bool php_phongo_objectid_init(php_phongo_objectid_t *intern)
{
bson_oid_t oid;
intern->initialized = true;
bson_oid_init(&oid, NULL);
bson_oid_to_string(&oid, intern->oid);
return true;
}
/* Initialize the object from a hex string and return whether it was successful.
* An exception will be thrown on error. */
static bool php_phongo_objectid_init_from_hex_string(php_phongo_objectid_t *intern, const char *hex, phongo_zpp_char_len hex_len TSRMLS_DC) /* {{{ */
{
if (bson_oid_is_valid(hex, hex_len)) {
bson_oid_t oid;
bson_oid_init_from_string(&oid, hex);
bson_oid_to_string(&oid, intern->oid);
intern->initialized = true;
return true;
}
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing ObjectId string: %s", hex);
return false;
} /* }}} */
/* Initialize the object from a HashTable and return whether it was successful.
* An exception will be thrown on error. */
static bool php_phongo_objectid_init_from_hash(php_phongo_objectid_t *intern, HashTable *props TSRMLS_DC) /* {{{ */
{
#if PHP_VERSION_ID >= 70000
zval *z_oid;
z_oid = zend_hash_str_find(props, "oid", sizeof("oid")-1);
if (z_oid && Z_TYPE_P(z_oid) == IS_STRING) {
return php_phongo_objectid_init_from_hex_string(intern, Z_STRVAL_P(z_oid), Z_STRLEN_P(z_oid) TSRMLS_CC);
}
#else
zval **z_oid;
if (zend_hash_find(props, "oid", sizeof("oid"), (void**) &z_oid) == SUCCESS && Z_TYPE_PP(z_oid) == IS_STRING) {
return php_phongo_objectid_init_from_hex_string(intern, Z_STRVAL_PP(z_oid), Z_STRLEN_PP(z_oid) TSRMLS_CC);
}
#endif
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"oid\" string field", ZSTR_VAL(php_phongo_objectid_ce->name));
return false;
} /* }}} */
/* {{{ proto void MongoDB\BSON\ObjectId::__construct([string $id])
Constructs a new BSON ObjectId type, optionally from a hex string. */
static PHP_METHOD(ObjectId, __construct)
{
php_phongo_objectid_t *intern;
zend_error_handling error_handling;
char *id = NULL;
phongo_zpp_char_len id_len;
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
intern = Z_OBJECTID_OBJ_P(getThis());
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s!", &id, &id_len) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
if (id) {
php_phongo_objectid_init_from_hex_string(intern, id, id_len TSRMLS_CC);
} else {
php_phongo_objectid_init(intern);
}
} /* }}} */
/* {{{ proto integer MongoDB\BSON\ObjectId::getTimestamp()
*/
static PHP_METHOD(ObjectId, getTimestamp)
{
php_phongo_objectid_t *intern;
bson_oid_t tmp_oid;
intern = Z_OBJECTID_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
bson_oid_init_from_string(&tmp_oid, intern->oid);
RETVAL_LONG(bson_oid_get_time_t(&tmp_oid));
} /* }}} */
/* {{{ proto MongoDB\BSON\ObjectId::__set_state(array $properties)
*/
static PHP_METHOD(ObjectId, __set_state)
{
php_phongo_objectid_t *intern;
HashTable *props;
zval *array;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) {
RETURN_FALSE;
}
object_init_ex(return_value, php_phongo_objectid_ce);
intern = Z_OBJECTID_OBJ_P(return_value);
props = Z_ARRVAL_P(array);
php_phongo_objectid_init_from_hash(intern, props TSRMLS_CC);
} /* }}} */
/* {{{ proto string MongoDB\BSON\ObjectId::__toString()
*/
static PHP_METHOD(ObjectId, __toString)
{
php_phongo_objectid_t *intern;
intern = Z_OBJECTID_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
PHONGO_RETURN_STRINGL(intern->oid, 24);
} /* }}} */
/* {{{ proto array MongoDB\BSON\ObjectId::jsonSerialize()
*/
static PHP_METHOD(ObjectId, jsonSerialize)
{
php_phongo_objectid_t *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = Z_OBJECTID_OBJ_P(getThis());
array_init_size(return_value, 1);
ADD_ASSOC_STRINGL(return_value, "$oid", intern->oid, 24);
} /* }}} */
/* {{{ proto string MongoDB\BSON\ObjectId::serialize()
*/
static PHP_METHOD(ObjectId, serialize)
{
php_phongo_objectid_t *intern;
#if PHP_VERSION_ID >= 70000
zval retval;
#else
zval *retval;
#endif
php_serialize_data_t var_hash;
smart_str buf = { 0 };
intern = Z_OBJECTID_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
#if PHP_VERSION_ID >= 70000
array_init_size(&retval, 2);
ADD_ASSOC_STRINGL(&retval, "oid", intern->oid, 24);
#else
ALLOC_INIT_ZVAL(retval);
array_init_size(retval, 2);
ADD_ASSOC_STRINGL(retval, "oid", intern->oid, 24);
#endif
PHP_VAR_SERIALIZE_INIT(var_hash);
php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC);
smart_str_0(&buf);
PHP_VAR_SERIALIZE_DESTROY(var_hash);
PHONGO_RETVAL_SMART_STR(buf);
smart_str_free(&buf);
zval_ptr_dtor(&retval);
} /* }}} */
/* {{{ proto void MongoDB\BSON\ObjectId::unserialize(string $serialized)
*/
static PHP_METHOD(ObjectId, unserialize)
{
php_phongo_objectid_t *intern;
zend_error_handling error_handling;
char *serialized;
phongo_zpp_char_len serialized_len;
#if PHP_VERSION_ID >= 70000
zval props;
#else
zval *props;
#endif
php_unserialize_data_t var_hash;
intern = Z_OBJECTID_OBJ_P(getThis());
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
#if PHP_VERSION_ID < 70000
ALLOC_INIT_ZVAL(props);
#endif
PHP_VAR_UNSERIALIZE_INIT(var_hash);
if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char *) serialized + serialized_len, &var_hash TSRMLS_CC)) {
zval_ptr_dtor(&props);
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_objectid_ce->name));
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
return;
}
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
#if PHP_VERSION_ID >= 70000
php_phongo_objectid_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC);
#else
php_phongo_objectid_init_from_hash(intern, HASH_OF(props) TSRMLS_CC);
#endif
zval_ptr_dtor(&props);
} /* }}} */
/* {{{ MongoDB\BSON\ObjectId function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_ObjectId___construct, 0, 0, 0)
ZEND_ARG_INFO(0, id)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_ObjectId___set_state, 0, 0, 1)
ZEND_ARG_ARRAY_INFO(0, properties, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_ObjectId_unserialize, 0, 0, 1)
ZEND_ARG_INFO(0, serialized)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_ObjectId_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_objectid_me[] = {
PHP_ME(ObjectId, __construct, ai_ObjectId___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ObjectId, getTimestamp, ai_ObjectId_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ObjectId, __set_state, ai_ObjectId___set_state, ZEND_ACC_PUBLIC|ZEND_ACC_STATIC)
PHP_ME(ObjectId, __toString, ai_ObjectId_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ObjectId, jsonSerialize, ai_ObjectId_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ObjectId, serialize, ai_ObjectId_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ObjectId, unserialize, ai_ObjectId_unserialize, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\BSON\ObjectId object handlers */
static zend_object_handlers php_phongo_handler_objectid;
static void php_phongo_objectid_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
php_phongo_objectid_t *intern = Z_OBJ_OBJECTID(object);
zend_object_std_dtor(&intern->std TSRMLS_CC);
if (intern->properties) {
zend_hash_destroy(intern->properties);
FREE_HASHTABLE(intern->properties);
}
#if PHP_VERSION_ID < 70000
efree(intern);
#endif
} /* }}} */
static phongo_create_object_retval php_phongo_objectid_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
php_phongo_objectid_t *intern = NULL;
intern = PHONGO_ALLOC_OBJECT_T(php_phongo_objectid_t, class_type);
zend_object_std_init(&intern->std, class_type TSRMLS_CC);
object_properties_init(&intern->std, class_type);
#if PHP_VERSION_ID >= 70000
intern->std.handlers = &php_phongo_handler_objectid;
return &intern->std;
#else
{
zend_object_value retval;
retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_objectid_free_object, NULL TSRMLS_CC);
retval.handlers = &php_phongo_handler_objectid;
return retval;
}
#endif
} /* }}} */
static int php_phongo_objectid_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */
{
php_phongo_objectid_t *intern1;
php_phongo_objectid_t *intern2;
intern1 = Z_OBJECTID_OBJ_P(o1);
intern2 = Z_OBJECTID_OBJ_P(o2);
return strcmp(intern1->oid, intern2->oid);
} /* }}} */
static HashTable *php_phongo_objectid_get_gc(zval *object, phongo_get_gc_table table, int *n TSRMLS_DC) /* {{{ */
{
*table = NULL;
*n = 0;
return Z_OBJECTID_OBJ_P(object)->properties;
} /* }}} */
static HashTable *php_phongo_objectid_get_properties_hash(zval *object, bool is_debug TSRMLS_DC) /* {{{ */
{
php_phongo_objectid_t *intern;
HashTable *props;
intern = Z_OBJECTID_OBJ_P(object);
PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 1);
- if (!intern->oid) {
+ if (!intern->initialized) {
return props;
}
#if PHP_VERSION_ID >= 70000
{
zval zv;
ZVAL_STRING(&zv, intern->oid);
zend_hash_str_update(props, "oid", sizeof("oid")-1, &zv);
}
#else
{
zval *zv;
MAKE_STD_ZVAL(zv);
ZVAL_STRING(zv, intern->oid, 1);
zend_hash_update(props, "oid", sizeof("oid"), &zv, sizeof(zv), NULL);
}
#endif
return props;
} /* }}} */
static HashTable *php_phongo_objectid_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
*is_temp = 1;
return php_phongo_objectid_get_properties_hash(object, true TSRMLS_CC);
} /* }}} */
static HashTable *php_phongo_objectid_get_properties(zval *object TSRMLS_DC) /* {{{ */
{
return php_phongo_objectid_get_properties_hash(object, false TSRMLS_CC);
} /* }}} */
/* }}} */
void php_phongo_objectid_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
zend_class_entry ce;
INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "ObjectId", php_phongo_objectid_me);
php_phongo_objectid_ce = zend_register_internal_class(&ce TSRMLS_CC);
php_phongo_objectid_ce->create_object = php_phongo_objectid_create_object;
PHONGO_CE_FINAL(php_phongo_objectid_ce);
zend_class_implements(php_phongo_objectid_ce TSRMLS_CC, 1, php_phongo_objectid_interface_ce);
zend_class_implements(php_phongo_objectid_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce);
zend_class_implements(php_phongo_objectid_ce TSRMLS_CC, 1, php_phongo_type_ce);
zend_class_implements(php_phongo_objectid_ce TSRMLS_CC, 1, zend_ce_serializable);
memcpy(&php_phongo_handler_objectid, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
php_phongo_handler_objectid.compare_objects = php_phongo_objectid_compare_objects;
php_phongo_handler_objectid.get_debug_info = php_phongo_objectid_get_debug_info;
php_phongo_handler_objectid.get_gc = php_phongo_objectid_get_gc;
php_phongo_handler_objectid.get_properties = php_phongo_objectid_get_properties;
#if PHP_VERSION_ID >= 70000
php_phongo_handler_objectid.free_obj = php_phongo_objectid_free_object;
php_phongo_handler_objectid.offset = XtOffsetOf(php_phongo_objectid_t, std);
#endif
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/BSON/ObjectIdInterface.c b/mongodb-1.4.2/src/BSON/ObjectIdInterface.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/ObjectIdInterface.c
rename to mongodb-1.4.2/src/BSON/ObjectIdInterface.c
diff --git a/mongodb-1.3.4/src/BSON/Persistable.c b/mongodb-1.4.2/src/BSON/Persistable.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/Persistable.c
rename to mongodb-1.4.2/src/BSON/Persistable.c
diff --git a/mongodb-1.3.4/src/BSON/Regex.c b/mongodb-1.4.2/src/BSON/Regex.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/Regex.c
rename to mongodb-1.4.2/src/BSON/Regex.c
diff --git a/mongodb-1.3.4/src/BSON/RegexInterface.c b/mongodb-1.4.2/src/BSON/RegexInterface.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/RegexInterface.c
rename to mongodb-1.4.2/src/BSON/RegexInterface.c
diff --git a/mongodb-1.3.4/src/BSON/Serializable.c b/mongodb-1.4.2/src/BSON/Serializable.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/Serializable.c
rename to mongodb-1.4.2/src/BSON/Serializable.c
diff --git a/mongodb-1.4.2/src/BSON/Symbol.c b/mongodb-1.4.2/src/BSON/Symbol.c
new file mode 100644
index 00000000..43bd501d
--- /dev/null
+++ b/mongodb-1.4.2/src/BSON/Symbol.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2014-2017 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <php.h>
+#include <Zend/zend_interfaces.h>
+#include <ext/standard/php_var.h>
+#if PHP_VERSION_ID >= 70000
+# include <zend_smart_str.h>
+#else
+# include <ext/standard/php_smart_str.h>
+#endif
+
+#include "phongo_compat.h"
+#include "php_phongo.h"
+#include "php_bson.h"
+
+zend_class_entry *php_phongo_symbol_ce;
+
+/* Initialize the object and return whether it was successful. An exception will
+ * be thrown on error. */
+static bool php_phongo_symbol_init(php_phongo_symbol_t *intern, const char *symbol, phongo_zpp_char_len symbol_len TSRMLS_DC) /* {{{ */
+{
+ if (strlen(symbol) != symbol_len) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Symbol cannot contain null bytes");
+ return false;
+ }
+
+ intern->symbol = estrndup(symbol, symbol_len);
+ intern->symbol_len = symbol_len;
+
+ return true;
+} /* }}} */
+
+/* Initialize the object from a HashTable and return whether it was successful.
+ * An exception will be thrown on error. */
+static bool php_phongo_symbol_init_from_hash(php_phongo_symbol_t *intern, HashTable *props TSRMLS_DC) /* {{{ */
+{
+#if PHP_VERSION_ID >= 70000
+ zval *symbol;
+
+ if ((symbol = zend_hash_str_find(props, "symbol", sizeof("symbol")-1)) && Z_TYPE_P(symbol) == IS_STRING) {
+ return php_phongo_symbol_init(intern, Z_STRVAL_P(symbol), Z_STRLEN_P(symbol) TSRMLS_CC);
+ }
+#else
+ zval **symbol;
+
+ if (zend_hash_find(props, "symbol", sizeof("symbol"), (void**) &symbol) == SUCCESS && Z_TYPE_PP(symbol) == IS_STRING) {
+ return php_phongo_symbol_init(intern, Z_STRVAL_PP(symbol), Z_STRLEN_PP(symbol) TSRMLS_CC);
+ }
+#endif
+
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"symbol\" string field", ZSTR_VAL(php_phongo_symbol_ce->name));
+ return false;
+} /* }}} */
+
+/* {{{ proto string MongoDB\BSON\Symbol::__toString()
+ Return the Symbol's symbol string. */
+static PHP_METHOD(Symbol, __toString)
+{
+ php_phongo_symbol_t *intern;
+
+ if (zend_parse_parameters_none() == FAILURE) {
+ return;
+ }
+
+ intern = Z_SYMBOL_OBJ_P(getThis());
+
+ PHONGO_RETURN_STRINGL(intern->symbol, intern->symbol_len);
+} /* }}} */
+
+/* {{{ proto array MongoDB\BSON\Symbol::jsonSerialize()
+*/
+static PHP_METHOD(Symbol, jsonSerialize)
+{
+ php_phongo_symbol_t *intern;
+
+ if (zend_parse_parameters_none() == FAILURE) {
+ return;
+ }
+
+ intern = Z_SYMBOL_OBJ_P(getThis());
+
+ array_init_size(return_value, 1);
+ ADD_ASSOC_STRINGL(return_value, "$symbol", intern->symbol, intern->symbol_len);
+} /* }}} */
+
+/* {{{ proto string MongoDB\BSON\Symbol::serialize()
+*/
+static PHP_METHOD(Symbol, serialize)
+{
+ php_phongo_symbol_t *intern;
+#if PHP_VERSION_ID >= 70000
+ zval retval;
+#else
+ zval *retval;
+#endif
+
+ php_serialize_data_t var_hash;
+ smart_str buf = { 0 };
+
+ intern = Z_SYMBOL_OBJ_P(getThis());
+
+ if (zend_parse_parameters_none() == FAILURE) {
+ return;
+ }
+
+#if PHP_VERSION_ID >= 70000
+ array_init_size(&retval, 1);
+ ADD_ASSOC_STRINGL(&retval, "symbol", intern->symbol, intern->symbol_len);
+#else
+ ALLOC_INIT_ZVAL(retval);
+ array_init_size(retval, 1);
+ ADD_ASSOC_STRINGL(retval, "symbol", intern->symbol, intern->symbol_len);
+#endif
+
+ PHP_VAR_SERIALIZE_INIT(var_hash);
+ php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC);
+ smart_str_0(&buf);
+ PHP_VAR_SERIALIZE_DESTROY(var_hash);
+
+ PHONGO_RETVAL_SMART_STR(buf);
+
+ smart_str_free(&buf);
+ zval_ptr_dtor(&retval);
+} /* }}} */
+
+/* {{{ proto void MongoDB\BSON\Symbol::unserialize(string $serialized)
+*/
+static PHP_METHOD(Symbol, unserialize)
+{
+ php_phongo_symbol_t *intern;
+ zend_error_handling error_handling;
+ char *serialized;
+ phongo_zpp_char_len serialized_len;
+#if PHP_VERSION_ID >= 70000
+ zval props;
+#else
+ zval *props;
+#endif
+ php_unserialize_data_t var_hash;
+
+ intern = Z_SYMBOL_OBJ_P(getThis());
+
+ zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) {
+ zend_restore_error_handling(&error_handling TSRMLS_CC);
+ return;
+ }
+ zend_restore_error_handling(&error_handling TSRMLS_CC);
+
+#if PHP_VERSION_ID < 70000
+ ALLOC_INIT_ZVAL(props);
+#endif
+ PHP_VAR_UNSERIALIZE_INIT(var_hash);
+ if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char *) serialized + serialized_len, &var_hash TSRMLS_CC)) {
+ zval_ptr_dtor(&props);
+ phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_symbol_ce->name));
+
+ PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
+ return;
+ }
+ PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
+
+#if PHP_VERSION_ID >= 70000
+ php_phongo_symbol_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC);
+#else
+ php_phongo_symbol_init_from_hash(intern, HASH_OF(props) TSRMLS_CC);
+#endif
+ zval_ptr_dtor(&props);
+} /* }}} */
+
+/* {{{ MongoDB\BSON\Symbol function entries */
+ZEND_BEGIN_ARG_INFO_EX(ai_Symbol_unserialize, 0, 0, 1)
+ ZEND_ARG_INFO(0, serialized)
+ZEND_END_ARG_INFO()
+
+ZEND_BEGIN_ARG_INFO_EX(ai_Symbol_void, 0, 0, 0)
+ZEND_END_ARG_INFO()
+
+static zend_function_entry php_phongo_symbol_me[] = {
+ /* __set_state intentionally missing */
+ PHP_ME(Symbol, __toString, ai_Symbol_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Symbol, jsonSerialize, ai_Symbol_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Symbol, serialize, ai_Symbol_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Symbol, unserialize, ai_Symbol_unserialize, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Symbol_void, ZEND_ACC_PRIVATE|ZEND_ACC_FINAL)
+ PHP_FE_END
+};
+/* }}} */
+
+/* {{{ MongoDB\BSON\Symbol object handlers */
+static zend_object_handlers php_phongo_handler_symbol;
+
+static void php_phongo_symbol_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
+{
+ php_phongo_symbol_t *intern = Z_OBJ_SYMBOL(object);
+
+ zend_object_std_dtor(&intern->std TSRMLS_CC);
+
+ if (intern->symbol) {
+ efree(intern->symbol);
+ }
+
+ if (intern->properties) {
+ zend_hash_destroy(intern->properties);
+ FREE_HASHTABLE(intern->properties);
+ }
+
+#if PHP_VERSION_ID < 70000
+ efree(intern);
+#endif
+} /* }}} */
+
+phongo_create_object_retval php_phongo_symbol_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
+{
+ php_phongo_symbol_t *intern = NULL;
+
+ intern = PHONGO_ALLOC_OBJECT_T(php_phongo_symbol_t, class_type);
+ zend_object_std_init(&intern->std, class_type TSRMLS_CC);
+ object_properties_init(&intern->std, class_type);
+
+#if PHP_VERSION_ID >= 70000
+ intern->std.handlers = &php_phongo_handler_symbol;
+
+ return &intern->std;
+#else
+ {
+ zend_object_value retval;
+ retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_symbol_free_object, NULL TSRMLS_CC);
+ retval.handlers = &php_phongo_handler_symbol;
+
+ return retval;
+ }
+#endif
+} /* }}} */
+
+static int php_phongo_symbol_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */
+{
+ php_phongo_symbol_t *intern1, *intern2;
+
+ intern1 = Z_SYMBOL_OBJ_P(o1);
+ intern2 = Z_SYMBOL_OBJ_P(o2);
+
+ return strcmp(intern1->symbol, intern2->symbol);
+} /* }}} */
+
+static HashTable *php_phongo_symbol_get_gc(zval *object, phongo_get_gc_table table, int *n TSRMLS_DC) /* {{{ */
+{
+ *table = NULL;
+ *n = 0;
+
+ return Z_SYMBOL_OBJ_P(object)->properties;
+} /* }}} */
+
+HashTable *php_phongo_symbol_get_properties_hash(zval *object, bool is_debug TSRMLS_DC) /* {{{ */
+{
+ php_phongo_symbol_t *intern;
+ HashTable *props;
+
+ intern = Z_SYMBOL_OBJ_P(object);
+
+ PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2);
+
+ if (!intern->symbol) {
+ return props;
+ }
+
+#if PHP_VERSION_ID >= 70000
+ {
+ zval symbol;
+
+ ZVAL_STRING(&symbol, intern->symbol);
+ zend_hash_str_update(props, "symbol", sizeof("symbol")-1, &symbol);
+ }
+#else
+ {
+ zval *symbol;
+
+ MAKE_STD_ZVAL(symbol);
+ ZVAL_STRING(symbol, intern->symbol, 1);
+ zend_hash_update(props, "symbol", sizeof("symbol"), &symbol, sizeof(symbol), NULL);
+ }
+#endif
+
+ return props;
+} /* }}} */
+
+static HashTable *php_phongo_symbol_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
+{
+ *is_temp = 1;
+ return php_phongo_symbol_get_properties_hash(object, true TSRMLS_CC);
+} /* }}} */
+
+static HashTable *php_phongo_symbol_get_properties(zval *object TSRMLS_DC) /* {{{ */
+{
+ return php_phongo_symbol_get_properties_hash(object, false TSRMLS_CC);
+} /* }}} */
+/* }}} */
+
+void php_phongo_symbol_init_ce(INIT_FUNC_ARGS) /* {{{ */
+{
+ zend_class_entry ce;
+
+ INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Symbol", php_phongo_symbol_me);
+ php_phongo_symbol_ce = zend_register_internal_class(&ce TSRMLS_CC);
+ php_phongo_symbol_ce->create_object = php_phongo_symbol_create_object;
+ PHONGO_CE_FINAL(php_phongo_symbol_ce);
+
+ zend_class_implements(php_phongo_symbol_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce);
+ zend_class_implements(php_phongo_symbol_ce TSRMLS_CC, 1, php_phongo_type_ce);
+ zend_class_implements(php_phongo_symbol_ce TSRMLS_CC, 1, zend_ce_serializable);
+
+ memcpy(&php_phongo_handler_symbol, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
+ php_phongo_handler_symbol.compare_objects = php_phongo_symbol_compare_objects;
+ php_phongo_handler_symbol.get_debug_info = php_phongo_symbol_get_debug_info;
+ php_phongo_handler_symbol.get_gc = php_phongo_symbol_get_gc;
+ php_phongo_handler_symbol.get_properties = php_phongo_symbol_get_properties;
+#if PHP_VERSION_ID >= 70000
+ php_phongo_handler_symbol.free_obj = php_phongo_symbol_free_object;
+ php_phongo_handler_symbol.offset = XtOffsetOf(php_phongo_symbol_t, std);
+#endif
+} /* }}} */
+
+/*
+ * Local variables:
+ * tab-width: 4
+ * c-basic-offset: 4
+ * End:
+ * vim600: noet sw=4 ts=4 fdm=marker
+ * vim<600: noet sw=4 ts=4
+ */
diff --git a/mongodb-1.3.4/src/BSON/Timestamp.c b/mongodb-1.4.2/src/BSON/Timestamp.c
similarity index 99%
rename from mongodb-1.3.4/src/BSON/Timestamp.c
rename to mongodb-1.4.2/src/BSON/Timestamp.c
index 2a010c37..34dc7b86 100644
--- a/mongodb-1.3.4/src/BSON/Timestamp.c
+++ b/mongodb-1.4.2/src/BSON/Timestamp.c
@@ -1,557 +1,557 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include <ext/standard/php_var.h>
#if PHP_VERSION_ID >= 70000
# include <zend_smart_str.h>
#else
# include <ext/standard/php_smart_str.h>
#endif
#include "phongo_compat.h"
#include "php_phongo.h"
zend_class_entry *php_phongo_timestamp_ce;
/* Initialize the object and return whether it was successful. An exception will
* be thrown on error. */
static bool php_phongo_timestamp_init(php_phongo_timestamp_t *intern, int64_t increment, int64_t timestamp TSRMLS_DC) /* {{{ */
{
if (increment < 0 || increment > UINT32_MAX) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected increment to be an unsigned 32-bit integer, %" PHONGO_LONG_FORMAT " given", increment);
return false;
}
if (timestamp < 0 || timestamp > UINT32_MAX) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected timestamp to be an unsigned 32-bit integer, %" PHONGO_LONG_FORMAT " given", timestamp);
return false;
}
intern->increment = (uint32_t) increment;
intern->timestamp = (uint32_t) timestamp;
intern->initialized = true;
return true;
} /* }}} */
/* Initialize the object from numeric strings and return whether it was
* successful. An exception will be thrown on error. */
static bool php_phongo_timestamp_init_from_string(php_phongo_timestamp_t *intern, const char *s_increment, phongo_zpp_char_len s_increment_len, const char *s_timestamp, phongo_zpp_char_len s_timestamp_len TSRMLS_DC) /* {{{ */
{
int64_t increment, timestamp;
char *endptr = NULL;
errno = 0;
/* errno will set errno if conversion fails; however, we do not need to
* specify the type of error.
*
* Note: bson_ascii_strtoll() does not properly detect out-of-range values
* (see: CDRIVER-1377). strtoll() would be preferable, but it is not
* available on all platforms (e.g. HP-UX), and atoll() provides no error
* reporting at all. */
increment = bson_ascii_strtoll(s_increment, &endptr, 10);
if (errno || (endptr && endptr != ((const char *)s_increment + s_increment_len))) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing \"%s\" as 64-bit integer increment for %s initialization", s_increment, ZSTR_VAL(php_phongo_timestamp_ce->name));
return false;
}
timestamp = bson_ascii_strtoll(s_timestamp, &endptr, 10);
if (errno || (endptr && endptr != ((const char *)s_timestamp + s_timestamp_len))) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing \"%s\" as 64-bit integer timestamp for %s initialization", s_timestamp, ZSTR_VAL(php_phongo_timestamp_ce->name));
return false;
}
return php_phongo_timestamp_init(intern, increment, timestamp TSRMLS_CC);
} /* }}} */
/* Initialize the object from a HashTable and return whether it was successful.
* An exception will be thrown on error. */
static bool php_phongo_timestamp_init_from_hash(php_phongo_timestamp_t *intern, HashTable *props TSRMLS_DC) /* {{{ */
{
#if PHP_VERSION_ID >= 70000
zval *increment, *timestamp;
if ((increment = zend_hash_str_find(props, "increment", sizeof("increment")-1)) && Z_TYPE_P(increment) == IS_LONG &&
(timestamp = zend_hash_str_find(props, "timestamp", sizeof("timestamp")-1)) && Z_TYPE_P(timestamp) == IS_LONG) {
return php_phongo_timestamp_init(intern, Z_LVAL_P(increment), Z_LVAL_P(timestamp) TSRMLS_CC);
}
if ((increment = zend_hash_str_find(props, "increment", sizeof("increment")-1)) && Z_TYPE_P(increment) == IS_STRING &&
(timestamp = zend_hash_str_find(props, "timestamp", sizeof("timestamp")-1)) && Z_TYPE_P(timestamp) == IS_STRING) {
return php_phongo_timestamp_init_from_string(intern, Z_STRVAL_P(increment), Z_STRLEN_P(increment), Z_STRVAL_P(timestamp), Z_STRLEN_P(timestamp) TSRMLS_CC);
}
#else
zval **increment, **timestamp;
if (zend_hash_find(props, "increment", sizeof("increment"), (void**) &increment) == SUCCESS && Z_TYPE_PP(increment) == IS_LONG &&
zend_hash_find(props, "timestamp", sizeof("timestamp"), (void**) &timestamp) == SUCCESS && Z_TYPE_PP(timestamp) == IS_LONG) {
return php_phongo_timestamp_init(intern, Z_LVAL_PP(increment), Z_LVAL_PP(timestamp) TSRMLS_CC);
}
if (zend_hash_find(props, "increment", sizeof("increment"), (void**) &increment) == SUCCESS && Z_TYPE_PP(increment) == IS_STRING &&
zend_hash_find(props, "timestamp", sizeof("timestamp"), (void**) &timestamp) == SUCCESS && Z_TYPE_PP(timestamp) == IS_STRING) {
return php_phongo_timestamp_init_from_string(intern, Z_STRVAL_PP(increment), Z_STRLEN_PP(increment), Z_STRVAL_PP(timestamp), Z_STRLEN_PP(timestamp) TSRMLS_CC);
}
#endif
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"increment\" and \"timestamp\" integer or numeric string fields", ZSTR_VAL(php_phongo_timestamp_ce->name));
return false;
} /* }}} */
/* {{{ proto void MongoDB\BSON\Timestamp::__construct(int|string $increment, int|string $timestamp)
Construct a new BSON timestamp type, which consists of a 4-byte increment and
4-byte timestamp. */
static PHP_METHOD(Timestamp, __construct)
{
php_phongo_timestamp_t *intern;
zend_error_handling error_handling;
zval *increment = NULL, *timestamp = NULL;
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
intern = Z_TIMESTAMP_OBJ_P(getThis());
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "zz", &increment, &timestamp) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
if (Z_TYPE_P(increment) == IS_LONG && Z_TYPE_P(timestamp) == IS_LONG) {
php_phongo_timestamp_init(intern, Z_LVAL_P(increment), Z_LVAL_P(timestamp) TSRMLS_CC);
return;
}
if (Z_TYPE_P(increment) == IS_LONG) {
convert_to_string(increment);
}
if (Z_TYPE_P(increment) != IS_STRING) {
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected increment to be an unsigned 32-bit integer or string, %s given", zend_get_type_by_const(Z_TYPE_P(increment)));
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected increment to be an unsigned 32-bit integer or string, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(increment));
return;
}
if (Z_TYPE_P(timestamp) == IS_LONG) {
convert_to_string(timestamp);
}
if (Z_TYPE_P(timestamp) != IS_STRING) {
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected timestamp to be an unsigned 32-bit integer or string, %s given", zend_get_type_by_const(Z_TYPE_P(timestamp)));
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected timestamp to be an unsigned 32-bit integer or string, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(timestamp));
return;
}
php_phongo_timestamp_init_from_string(intern, Z_STRVAL_P(increment), Z_STRLEN_P(increment), Z_STRVAL_P(timestamp), Z_STRLEN_P(timestamp) TSRMLS_CC);
} /* }}} */
/* {{{ proto integer MongoDB\BSON\Timestamp::getIncrement()
*/
static PHP_METHOD(Timestamp, getIncrement)
{
php_phongo_timestamp_t *intern;
intern = Z_TIMESTAMP_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETVAL_LONG(intern->increment);
} /* }}} */
/* {{{ proto integer MongoDB\BSON\Timestamp::getTimestamp()
*/
static PHP_METHOD(Timestamp, getTimestamp)
{
php_phongo_timestamp_t *intern;
intern = Z_TIMESTAMP_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETVAL_LONG(intern->timestamp);
} /* }}} */
/* {{{ proto void MongoDB\BSON\Timestamp::__set_state(array $properties)
*/
static PHP_METHOD(Timestamp, __set_state)
{
php_phongo_timestamp_t *intern;
HashTable *props;
zval *array;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) {
RETURN_FALSE;
}
object_init_ex(return_value, php_phongo_timestamp_ce);
intern = Z_TIMESTAMP_OBJ_P(return_value);
props = Z_ARRVAL_P(array);
php_phongo_timestamp_init_from_hash(intern, props TSRMLS_CC);
} /* }}} */
/* {{{ proto string MongoDB\BSON\Timestamp::__toString()
Returns a string in the form: [increment:timestamp] */
static PHP_METHOD(Timestamp, __toString)
{
php_phongo_timestamp_t *intern;
char *retval;
int retval_len;
intern = Z_TIMESTAMP_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
retval_len = spprintf(&retval, 0, "[%" PRIu32 ":%" PRIu32 "]", intern->increment, intern->timestamp);
PHONGO_RETVAL_STRINGL(retval, retval_len);
efree(retval);
} /* }}} */
/* {{{ proto array MongoDB\BSON\Timestamp::jsonSerialize()
*/
static PHP_METHOD(Timestamp, jsonSerialize)
{
php_phongo_timestamp_t *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = Z_TIMESTAMP_OBJ_P(getThis());
array_init_size(return_value, 1);
#if PHP_VERSION_ID >= 70000
{
zval ts;
array_init_size(&ts, 2);
ADD_ASSOC_LONG_EX(&ts, "t", intern->timestamp);
ADD_ASSOC_LONG_EX(&ts, "i", intern->increment);
ADD_ASSOC_ZVAL_EX(return_value, "$timestamp", &ts);
}
#else
{
zval *ts;
MAKE_STD_ZVAL(ts);
array_init_size(ts, 2);
ADD_ASSOC_LONG_EX(ts, "t", intern->timestamp);
ADD_ASSOC_LONG_EX(ts, "i", intern->increment);
ADD_ASSOC_ZVAL_EX(return_value, "$timestamp", ts);
}
#endif
} /* }}} */
/* {{{ proto string MongoDB\BSON\Timestamp::serialize()
*/
static PHP_METHOD(Timestamp, serialize)
{
php_phongo_timestamp_t *intern;
#if PHP_VERSION_ID >= 70000
zval retval;
#else
zval *retval;
#endif
php_serialize_data_t var_hash;
smart_str buf = { 0 };
char s_increment[12];
char s_timestamp[12];
int s_increment_len;
int s_timestamp_len;
intern = Z_TIMESTAMP_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
s_increment_len = snprintf(s_increment, sizeof(s_increment), "%" PRIu32, intern->increment);
s_timestamp_len = snprintf(s_timestamp, sizeof(s_timestamp), "%" PRIu32, intern->timestamp);
#if PHP_VERSION_ID >= 70000
array_init_size(&retval, 2);
ADD_ASSOC_STRINGL(&retval, "increment", s_increment, s_increment_len);
ADD_ASSOC_STRINGL(&retval, "timestamp", s_timestamp, s_timestamp_len);
#else
ALLOC_INIT_ZVAL(retval);
array_init_size(retval, 2);
ADD_ASSOC_STRINGL(retval, "increment", s_increment, s_increment_len);
ADD_ASSOC_STRINGL(retval, "timestamp", s_timestamp, s_timestamp_len);
#endif
PHP_VAR_SERIALIZE_INIT(var_hash);
php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC);
smart_str_0(&buf);
PHP_VAR_SERIALIZE_DESTROY(var_hash);
PHONGO_RETVAL_SMART_STR(buf);
smart_str_free(&buf);
zval_ptr_dtor(&retval);
} /* }}} */
/* {{{ proto void MongoDB\BSON\Timestamp::unserialize(string $serialized)
*/
static PHP_METHOD(Timestamp, unserialize)
{
php_phongo_timestamp_t *intern;
zend_error_handling error_handling;
char *serialized;
phongo_zpp_char_len serialized_len;
#if PHP_VERSION_ID >= 70000
zval props;
#else
zval *props;
#endif
php_unserialize_data_t var_hash;
intern = Z_TIMESTAMP_OBJ_P(getThis());
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
#if PHP_VERSION_ID < 70000
ALLOC_INIT_ZVAL(props);
#endif
PHP_VAR_UNSERIALIZE_INIT(var_hash);
if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char *) serialized + serialized_len, &var_hash TSRMLS_CC)) {
zval_ptr_dtor(&props);
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_timestamp_ce->name));
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
return;
}
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
#if PHP_VERSION_ID >= 70000
php_phongo_timestamp_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC);
#else
php_phongo_timestamp_init_from_hash(intern, HASH_OF(props) TSRMLS_CC);
#endif
zval_ptr_dtor(&props);
} /* }}} */
/* {{{ MongoDB\BSON\Timestamp function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_Timestamp___construct, 0, 0, 2)
ZEND_ARG_INFO(0, increment)
ZEND_ARG_INFO(0, timestamp)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Timestamp___set_state, 0, 0, 1)
ZEND_ARG_ARRAY_INFO(0, properties, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Timestamp_unserialize, 0, 0, 1)
ZEND_ARG_INFO(0, serialized)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Timestamp_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_timestamp_me[] = {
PHP_ME(Timestamp, __construct, ai_Timestamp___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Timestamp, __set_state, ai_Timestamp___set_state, ZEND_ACC_PUBLIC|ZEND_ACC_STATIC)
PHP_ME(Timestamp, __toString, ai_Timestamp_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Timestamp, jsonSerialize, ai_Timestamp_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Timestamp, serialize, ai_Timestamp_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Timestamp, unserialize, ai_Timestamp_unserialize, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Timestamp, getIncrement, ai_Timestamp_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Timestamp, getTimestamp, ai_Timestamp_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\BSON\Timestamp object handlers */
static zend_object_handlers php_phongo_handler_timestamp;

/* Object-free handler: destroys the standard object and releases the cached
 * property HashTable, if one was created. On PHP 5 the struct itself must be
 * freed here; on PHP 7 the engine releases the embedded zend_object. */
static void php_phongo_timestamp_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
    php_phongo_timestamp_t *intern = Z_OBJ_TIMESTAMP(object);

    zend_object_std_dtor(&intern->std TSRMLS_CC);

    if (intern->properties) {
        zend_hash_destroy(intern->properties);
        FREE_HASHTABLE(intern->properties);
    }

#if PHP_VERSION_ID < 70000
    /* PHP 5 allocated the struct manually, so it is freed manually too. */
    efree(intern);
#endif
} /* }}} */

/* create_object handler: allocates and initializes a Timestamp wrapper and
 * installs the class's custom handlers. The PHP 5 branch additionally
 * registers the destructor/free callbacks with the object store. */
static phongo_create_object_retval php_phongo_timestamp_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
    php_phongo_timestamp_t *intern = NULL;

    intern = PHONGO_ALLOC_OBJECT_T(php_phongo_timestamp_t, class_type);

    zend_object_std_init(&intern->std, class_type TSRMLS_CC);
    object_properties_init(&intern->std, class_type);

#if PHP_VERSION_ID >= 70000
    intern->std.handlers = &php_phongo_handler_timestamp;

    return &intern->std;
#else
    {
        zend_object_value retval;
        retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_timestamp_free_object, NULL TSRMLS_CC);
        retval.handlers = &php_phongo_handler_timestamp;

        return retval;
    }
#endif
} /* }}} */
/* Orders two Timestamp objects the way MongoDB does: by the timestamp
 * (seconds) component first, and only then by the increment. Returns the
 * usual -1/0/1 comparator result. */
static int php_phongo_timestamp_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */
{
    php_phongo_timestamp_t *lhs = Z_TIMESTAMP_OBJ_P(o1);
    php_phongo_timestamp_t *rhs = Z_TIMESTAMP_OBJ_P(o2);

    if (lhs->timestamp < rhs->timestamp) {
        return -1;
    }
    if (lhs->timestamp > rhs->timestamp) {
        return 1;
    }
    if (lhs->increment < rhs->increment) {
        return -1;
    }
    if (lhs->increment > rhs->increment) {
        return 1;
    }

    return 0;
} /* }}} */
/* get_gc handler: the object holds no zvals directly, so advertise an empty
 * table and hand the engine the cached property HashTable (may be NULL). */
static HashTable *php_phongo_timestamp_get_gc(zval *object, phongo_get_gc_table table, int *n TSRMLS_DC) /* {{{ */
{
    php_phongo_timestamp_t *intern = Z_TIMESTAMP_OBJ_P(object);

    *table = NULL;
    *n = 0;

    return intern->properties;
} /* }}} */
/* Builds the property HashTable exposing "increment" and "timestamp". Both
 * uint32_t fields are rendered as decimal strings (PRIu32), presumably so
 * values above the platform's signed long range round-trip — TODO confirm.
 * For an uninitialized object the (empty) table is returned as-is. */
static HashTable *php_phongo_timestamp_get_properties_hash(zval *object, bool is_debug TSRMLS_DC) /* {{{ */
{
    php_phongo_timestamp_t *intern;
    HashTable *props;
    char s_increment[24];
    char s_timestamp[24];
    int s_increment_len;
    int s_timestamp_len;

    intern = Z_TIMESTAMP_OBJ_P(object);

    PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2);

    if (!intern->initialized) {
        return props;
    }

    s_increment_len = snprintf(s_increment, sizeof(s_increment), "%" PRIu32, intern->increment);
    s_timestamp_len = snprintf(s_timestamp, sizeof(s_timestamp), "%" PRIu32, intern->timestamp);

#if PHP_VERSION_ID >= 70000
    {
        zval increment, timestamp;

        ZVAL_STRINGL(&increment, s_increment, s_increment_len);
        zend_hash_str_update(props, "increment", sizeof("increment")-1, &increment);

        ZVAL_STRINGL(&timestamp, s_timestamp, s_timestamp_len);
        zend_hash_str_update(props, "timestamp", sizeof("timestamp")-1, &timestamp);
    }
#else
    {
        zval *increment, *timestamp;

        MAKE_STD_ZVAL(increment);
        ZVAL_STRINGL(increment, s_increment, s_increment_len, 1);
        zend_hash_update(props, "increment", sizeof("increment"), &increment, sizeof(increment), NULL);

        MAKE_STD_ZVAL(timestamp);
        ZVAL_STRINGL(timestamp, s_timestamp, s_timestamp_len, 1);
        zend_hash_update(props, "timestamp", sizeof("timestamp"), &timestamp, sizeof(timestamp), NULL);
    }
#endif

    return props;
} /* }}} */

/* get_debug_info handler: returns a temporary table for var_dump() et al.
 * (*is_temp = 1 tells the engine to free it after use). */
static HashTable *php_phongo_timestamp_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
    *is_temp = 1;
    return php_phongo_timestamp_get_properties_hash(object, true TSRMLS_CC);
} /* }}} */

/* get_properties handler: returns the object's property table. */
static HashTable *php_phongo_timestamp_get_properties(zval *object TSRMLS_DC) /* {{{ */
{
    return php_phongo_timestamp_get_properties_hash(object, false TSRMLS_CC);
} /* }}} */
/* }}} */
/* Registers the MongoDB\BSON\Timestamp class: marks it final, declares its
 * interfaces, and installs the custom object handlers. Called at MINIT. */
void php_phongo_timestamp_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
    zend_class_entry ce;

    INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Timestamp", php_phongo_timestamp_me);
    php_phongo_timestamp_ce = zend_register_internal_class(&ce TSRMLS_CC);
    php_phongo_timestamp_ce->create_object = php_phongo_timestamp_create_object;
    PHONGO_CE_FINAL(php_phongo_timestamp_ce);

    zend_class_implements(php_phongo_timestamp_ce TSRMLS_CC, 1, php_phongo_timestamp_interface_ce);
    zend_class_implements(php_phongo_timestamp_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce);
    zend_class_implements(php_phongo_timestamp_ce TSRMLS_CC, 1, php_phongo_type_ce);
    zend_class_implements(php_phongo_timestamp_ce TSRMLS_CC, 1, zend_ce_serializable);

    /* Start from the standard handlers and override only the ones this
     * class customizes (comparison, debug output, GC, property export). */
    memcpy(&php_phongo_handler_timestamp, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
    php_phongo_handler_timestamp.compare_objects = php_phongo_timestamp_compare_objects;
    php_phongo_handler_timestamp.get_debug_info = php_phongo_timestamp_get_debug_info;
    php_phongo_handler_timestamp.get_gc = php_phongo_timestamp_get_gc;
    php_phongo_handler_timestamp.get_properties = php_phongo_timestamp_get_properties;

#if PHP_VERSION_ID >= 70000
    php_phongo_handler_timestamp.free_obj = php_phongo_timestamp_free_object;
    /* The offset of the embedded zend_object lets the engine recover the
     * wrapping struct from a zend_object pointer. */
    php_phongo_handler_timestamp.offset = XtOffsetOf(php_phongo_timestamp_t, std);
#endif
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/BSON/TimestampInterface.c b/mongodb-1.4.2/src/BSON/TimestampInterface.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/TimestampInterface.c
rename to mongodb-1.4.2/src/BSON/TimestampInterface.c
diff --git a/mongodb-1.3.4/src/BSON/Type.c b/mongodb-1.4.2/src/BSON/Type.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/Type.c
rename to mongodb-1.4.2/src/BSON/Type.c
diff --git a/mongodb-1.3.4/src/BSON/UTCDateTime.c b/mongodb-1.4.2/src/BSON/UTCDateTime.c
similarity index 99%
rename from mongodb-1.3.4/src/BSON/UTCDateTime.c
rename to mongodb-1.4.2/src/BSON/UTCDateTime.c
index 987a758e..278c2126 100644
--- a/mongodb-1.3.4/src/BSON/UTCDateTime.c
+++ b/mongodb-1.4.2/src/BSON/UTCDateTime.c
@@ -1,572 +1,572 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <math.h>
#include <php.h>
#include <Zend/zend_interfaces.h>
#include <ext/date/php_date.h>
#include <ext/standard/php_var.h>
#if PHP_VERSION_ID >= 70000
# include <zend_smart_str.h>
#else
# include <ext/standard/php_smart_str.h>
#endif
#ifdef PHP_WIN32
# include "win32/time.h"
#endif
#include "phongo_compat.h"
#include "php_phongo.h"
zend_class_entry *php_phongo_utcdatetime_ce;

/* Initialize the object from a millisecond value. Always succeeds. */
static bool php_phongo_utcdatetime_init(php_phongo_utcdatetime_t *intern, int64_t milliseconds) /* {{{ */
{
    intern->milliseconds = milliseconds;
    intern->initialized = true;

    return true;
} /* }}} */

/* Initialize the object from a numeric string and return whether it was
 * successful. An exception will be thrown on error. */
static bool php_phongo_utcdatetime_init_from_string(php_phongo_utcdatetime_t *intern, const char *s_milliseconds, phongo_zpp_char_len s_milliseconds_len TSRMLS_DC) /* {{{ */
{
    int64_t milliseconds;
    char *endptr = NULL;

    errno = 0;
    milliseconds = bson_ascii_strtoll(s_milliseconds, &endptr, 10);

    /* bson_ascii_strtoll() will set errno if conversion fails; however, we
     * do not need to specify the type of error. The endptr comparison also
     * rejects strings with trailing non-numeric characters.
     *
     * Note: bson_ascii_strtoll() does not properly detect out-of-range values
     * (see: CDRIVER-1377). strtoll() would be preferable, but it is not
     * available on all platforms (e.g. HP-UX), and atoll() provides no error
     * reporting at all. */
    if (errno || (endptr && endptr != ((const char *)s_milliseconds + s_milliseconds_len))) {
        phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing \"%s\" as 64-bit integer for %s initialization", s_milliseconds, ZSTR_VAL(php_phongo_utcdatetime_ce->name));
        return false;
    }

    return php_phongo_utcdatetime_init(intern, milliseconds);
} /* }}} */

/* Initialize the object from a HashTable and return whether it was successful.
 * The "milliseconds" field may be an integer or a numeric string (presumably
 * to support values exceeding the platform long range — TODO confirm).
 * An exception will be thrown on error. */
static bool php_phongo_utcdatetime_init_from_hash(php_phongo_utcdatetime_t *intern, HashTable *props TSRMLS_DC) /* {{{ */
{
#if PHP_VERSION_ID >= 70000
    zval *milliseconds;

    if ((milliseconds = zend_hash_str_find(props, "milliseconds", sizeof("milliseconds")-1)) && Z_TYPE_P(milliseconds) == IS_LONG) {
        return php_phongo_utcdatetime_init(intern, Z_LVAL_P(milliseconds));
    }

    if ((milliseconds = zend_hash_str_find(props, "milliseconds", sizeof("milliseconds")-1)) && Z_TYPE_P(milliseconds) == IS_STRING) {
        return php_phongo_utcdatetime_init_from_string(intern, Z_STRVAL_P(milliseconds), Z_STRLEN_P(milliseconds) TSRMLS_CC);
    }
#else
    zval **milliseconds;

    if (zend_hash_find(props, "milliseconds", sizeof("milliseconds"), (void**) &milliseconds) == SUCCESS && Z_TYPE_PP(milliseconds) == IS_LONG) {
        return php_phongo_utcdatetime_init(intern, Z_LVAL_PP(milliseconds));
    }

    if (zend_hash_find(props, "milliseconds", sizeof("milliseconds"), (void**) &milliseconds) == SUCCESS && Z_TYPE_PP(milliseconds) == IS_STRING) {
        return php_phongo_utcdatetime_init_from_string(intern, Z_STRVAL_PP(milliseconds), Z_STRLEN_PP(milliseconds) TSRMLS_CC);
    }
#endif

    phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"milliseconds\" integer or numeric string field", ZSTR_VAL(php_phongo_utcdatetime_ce->name));
    return false;
} /* }}} */
/* Initialize the object from the current wall-clock time. Always succeeds. */
static bool php_phongo_utcdatetime_init_from_current_time(php_phongo_utcdatetime_t *intern) /* {{{ */
{
    struct timeval now;

    gettimeofday(&now, NULL);

    /* Widen to 64-bit before scaling so the arithmetic cannot overflow. */
    intern->milliseconds = ((int64_t) now.tv_sec * 1000) + ((int64_t) now.tv_usec / 1000);
    intern->initialized = true;

    return true;
} /* }}} */
/* Initialize the object from a DateTime object and return whether it was
 * successful. Always succeeds. */
static bool php_phongo_utcdatetime_init_from_date(php_phongo_utcdatetime_t *intern, php_date_obj *datetime_obj) /* {{{ */
{
    int64_t sec, usec;

    /* The following assignments use the same logic as date_format() in php_date.c */
    sec = datetime_obj->time->sse;
#if PHP_VERSION_ID >= 70200
    /* PHP 7.2+ stores microseconds directly in timelib's "us" field. */
    usec = (int64_t) floor(datetime_obj->time->us);
#else
    /* Earlier versions store a fractional-second double; round to whole
     * microseconds before converting. */
    usec = (int64_t) floor(datetime_obj->time->f * 1000000 + 0.5);
#endif

    intern->milliseconds = (sec * 1000) + (usec / 1000);
    intern->initialized = true;

    return true;
} /* }}} */
/* {{{ proto void MongoDB\BSON\UTCDateTime::__construct([int|float|string|DateTimeInterface $milliseconds = null])
Construct a new BSON UTCDateTime type from either the current time,
milliseconds since the epoch, or a DateTimeInterface object. Defaults to the
current time. */
static PHP_METHOD(UTCDateTime, __construct)
{
php_phongo_utcdatetime_t *intern;
zend_error_handling error_handling;
zval *milliseconds = NULL;
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
intern = Z_UTCDATETIME_OBJ_P(getThis());
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|z!", &milliseconds) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
if (milliseconds == NULL) {
php_phongo_utcdatetime_init_from_current_time(intern);
return;
}
if (Z_TYPE_P(milliseconds) == IS_OBJECT) {
if (instanceof_function(Z_OBJCE_P(milliseconds), php_date_get_date_ce() TSRMLS_CC) ||
(php_phongo_date_immutable_ce && instanceof_function(Z_OBJCE_P(milliseconds), php_phongo_date_immutable_ce TSRMLS_CC))) {
php_phongo_utcdatetime_init_from_date(intern, Z_PHPDATE_P(milliseconds));
} else {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected instance of DateTimeInterface, %s given", ZSTR_VAL(Z_OBJCE_P(milliseconds)->name));
}
return;
}
if (Z_TYPE_P(milliseconds) == IS_LONG) {
php_phongo_utcdatetime_init(intern, Z_LVAL_P(milliseconds));
return;
}
if (Z_TYPE_P(milliseconds) == IS_DOUBLE) {
char tmp[24];
int tmp_len;
tmp_len = snprintf(tmp, sizeof(tmp), "%.0f", Z_DVAL_P(milliseconds) > 0 ? floor(Z_DVAL_P(milliseconds)) : ceil(Z_DVAL_P(milliseconds)));
php_phongo_utcdatetime_init_from_string(intern, tmp, tmp_len TSRMLS_CC);
return;
}
if (Z_TYPE_P(milliseconds) != IS_STRING) {
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected integer or string, %s given", zend_get_type_by_const(Z_TYPE_P(milliseconds)));
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected integer or string, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(milliseconds));
return;
}
php_phongo_utcdatetime_init_from_string(intern, Z_STRVAL_P(milliseconds), Z_STRLEN_P(milliseconds) TSRMLS_CC);
} /* }}} */
/* {{{ proto void MongoDB\BSON\UTCDateTime::__set_state(array $properties)
   Recreates a UTCDateTime from var_export() output. */
static PHP_METHOD(UTCDateTime, __set_state)
{
    zval *zarr;
    php_phongo_utcdatetime_t *intern;

    if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &zarr) == FAILURE) {
        RETURN_FALSE;
    }

    /* Instantiate the class and populate it from the exported properties. */
    object_init_ex(return_value, php_phongo_utcdatetime_ce);
    intern = Z_UTCDATETIME_OBJ_P(return_value);

    php_phongo_utcdatetime_init_from_hash(intern, Z_ARRVAL_P(zarr) TSRMLS_CC);
} /* }}} */
/* {{{ proto string MongoDB\BSON\UTCDateTime::__toString()
   Returns the UTCDateTime's milliseconds as a string */
static PHP_METHOD(UTCDateTime, __toString)
{
    php_phongo_utcdatetime_t *intern = Z_UTCDATETIME_OBJ_P(getThis());
    char *str;
    int str_len;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* spprintf() allocates; hand the engine a copy and free ours. */
    str_len = spprintf(&str, 0, "%" PRId64, intern->milliseconds);
    PHONGO_RETVAL_STRINGL(str, str_len);
    efree(str);
} /* }}} */
/* {{{ proto DateTime MongoDB\BSON\UTCDateTime::toDateTime()
   Returns a DateTime object representing this UTCDateTime */
static PHP_METHOD(UTCDateTime, toDateTime)
{
    php_phongo_utcdatetime_t *intern;
    php_date_obj *datetime_obj;
    char *sec;
    size_t sec_len;

    intern = Z_UTCDATETIME_OBJ_P(getThis());

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    object_init_ex(return_value, php_date_get_date_ce());
    datetime_obj = Z_PHPDATE_P(return_value);

    /* Initialize the DateTime from the whole-second part via the "@<unix
     * timestamp>" format, then patch in the sub-second component directly. */
    sec_len = spprintf(&sec, 0, "@%" PRId64, intern->milliseconds / 1000);
    php_date_initialize(datetime_obj, sec, sec_len, NULL, NULL, 0 TSRMLS_CC);
    efree(sec);

    /* NOTE(review): C division/modulo truncate toward zero, so a negative
     * millisecond value yields a negative remainder here — confirm how the
     * date extension treats a negative us/f field. */
#if PHP_VERSION_ID >= 70200
    datetime_obj->time->us = (intern->milliseconds % 1000) * 1000;
#else
    datetime_obj->time->f = (double) (intern->milliseconds % 1000) / 1000;
#endif
}
/* }}} */

/* {{{ proto array MongoDB\BSON\UTCDateTime::jsonSerialize()
   Returns the extended JSON form: { "$date": { "$numberLong": "<ms>" } } */
static PHP_METHOD(UTCDateTime, jsonSerialize)
{
    php_phongo_utcdatetime_t *intern;
    char s_milliseconds[24];
    int s_milliseconds_len;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    intern = Z_UTCDATETIME_OBJ_P(getThis());
    /* Render the 64-bit value as a decimal string for "$numberLong". */
    s_milliseconds_len = snprintf(s_milliseconds, sizeof(s_milliseconds), "%" PRId64, intern->milliseconds);

    array_init_size(return_value, 1);

#if PHP_VERSION_ID >= 70000
    {
        zval udt;

        array_init_size(&udt, 1);
        ADD_ASSOC_STRINGL(&udt, "$numberLong", s_milliseconds, s_milliseconds_len);
        ADD_ASSOC_ZVAL_EX(return_value, "$date", &udt);
    }
#else
    {
        zval *udt;

        MAKE_STD_ZVAL(udt);
        array_init_size(udt, 1);
        ADD_ASSOC_STRINGL(udt, "$numberLong", s_milliseconds, s_milliseconds_len);
        ADD_ASSOC_ZVAL_EX(return_value, "$date", udt);
    }
#endif
} /* }}} */

/* {{{ proto string MongoDB\BSON\UTCDateTime::serialize()
   Serializes the object as an array with a "milliseconds" string field
   (a string presumably so the payload is platform-independent — TODO
   confirm). */
static PHP_METHOD(UTCDateTime, serialize)
{
    php_phongo_utcdatetime_t *intern;
#if PHP_VERSION_ID >= 70000
    zval retval;
#else
    zval *retval;
#endif
    php_serialize_data_t var_hash;
    smart_str buf = { 0 };
    char s_milliseconds[24];
    int s_milliseconds_len;

    intern = Z_UTCDATETIME_OBJ_P(getThis());

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    s_milliseconds_len = snprintf(s_milliseconds, sizeof(s_milliseconds), "%" PRId64, intern->milliseconds);

#if PHP_VERSION_ID >= 70000
    array_init_size(&retval, 2);
    ADD_ASSOC_STRINGL(&retval, "milliseconds", s_milliseconds, s_milliseconds_len);
#else
    ALLOC_INIT_ZVAL(retval);
    array_init_size(retval, 2);
    ADD_ASSOC_STRINGL(retval, "milliseconds", s_milliseconds, s_milliseconds_len);
#endif

    PHP_VAR_SERIALIZE_INIT(var_hash);
    php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC);
    smart_str_0(&buf);
    PHP_VAR_SERIALIZE_DESTROY(var_hash);

    PHONGO_RETVAL_SMART_STR(buf);

    smart_str_free(&buf);
    zval_ptr_dtor(&retval);
} /* }}} */

/* {{{ proto void MongoDB\BSON\UTCDateTime::unserialize(string $serialized)
   Restores the object from a payload produced by serialize(). Throws
   UnexpectedValueException if the payload does not unserialize cleanly. */
static PHP_METHOD(UTCDateTime, unserialize)
{
    php_phongo_utcdatetime_t *intern;
    zend_error_handling error_handling;
    char *serialized;
    phongo_zpp_char_len serialized_len;
#if PHP_VERSION_ID >= 70000
    zval props;
#else
    zval *props;
#endif
    php_unserialize_data_t var_hash;

    intern = Z_UTCDATETIME_OBJ_P(getThis());

    /* Convert argument-parsing errors into InvalidArgumentException. */
    zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);

    if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) {
        zend_restore_error_handling(&error_handling TSRMLS_CC);
        return;
    }
    zend_restore_error_handling(&error_handling TSRMLS_CC);

#if PHP_VERSION_ID < 70000
    ALLOC_INIT_ZVAL(props);
#endif
    PHP_VAR_UNSERIALIZE_INIT(var_hash);

    if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char *) serialized + serialized_len, &var_hash TSRMLS_CC)) {
        zval_ptr_dtor(&props);
        phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_utcdatetime_ce->name));

        PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
        return;
    }
    PHP_VAR_UNSERIALIZE_DESTROY(var_hash);

#if PHP_VERSION_ID >= 70000
    php_phongo_utcdatetime_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC);
#else
    php_phongo_utcdatetime_init_from_hash(intern, HASH_OF(props) TSRMLS_CC);
#endif

    zval_ptr_dtor(&props);
} /* }}} */
/* {{{ MongoDB\BSON\UTCDateTime function entries */
/* Arginfo: __construct() takes an optional milliseconds value (int, float,
 * numeric string, DateTimeInterface, or null). */
ZEND_BEGIN_ARG_INFO_EX(ai_UTCDateTime___construct, 0, 0, 0)
    ZEND_ARG_INFO(0, milliseconds)
ZEND_END_ARG_INFO()

/* Arginfo: __set_state() takes a single array of exported properties. */
ZEND_BEGIN_ARG_INFO_EX(ai_UTCDateTime___set_state, 0, 0, 1)
    ZEND_ARG_ARRAY_INFO(0, properties, 0)
ZEND_END_ARG_INFO()

/* Arginfo: unserialize() takes the serialized payload string. */
ZEND_BEGIN_ARG_INFO_EX(ai_UTCDateTime_unserialize, 0, 0, 1)
    ZEND_ARG_INFO(0, serialized)
ZEND_END_ARG_INFO()

/* Arginfo shared by all zero-argument UTCDateTime methods. */
ZEND_BEGIN_ARG_INFO_EX(ai_UTCDateTime_void, 0, 0, 0)
ZEND_END_ARG_INFO()

/* Method table for MongoDB\BSON\UTCDateTime. All methods are final except
 * __set_state, which is static so var_export() output can be re-evaluated. */
static zend_function_entry php_phongo_utcdatetime_me[] = {
    PHP_ME(UTCDateTime, __construct, ai_UTCDateTime___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
    PHP_ME(UTCDateTime, __set_state, ai_UTCDateTime___set_state, ZEND_ACC_PUBLIC|ZEND_ACC_STATIC)
    PHP_ME(UTCDateTime, __toString, ai_UTCDateTime_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
    PHP_ME(UTCDateTime, jsonSerialize, ai_UTCDateTime_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
    PHP_ME(UTCDateTime, serialize, ai_UTCDateTime_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
    PHP_ME(UTCDateTime, unserialize, ai_UTCDateTime_unserialize, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
    PHP_ME(UTCDateTime, toDateTime, ai_UTCDateTime_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
    PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\BSON\UTCDateTime object handlers */
static zend_object_handlers php_phongo_handler_utcdatetime;

/* Object-free handler: destroys the standard object and releases the cached
 * property HashTable, if one was created. On PHP 5 the struct itself must be
 * freed here; on PHP 7 the engine releases the embedded zend_object. */
static void php_phongo_utcdatetime_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
    php_phongo_utcdatetime_t *intern = Z_OBJ_UTCDATETIME(object);

    zend_object_std_dtor(&intern->std TSRMLS_CC);

    if (intern->properties) {
        zend_hash_destroy(intern->properties);
        FREE_HASHTABLE(intern->properties);
    }

#if PHP_VERSION_ID < 70000
    /* PHP 5 allocated the struct manually, so it is freed manually too. */
    efree(intern);
#endif
} /* }}} */

/* create_object handler: allocates and initializes a UTCDateTime wrapper and
 * installs the class's custom handlers. The PHP 5 branch additionally
 * registers the destructor/free callbacks with the object store. */
static phongo_create_object_retval php_phongo_utcdatetime_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
    php_phongo_utcdatetime_t *intern = NULL;

    intern = PHONGO_ALLOC_OBJECT_T(php_phongo_utcdatetime_t, class_type);

    zend_object_std_init(&intern->std, class_type TSRMLS_CC);
    object_properties_init(&intern->std, class_type);

#if PHP_VERSION_ID >= 70000
    intern->std.handlers = &php_phongo_handler_utcdatetime;

    return &intern->std;
#else
    {
        zend_object_value retval;
        retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_utcdatetime_free_object, NULL TSRMLS_CC);
        retval.handlers = &php_phongo_handler_utcdatetime;

        return retval;
    }
#endif
} /* }}} */
/* Orders two UTCDateTime objects strictly by their 64-bit millisecond value.
 * Returns the usual -1/0/1 comparator result. */
static int php_phongo_utcdatetime_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */
{
    php_phongo_utcdatetime_t *lhs = Z_UTCDATETIME_OBJ_P(o1);
    php_phongo_utcdatetime_t *rhs = Z_UTCDATETIME_OBJ_P(o2);

    if (lhs->milliseconds < rhs->milliseconds) {
        return -1;
    }
    if (lhs->milliseconds > rhs->milliseconds) {
        return 1;
    }

    return 0;
} /* }}} */
/* get_gc handler: the object holds no zvals directly, so advertise an empty
 * table and hand the engine the cached property HashTable (may be NULL). */
static HashTable *php_phongo_utcdatetime_get_gc(zval *object, phongo_get_gc_table table, int *n TSRMLS_DC) /* {{{ */
{
    php_phongo_utcdatetime_t *intern = Z_UTCDATETIME_OBJ_P(object);

    *table = NULL;
    *n = 0;

    return intern->properties;
} /* }}} */
/* Builds the property HashTable exposing "milliseconds". The int64_t value is
 * rendered as a decimal string (PRId64), presumably so it round-trips on
 * platforms with a 32-bit long — TODO confirm. For an uninitialized object
 * the (empty) table is returned as-is. */
static HashTable *php_phongo_utcdatetime_get_properties_hash(zval *object, bool is_debug TSRMLS_DC) /* {{{ */
{
    php_phongo_utcdatetime_t *intern;
    HashTable *props;
    char s_milliseconds[24];
    int s_milliseconds_len;

    intern = Z_UTCDATETIME_OBJ_P(object);

    PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2);

    if (!intern->initialized) {
        return props;
    }

    s_milliseconds_len = snprintf(s_milliseconds, sizeof(s_milliseconds), "%" PRId64, intern->milliseconds);

#if PHP_VERSION_ID >= 70000
    {
        zval milliseconds;

        ZVAL_STRINGL(&milliseconds, s_milliseconds, s_milliseconds_len);
        zend_hash_str_update(props, "milliseconds", sizeof("milliseconds")-1, &milliseconds);
    }
#else
    {
        zval *milliseconds;

        MAKE_STD_ZVAL(milliseconds);
        ZVAL_STRINGL(milliseconds, s_milliseconds, s_milliseconds_len, 1);
        zend_hash_update(props, "milliseconds", sizeof("milliseconds"), &milliseconds, sizeof(milliseconds), NULL);
    }
#endif

    return props;
} /* }}} */

/* get_debug_info handler: returns a temporary table for var_dump() et al.
 * (*is_temp = 1 tells the engine to free it after use). */
static HashTable *php_phongo_utcdatetime_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
    *is_temp = 1;
    return php_phongo_utcdatetime_get_properties_hash(object, true TSRMLS_CC);
} /* }}} */

/* get_properties handler: returns the object's property table. */
static HashTable *php_phongo_utcdatetime_get_properties(zval *object TSRMLS_DC) /* {{{ */
{
    return php_phongo_utcdatetime_get_properties_hash(object, false TSRMLS_CC);
} /* }}} */
/* }}} */
/* Registers the MongoDB\BSON\UTCDateTime class: marks it final, declares its
 * interfaces, and installs the custom object handlers. Called at MINIT. */
void php_phongo_utcdatetime_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
    zend_class_entry ce;

    INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "UTCDateTime", php_phongo_utcdatetime_me);
    php_phongo_utcdatetime_ce = zend_register_internal_class(&ce TSRMLS_CC);
    php_phongo_utcdatetime_ce->create_object = php_phongo_utcdatetime_create_object;
    PHONGO_CE_FINAL(php_phongo_utcdatetime_ce);

    zend_class_implements(php_phongo_utcdatetime_ce TSRMLS_CC, 1, php_phongo_utcdatetime_interface_ce);
    zend_class_implements(php_phongo_utcdatetime_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce);
    zend_class_implements(php_phongo_utcdatetime_ce TSRMLS_CC, 1, php_phongo_type_ce);
    zend_class_implements(php_phongo_utcdatetime_ce TSRMLS_CC, 1, zend_ce_serializable);

    /* Start from the standard handlers and override only the ones this
     * class customizes (comparison, debug output, GC, property export). */
    memcpy(&php_phongo_handler_utcdatetime, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
    php_phongo_handler_utcdatetime.compare_objects = php_phongo_utcdatetime_compare_objects;
    php_phongo_handler_utcdatetime.get_debug_info = php_phongo_utcdatetime_get_debug_info;
    php_phongo_handler_utcdatetime.get_gc = php_phongo_utcdatetime_get_gc;
    php_phongo_handler_utcdatetime.get_properties = php_phongo_utcdatetime_get_properties;

#if PHP_VERSION_ID >= 70000
    php_phongo_handler_utcdatetime.free_obj = php_phongo_utcdatetime_free_object;
    /* The offset of the embedded zend_object lets the engine recover the
     * wrapping struct from a zend_object pointer. */
    php_phongo_handler_utcdatetime.offset = XtOffsetOf(php_phongo_utcdatetime_t, std);
#endif
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/BSON/UTCDateTimeInterface.c b/mongodb-1.4.2/src/BSON/UTCDateTimeInterface.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/UTCDateTimeInterface.c
rename to mongodb-1.4.2/src/BSON/UTCDateTimeInterface.c
diff --git a/mongodb-1.4.2/src/BSON/Undefined.c b/mongodb-1.4.2/src/BSON/Undefined.c
new file mode 100644
index 00000000..ab349372
--- /dev/null
+++ b/mongodb-1.4.2/src/BSON/Undefined.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2014-2017 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <php.h>
+#include <Zend/zend_interfaces.h>
+#include <ext/standard/php_var.h>
+#if PHP_VERSION_ID >= 70000
+# include <zend_smart_str.h>
+#else
+# include <ext/standard/php_smart_str.h>
+#endif
+
+#include "phongo_compat.h"
+#include "php_phongo.h"
+
+zend_class_entry *php_phongo_undefined_ce;
+
+/* {{{ proto string MongoDB\BSON\Undefined::__toString()
+ Return the empty string. */
+static PHP_METHOD(Undefined, __toString)
+{
+ PHONGO_RETURN_STRINGL("", 0);
+} /* }}} */
+
+/* {{{ proto array MongoDB\BSON\Undefined::jsonSerialize()
+*/
+static PHP_METHOD(Undefined, jsonSerialize)
+{
+ if (zend_parse_parameters_none() == FAILURE) {
+ return;
+ }
+
+ array_init_size(return_value, 1);
+ ADD_ASSOC_BOOL_EX(return_value, "$undefined", 1);
+} /* }}} */
+
+/* {{{ proto string MongoDB\BSON\Undefined::serialize()
+*/
+static PHP_METHOD(Undefined, serialize)
+{
+ PHONGO_RETURN_STRING("");
+} /* }}} */
+
+/* {{{ proto void MongoDB\BSON\Undefined::unserialize(string $serialized)
+*/
+static PHP_METHOD(Undefined, unserialize)
+{
+ zend_error_handling error_handling;
+ char *serialized;
+ phongo_zpp_char_len serialized_len;
+
+ zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) {
+ zend_restore_error_handling(&error_handling TSRMLS_CC);
+ return;
+ }
+ zend_restore_error_handling(&error_handling TSRMLS_CC);
+} /* }}} */
+
+/* {{{ MongoDB\BSON\Undefined function entries */
+ZEND_BEGIN_ARG_INFO_EX(ai_Undefined_unserialize, 0, 0, 1)
+ ZEND_ARG_INFO(0, serialized)
+ZEND_END_ARG_INFO()
+
+ZEND_BEGIN_ARG_INFO_EX(ai_Undefined_void, 0, 0, 0)
+ZEND_END_ARG_INFO()
+
+static zend_function_entry php_phongo_undefined_me[] = {
+ /* __set_state intentionally missing */
+ PHP_ME(Undefined, __toString, ai_Undefined_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Undefined, jsonSerialize, ai_Undefined_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Undefined, serialize, ai_Undefined_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Undefined, unserialize, ai_Undefined_unserialize, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Undefined_void, ZEND_ACC_PRIVATE|ZEND_ACC_FINAL)
+ PHP_FE_END
+};
+/* }}} */
+
+/* {{{ MongoDB\BSON\Undefined object handlers */
+static zend_object_handlers php_phongo_handler_undefined;
+
+static void php_phongo_undefined_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
+{
+ php_phongo_undefined_t *intern = Z_OBJ_UNDEFINED(object);
+
+ zend_object_std_dtor(&intern->std TSRMLS_CC);
+
+#if PHP_VERSION_ID < 70000
+ efree(intern);
+#endif
+} /* }}} */
+
+static phongo_create_object_retval php_phongo_undefined_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
+{
+ php_phongo_undefined_t *intern = NULL;
+
+ intern = PHONGO_ALLOC_OBJECT_T(php_phongo_undefined_t, class_type);
+ zend_object_std_init(&intern->std, class_type TSRMLS_CC);
+ object_properties_init(&intern->std, class_type);
+
+#if PHP_VERSION_ID >= 70000
+ intern->std.handlers = &php_phongo_handler_undefined;
+
+ return &intern->std;
+#else
+ {
+ zend_object_value retval;
+ retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_undefined_free_object, NULL TSRMLS_CC);
+ retval.handlers = &php_phongo_handler_undefined;
+
+ return retval;
+ }
+#endif
+} /* }}} */
+/* }}} */
+
+void php_phongo_undefined_init_ce(INIT_FUNC_ARGS) /* {{{ */
+{
+ zend_class_entry ce;
+
+ INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Undefined", php_phongo_undefined_me);
+ php_phongo_undefined_ce = zend_register_internal_class(&ce TSRMLS_CC);
+ php_phongo_undefined_ce->create_object = php_phongo_undefined_create_object;
+ PHONGO_CE_FINAL(php_phongo_undefined_ce);
+
+ zend_class_implements(php_phongo_undefined_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce);
+ zend_class_implements(php_phongo_undefined_ce TSRMLS_CC, 1, php_phongo_type_ce);
+ zend_class_implements(php_phongo_undefined_ce TSRMLS_CC, 1, zend_ce_serializable);
+
+ memcpy(&php_phongo_handler_undefined, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
+#if PHP_VERSION_ID >= 70000
+ php_phongo_handler_undefined.free_obj = php_phongo_undefined_free_object;
+ php_phongo_handler_undefined.offset = XtOffsetOf(php_phongo_undefined_t, std);
+#endif
+} /* }}} */
+
+/*
+ * Local variables:
+ * tab-width: 4
+ * c-basic-offset: 4
+ * End:
+ * vim600: noet sw=4 ts=4 fdm=marker
+ * vim<600: noet sw=4 ts=4
+ */
diff --git a/mongodb-1.3.4/src/BSON/Unserializable.c b/mongodb-1.4.2/src/BSON/Unserializable.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/Unserializable.c
rename to mongodb-1.4.2/src/BSON/Unserializable.c
diff --git a/mongodb-1.3.4/src/BSON/functions.c b/mongodb-1.4.2/src/BSON/functions.c
similarity index 100%
rename from mongodb-1.3.4/src/BSON/functions.c
rename to mongodb-1.4.2/src/BSON/functions.c
diff --git a/mongodb-1.3.4/src/BSON/functions.h b/mongodb-1.4.2/src/BSON/functions.h
similarity index 100%
rename from mongodb-1.3.4/src/BSON/functions.h
rename to mongodb-1.4.2/src/BSON/functions.h
diff --git a/mongodb-1.3.4/src/MongoDB/BulkWrite.c b/mongodb-1.4.2/src/MongoDB/BulkWrite.c
similarity index 92%
rename from mongodb-1.3.4/src/MongoDB/BulkWrite.c
rename to mongodb-1.4.2/src/MongoDB/BulkWrite.c
index f93680b3..7f2864cd 100644
--- a/mongodb-1.3.4/src/MongoDB/BulkWrite.c
+++ b/mongodb-1.4.2/src/MongoDB/BulkWrite.c
@@ -1,575 +1,547 @@
/*
* Copyright 2015-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include <ext/spl/spl_iterators.h>
#include "php_array_api.h"
#include "phongo_compat.h"
#include "php_phongo.h"
#include "php_bson.h"
/* Sentinel meaning the bypass-document-validation option was not specified.
 * NOTE(review): presumed from the name — confirm against its use sites. */
#define PHONGO_BULKWRITE_BYPASS_UNSET -1

zend_class_entry *php_phongo_bulkwrite_ce;
-/* Returns whether the insert document appears to be a legacy index. */
-static inline bool php_phongo_bulkwrite_insert_is_legacy_index(bson_t *bdocument) /* {{{ */
-{
- bson_iter_t iter;
-
- if (bson_iter_init_find(&iter, bdocument, "key") && BSON_ITER_HOLDS_DOCUMENT(&iter) &&
- bson_iter_init_find(&iter, bdocument, "name") && BSON_ITER_HOLDS_UTF8(&iter) &&
- bson_iter_init_find(&iter, bdocument, "ns") && BSON_ITER_HOLDS_UTF8(&iter)) {
- return true;
- }
-
- return false;
-} /* }}} */
-
/* Extracts the "_id" field of a BSON document into a return value. If the
 * document cannot be decoded or has no "_id" field, *return_value is left
 * untouched. */
static void php_phongo_bulkwrite_extract_id(bson_t *doc, zval **return_value) /* {{{ */
{
    php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
    zval *id = NULL;

    /* Decode the document into native PHP arrays so the "_id" lookup below
     * can use the array API. */
    state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY;

    if (!php_phongo_bson_to_zval_ex(bson_get_data(doc), doc->len, &state)) {
        goto cleanup;
    }

#if PHP_VERSION_ID >= 70000
    id = php_array_fetchc(&state.zchild, "_id");
#else
    id = php_array_fetchc(state.zchild, "_id");
#endif

    if (id) {
        /* Copy (with addref) the id into the caller's zval. */
        ZVAL_ZVAL(*return_value, id, 1, 0);
    }

cleanup:
    zval_ptr_dtor(&state.zchild);
} /* }}} */
/* Returns whether any top-level field name in the document contains a "$". */
static inline bool php_phongo_bulkwrite_update_has_operators(bson_t *bupdate) /* {{{ */
{
    bson_iter_t iter;

    if (!bson_iter_init(&iter, bupdate)) {
        return false;
    }

    while (bson_iter_next(&iter)) {
        if (strchr(bson_iter_key(&iter), '$') != NULL) {
            return true;
        }
    }

    return false;
} /* }}} */
/* Appends a document field for the given opts document and key. Returns true on
 * success; otherwise, false is returned and an exception is thrown. */
static bool php_phongo_bulkwrite_opts_append_document(bson_t *opts, const char *opts_key, zval *zarr, const char *zarr_key TSRMLS_DC) /* {{{ */
{
    zval *value = php_array_fetch(zarr, zarr_key);
    bson_t b = BSON_INITIALIZER;

    /* Only arrays and objects can be converted into a BSON document. */
    if (Z_TYPE_P(value) != IS_OBJECT && Z_TYPE_P(value) != IS_ARRAY) {
        phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"%s\" option to be array or object, %s given", zarr_key, zend_get_type_by_const(Z_TYPE_P(value)));
        return false;
    }

    php_phongo_zval_to_bson(value, PHONGO_BSON_NONE, &b, NULL TSRMLS_CC);

    /* php_phongo_zval_to_bson() may throw; detect that via EG(exception)
     * and clean up the partially-built document. */
    if (EG(exception)) {
        bson_destroy(&b);
        return false;
    }

    if (!BSON_APPEND_DOCUMENT(opts, opts_key, &b)) {
        phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", opts_key);
        bson_destroy(&b);
        return false;
    }

    bson_destroy(&b);

    return true;
} /* }}} */
#define PHONGO_BULKWRITE_APPEND_BOOL(opt, value) \
if (!BSON_APPEND_BOOL(boptions, (opt), (value))) { \
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", (opt)); \
return false; \
}
#define PHONGO_BULKWRITE_APPEND_INT32(opt, value) \
if (!BSON_APPEND_INT32(boptions, (opt), (value))) { \
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", (opt)); \
return false; \
}
#define PHONGO_BULKWRITE_OPT_DOCUMENT(opt) \
if (zoptions && php_array_existsc(zoptions, (opt))) { \
if (!php_phongo_bulkwrite_opts_append_document(boptions, (opt), zoptions, (opt) TSRMLS_CC)) { \
return false; \
} \
}
/* Applies options (including defaults) for an update operation. */
static bool php_phongo_bulkwrite_update_apply_options(bson_t *boptions, zval *zoptions TSRMLS_DC)/* {{{ */
{
bool multi = false, upsert = false;
if (zoptions) {
if (php_array_existsc(zoptions, "multi")) {
multi = php_array_fetchc_bool(zoptions, "multi");
}
if (php_array_existsc(zoptions, "upsert")) {
upsert = php_array_fetchc_bool(zoptions, "upsert");
}
}
PHONGO_BULKWRITE_APPEND_BOOL("multi", multi);
PHONGO_BULKWRITE_APPEND_BOOL("upsert", upsert);
+ PHONGO_BULKWRITE_OPT_DOCUMENT("arrayFilters");
PHONGO_BULKWRITE_OPT_DOCUMENT("collation");
return true;
} /* }}} */
/* Applies options (including defaults) for a delete operation. */
static bool php_phongo_bulkwrite_delete_apply_options(bson_t *boptions, zval *zoptions TSRMLS_DC)/* {{{ */
{
	int32_t delete_limit = 0;
	/* A truthy "limit" option is normalized to 1 (delete at most one). */
	if (zoptions && php_array_existsc(zoptions, "limit")) {
		delete_limit = php_array_fetchc_bool(zoptions, "limit") ? 1 : 0;
	}
	PHONGO_BULKWRITE_APPEND_INT32("limit", delete_limit);
	PHONGO_BULKWRITE_OPT_DOCUMENT("collation");
	return true;
} /* }}} */
#undef PHONGO_BULKWRITE_APPEND_BOOL
#undef PHONGO_BULKWRITE_APPEND_INT32
#undef PHONGO_BULKWRITE_OPT_DOCUMENT
/* {{{ proto void MongoDB\Driver\BulkWrite::__construct([array $options = array()])
Constructs a new BulkWrite */
static PHP_METHOD(BulkWrite, __construct)
{
php_phongo_bulkwrite_t *intern;
zend_error_handling error_handling;
zval *options = NULL;
zend_bool ordered = 1;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_used)
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
intern = Z_BULKWRITE_OBJ_P(getThis());
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|a!", &options) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
if (options && php_array_existsc(options, "ordered")) {
ordered = php_array_fetchc_bool(options, "ordered");
}
- intern->bulk = phongo_bulkwrite_init(ordered);
+ intern->bulk = mongoc_bulk_operation_new(ordered);
intern->ordered = ordered;
intern->bypass = PHONGO_BULKWRITE_BYPASS_UNSET;
intern->num_ops = 0;
if (options && php_array_existsc(options, "bypassDocumentValidation")) {
zend_bool bypass = php_array_fetchc_bool(options, "bypassDocumentValidation");
mongoc_bulk_operation_set_bypass_document_validation(intern->bulk, bypass);
intern->bypass = bypass;
}
} /* }}} */
/* {{{ proto mixed MongoDB\Driver\BulkWrite::insert(array|object $document)
Adds an insert operation to the BulkWrite */
static PHP_METHOD(BulkWrite, insert)
{
php_phongo_bulkwrite_t *intern;
zval *zdocument;
bson_t bdocument = BSON_INITIALIZER, boptions = BSON_INITIALIZER;
bson_t *bson_out = NULL;
int bson_flags = PHONGO_BSON_ADD_ID;
bson_error_t error = {0};
DECLARE_RETURN_VALUE_USED
SUPPRESS_UNUSED_WARNING(return_value_ptr)
intern = Z_BULKWRITE_OBJ_P(getThis());
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A", &zdocument) == FAILURE) {
return;
}
if (return_value_used) {
bson_flags |= PHONGO_BSON_RETURN_ID;
}
php_phongo_zval_to_bson(zdocument, bson_flags, &bdocument, &bson_out TSRMLS_CC);
if (EG(exception)) {
goto cleanup;
}
- /* If the insert document appears to be a legacy index, instruct libmongoc
- * to allow dots in BSON keys by setting the "legacyIndex" option.
- *
- * Note: php_phongo_zval_to_bson() may have added an ObjectId if the "_id"
- * field was unset. We don't know at this point if the insert is destined
- * for a pre-2.6 server's "system.indexes" collection, but legacy index
- * creation will ignore the "_id" so there is no harm in leaving it. In the
- * event php_phongo_bulkwrite_insert_is_legacy_index() returns a false
- * positive, we absolutely want ObjectId added if "_id" was unset. */
- if (php_phongo_bulkwrite_insert_is_legacy_index(&bdocument) &&
- !BSON_APPEND_BOOL(&boptions, "legacyIndex", true)) {
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"legacyIndex\" option");
- goto cleanup;
- }
-
if (!mongoc_bulk_operation_insert_with_opts(intern->bulk, &bdocument, &boptions, &error)) {
phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
goto cleanup;
}
intern->num_ops++;
if (bson_out && return_value_used) {
php_phongo_bulkwrite_extract_id(bson_out, &return_value);
}
cleanup:
bson_destroy(&bdocument);
bson_destroy(&boptions);
bson_clear(&bson_out);
} /* }}} */
/* {{{ proto void MongoDB\Driver\BulkWrite::update(array|object $query, array|object $newObj[, array $updateOptions = array()])
   Adds an update operation to the BulkWrite */
static PHP_METHOD(BulkWrite, update)
{
	php_phongo_bulkwrite_t *intern;
	zval *zquery, *zupdate, *zoptions = NULL;
	bson_t bquery = BSON_INITIALIZER, bupdate = BSON_INITIALIZER, boptions = BSON_INITIALIZER;
	bson_error_t error = {0};
	SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_used)
	intern = Z_BULKWRITE_OBJ_P(getThis());
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "AA|a!", &zquery, &zupdate, &zoptions) == FAILURE) {
		return;
	}
	/* Convert query and update documents to BSON; conversion errors throw. */
	php_phongo_zval_to_bson(zquery, PHONGO_BSON_NONE, &bquery, NULL TSRMLS_CC);
	if (EG(exception)) {
		goto cleanup;
	}
	php_phongo_zval_to_bson(zupdate, PHONGO_BSON_NONE, &bupdate, NULL TSRMLS_CC);
	if (EG(exception)) {
		goto cleanup;
	}
	if (!php_phongo_bulkwrite_update_apply_options(&boptions, zoptions TSRMLS_CC)) {
		goto cleanup;
	}
	/* A top-level field name containing "$" marks newObj as an update
	 * document; otherwise it is treated as a full replacement document. */
	if (php_phongo_bulkwrite_update_has_operators(&bupdate)) {
		if (zoptions && php_array_existsc(zoptions, "multi") && php_array_fetchc_bool(zoptions, "multi")) {
			if (!mongoc_bulk_operation_update_many_with_opts(intern->bulk, &bquery, &bupdate, &boptions, &error)) {
				phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
				goto cleanup;
			}
		} else {
			if (!mongoc_bulk_operation_update_one_with_opts(intern->bulk, &bquery, &bupdate, &boptions, &error)) {
				phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
				goto cleanup;
			}
		}
	} else {
		/* Replacement documents may only target a single document. */
		if (zoptions && php_array_existsc(zoptions, "multi") && php_array_fetchc_bool(zoptions, "multi")) {
			phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Replacement document conflicts with true \"multi\" option");
			goto cleanup;
		}
		if (!mongoc_bulk_operation_replace_one_with_opts(intern->bulk, &bquery, &bupdate, &boptions, &error)) {
			phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
			goto cleanup;
		}
	}
	intern->num_ops++;
cleanup:
	bson_destroy(&bquery);
	bson_destroy(&bupdate);
	bson_destroy(&boptions);
} /* }}} */
/* {{{ proto void MongoDB\Driver\BulkWrite::delete(array|object $query[, array $deleteOptions = array()])
   Adds a delete operation to the BulkWrite */
static PHP_METHOD(BulkWrite, delete)
{
	php_phongo_bulkwrite_t *intern;
	zval *zquery, *zoptions = NULL;
	bson_t bquery = BSON_INITIALIZER, boptions = BSON_INITIALIZER;
	bson_error_t error = {0};
	SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_used)
	intern = Z_BULKWRITE_OBJ_P(getThis());
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A|a!", &zquery, &zoptions) == FAILURE) {
		return;
	}
	php_phongo_zval_to_bson(zquery, PHONGO_BSON_NONE, &bquery, NULL TSRMLS_CC);
	if (EG(exception)) {
		goto cleanup;
	}
	if (!php_phongo_bulkwrite_delete_apply_options(&boptions, zoptions TSRMLS_CC)) {
		goto cleanup;
	}
	/* A truthy "limit" option removes at most one matching document;
	 * otherwise all matching documents are removed. */
	if (zoptions && php_array_existsc(zoptions, "limit") && php_array_fetchc_bool(zoptions, "limit")) {
		if (!mongoc_bulk_operation_remove_one_with_opts(intern->bulk, &bquery, &boptions, &error)) {
			phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
			goto cleanup;
		}
	} else {
		if (!mongoc_bulk_operation_remove_many_with_opts(intern->bulk, &bquery, &boptions, &error)) {
			phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
			goto cleanup;
		}
	}
	intern->num_ops++;
cleanup:
	bson_destroy(&bquery);
	bson_destroy(&boptions);
} /* }}} */
/* {{{ proto integer MongoDB\Driver\BulkWrite::count()
   Returns the number of operations that have been added to the BulkWrite */
static PHP_METHOD(BulkWrite, count)
{
	php_phongo_bulkwrite_t *intern;
	SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_used)
	intern = Z_BULKWRITE_OBJ_P(getThis());
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	/* num_ops counts queued operations, not documents affected. */
	RETURN_LONG(intern->num_ops);
} /* }}} */
/* {{{ MongoDB\Driver\BulkWrite function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_BulkWrite___construct, 0, 0, 0)
	ZEND_ARG_ARRAY_INFO(0, options, 1)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_BulkWrite_insert, 0, 0, 1)
	ZEND_ARG_INFO(0, document)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_BulkWrite_update, 0, 0, 2)
	ZEND_ARG_INFO(0, query)
	ZEND_ARG_INFO(0, newObj)
	ZEND_ARG_ARRAY_INFO(0, updateOptions, 1)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_BulkWrite_delete, 0, 0, 1)
	ZEND_ARG_INFO(0, query)
	ZEND_ARG_ARRAY_INFO(0, deleteOptions, 1)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_BulkWrite_void, 0, 0, 0)
ZEND_END_ARG_INFO()
/* __wakeup is routed to a disabled stub to prevent unserialization. */
static zend_function_entry php_phongo_bulkwrite_me[] = {
	PHP_ME(BulkWrite, __construct, ai_BulkWrite___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
	PHP_ME(BulkWrite, insert, ai_BulkWrite_insert, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
	PHP_ME(BulkWrite, update, ai_BulkWrite_update, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
	PHP_ME(BulkWrite, delete, ai_BulkWrite_delete, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
	PHP_ME(BulkWrite, count, ai_BulkWrite_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
	ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_BulkWrite_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
	PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\BulkWrite object handlers */
static zend_object_handlers php_phongo_handler_bulkwrite;
/* Frees a BulkWrite object and the resources it owns. */
static void php_phongo_bulkwrite_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
	php_phongo_bulkwrite_t *intern = Z_OBJ_BULKWRITE(object);
	zend_object_std_dtor(&intern->std TSRMLS_CC);
	if (intern->bulk) {
		mongoc_bulk_operation_destroy(intern->bulk);
	}
	if (intern->database) {
		efree(intern->database);
	}
	if (intern->collection) {
		efree(intern->collection);
	}
#if PHP_VERSION_ID < 70000
	/* On PHP 5, the object struct itself must be freed manually. */
	efree(intern);
#endif
} /* }}} */
/* create_object handler: allocates and initializes a BulkWrite object. */
static phongo_create_object_retval php_phongo_bulkwrite_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
	php_phongo_bulkwrite_t *intern = NULL;
	intern = PHONGO_ALLOC_OBJECT_T(php_phongo_bulkwrite_t, class_type);
	zend_object_std_init(&intern->std, class_type TSRMLS_CC);
	object_properties_init(&intern->std, class_type);
#if PHP_VERSION_ID >= 70000
	intern->std.handlers = &php_phongo_handler_bulkwrite;
	return &intern->std;
#else
	{
		/* PHP 5: register the object in the store with its free callback. */
		zend_object_value retval;
		retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_bulkwrite_free_object, NULL TSRMLS_CC);
		retval.handlers = &php_phongo_handler_bulkwrite;
		return retval;
	}
#endif
} /* }}} */
/* get_debug_info handler: builds the array shown by var_dump()/print_r(). */
static HashTable *php_phongo_bulkwrite_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
#if PHP_VERSION_ID >= 70000
	zval retval;
#else
	zval retval = zval_used_for_init;
#endif
	php_phongo_bulkwrite_t *intern = NULL;
	/* The returned HashTable is temporary and freed by the caller. */
	*is_temp = 1;
	intern = Z_BULKWRITE_OBJ_P(object);
	array_init(&retval);
	if (intern->database) {
		ADD_ASSOC_STRING(&retval, "database", intern->database);
	} else {
		ADD_ASSOC_NULL_EX(&retval, "database");
	}
	if (intern->collection) {
		ADD_ASSOC_STRING(&retval, "collection", intern->collection);
	} else {
		ADD_ASSOC_NULL_EX(&retval, "collection");
	}
	ADD_ASSOC_BOOL_EX(&retval, "ordered", intern->ordered);
	/* bypass uses a sentinel (-1) to distinguish "never set" from false. */
	if (intern->bypass != PHONGO_BULKWRITE_BYPASS_UNSET) {
		ADD_ASSOC_BOOL_EX(&retval, "bypassDocumentValidation", intern->bypass);
	} else {
		ADD_ASSOC_NULL_EX(&retval, "bypassDocumentValidation");
	}
	ADD_ASSOC_BOOL_EX(&retval, "executed", intern->executed);
	ADD_ASSOC_LONG_EX(&retval, "server_id", mongoc_bulk_operation_get_hint(intern->bulk));
	if (mongoc_bulk_operation_get_write_concern(intern->bulk)) {
#if PHP_VERSION_ID >= 70000
		zval write_concern;
		php_phongo_write_concern_to_zval(&write_concern, mongoc_bulk_operation_get_write_concern(intern->bulk));
		ADD_ASSOC_ZVAL_EX(&retval, "write_concern", &write_concern);
#else
		zval *write_concern = NULL;
		MAKE_STD_ZVAL(write_concern);
		php_phongo_write_concern_to_zval(write_concern, mongoc_bulk_operation_get_write_concern(intern->bulk));
		ADD_ASSOC_ZVAL_EX(&retval, "write_concern", write_concern);
#endif
	} else {
		ADD_ASSOC_NULL_EX(&retval, "write_concern");
	}
	return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
/* Registers the MongoDB\Driver\BulkWrite class during MINIT. */
void php_phongo_bulkwrite_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
	zend_class_entry ce;
	INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "BulkWrite", php_phongo_bulkwrite_me);
	php_phongo_bulkwrite_ce = zend_register_internal_class(&ce TSRMLS_CC);
	php_phongo_bulkwrite_ce->create_object = php_phongo_bulkwrite_create_object;
	PHONGO_CE_FINAL(php_phongo_bulkwrite_ce);
	PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_bulkwrite_ce);
	/* Install custom handlers on top of the standard object handlers. */
	memcpy(&php_phongo_handler_bulkwrite, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
	php_phongo_handler_bulkwrite.get_debug_info = php_phongo_bulkwrite_get_debug_info;
#if PHP_VERSION_ID >= 70000
	php_phongo_handler_bulkwrite.free_obj = php_phongo_bulkwrite_free_object;
	php_phongo_handler_bulkwrite.offset = XtOffsetOf(php_phongo_bulkwrite_t, std);
#endif
	/* Implementing Countable lets count($bulk) call BulkWrite::count(). */
	zend_class_implements(php_phongo_bulkwrite_ce TSRMLS_CC, 1, spl_ce_Countable);
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/MongoDB/Command.c b/mongodb-1.4.2/src/MongoDB/Command.c
similarity index 66%
rename from mongodb-1.3.4/src/MongoDB/Command.c
rename to mongodb-1.4.2/src/MongoDB/Command.c
index bcd67b0a..f25759bd 100644
--- a/mongodb-1.3.4/src/MongoDB/Command.c
+++ b/mongodb-1.4.2/src/MongoDB/Command.c
@@ -1,174 +1,236 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
+#include "php_array_api.h"
#include "phongo_compat.h"
#include "php_phongo.h"
#include "php_bson.h"
zend_class_entry *php_phongo_command_ce;
-/* {{{ proto void MongoDB\Driver\Command::__construct(array|object $document)
+/* Initialize the "maxAwaitTimeMS" option. Returns true on success; otherwise,
+ * false is returned and an exception is thrown.
+ *
+ * The "maxAwaitTimeMS" option is assigned to the cursor after query execution
+ * via mongoc_cursor_set_max_await_time_ms(). */
+static bool php_phongo_command_init_max_await_time_ms(php_phongo_command_t *intern, zval *options TSRMLS_DC) /* {{{ */
+{
+ if (php_array_existsc(options, "maxAwaitTimeMS")) {
+ int64_t max_await_time_ms = php_array_fetchc_long(options, "maxAwaitTimeMS");
+
+ if (max_await_time_ms < 0) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"maxAwaitTimeMS\" option to be >= 0, %" PRId64 " given", max_await_time_ms);
+ return false;
+ }
+
+ if (max_await_time_ms > UINT32_MAX) {
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"maxAwaitTimeMS\" option to be <= %" PRIu32 ", %" PRId64 " given", UINT32_MAX, max_await_time_ms);
+ return false;
+ }
+
+ intern->max_await_time_ms = (uint32_t) max_await_time_ms;
+ }
+
+ return true;
+} /* }}} */
+
+/* Initializes the php_phongo_command_init from options argument. This
+ * function will fall back to a modifier in the absence of a top-level option
+ * (where applicable). */
+static bool php_phongo_command_init(php_phongo_command_t *intern, zval *filter, zval *options TSRMLS_DC) /* {{{ */
+{
+ bson_iter_t iter;
+ bson_iter_t sub_iter;
+
+ intern->bson = bson_new();
+
+ php_phongo_zval_to_bson(filter, PHONGO_BSON_NONE, intern->bson, NULL TSRMLS_CC);
+
+ /* Note: if any exceptions are thrown, we can simply return as PHP will
+ * invoke php_phongo_query_free_object to destruct the object. */
+ if (EG(exception)) {
+ return false;
+ }
+
+ if (bson_iter_init(&iter, intern->bson) && bson_iter_find_descendant(&iter, "cursor.batchSize", &sub_iter) && BSON_ITER_HOLDS_INT(&sub_iter)) {
+ int64_t batch_size = bson_iter_as_int64(&sub_iter);
+
+ if (batch_size >= 0 && batch_size <= UINT32_MAX) {
+ intern->batch_size = (uint32_t) batch_size;
+ }
+ }
+
+ if (!options) {
+ return true;
+ }
+
+ if (!php_phongo_command_init_max_await_time_ms(intern, options TSRMLS_CC)) {
+ return false;
+ }
+
+ return true;
+} /* }}} */
+
+/* {{{ proto void MongoDB\Driver\Command::__construct(array|object $document[, array $options = array()])
Constructs a new Command */
static PHP_METHOD(Command, __construct)
{
php_phongo_command_t *intern;
zend_error_handling error_handling;
zval *document;
- bson_t *bson = bson_new();
+ zval *options = NULL;
SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
intern = Z_COMMAND_OBJ_P(getThis());
- if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A", &document) == FAILURE) {
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A|a!", &document, &options) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
-
- php_phongo_zval_to_bson(document, PHONGO_BSON_NONE, bson, NULL TSRMLS_CC);
- intern->bson = bson;
+ php_phongo_command_init(intern, document, options TSRMLS_CC);
} /* }}} */
/* {{{ MongoDB\Driver\Command function entries */
/* NOTE(review): ai_Command___construct does not declare the optional $options
 * argument that __construct now parses ("A|a!"); reflection will under-report
 * the signature — confirm whether the arginfo should be extended. */
ZEND_BEGIN_ARG_INFO_EX(ai_Command___construct, 0, 0, 1)
	ZEND_ARG_INFO(0, document)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Command_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_command_me[] = {
	PHP_ME(Command, __construct, ai_Command___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
	ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Command_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
	PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\Command object handlers */
static zend_object_handlers php_phongo_handler_command;
/* Frees a Command object and its owned BSON document. */
static void php_phongo_command_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
	php_phongo_command_t *intern = Z_OBJ_COMMAND(object);
	zend_object_std_dtor(&intern->std TSRMLS_CC);
	if (intern->bson) {
		bson_clear(&intern->bson);
	}
#if PHP_VERSION_ID < 70000
	/* On PHP 5, the object struct itself must be freed manually. */
	efree(intern);
#endif
} /* }}} */
/* create_object handler: allocates and initializes a Command object. */
static phongo_create_object_retval php_phongo_command_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
	php_phongo_command_t *intern = NULL;
	intern = PHONGO_ALLOC_OBJECT_T(php_phongo_command_t, class_type);
	zend_object_std_init(&intern->std, class_type TSRMLS_CC);
	object_properties_init(&intern->std, class_type);
#if PHP_VERSION_ID >= 70000
	intern->std.handlers = &php_phongo_handler_command;
	return &intern->std;
#else
	{
		/* PHP 5: register the object in the store with its free callback. */
		zend_object_value retval;
		retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_command_free_object, NULL TSRMLS_CC);
		retval.handlers = &php_phongo_handler_command;
		return retval;
	}
#endif
} /* }}} */
/* get_debug_info handler: exposes the command document for var_dump(). */
static HashTable *php_phongo_command_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
	php_phongo_command_t *intern;
#if PHP_VERSION_ID >= 70000
	zval retval;
#else
	zval retval = zval_used_for_init;
#endif
	/* The returned HashTable is temporary and freed by the caller. */
	*is_temp = 1;
	intern = Z_COMMAND_OBJ_P(object);
	array_init_size(&retval, 1);
	if (intern->bson) {
#if PHP_VERSION_ID >= 70000
		zval zv;
#else
		zval *zv;
#endif
		php_phongo_bson_to_zval(bson_get_data(intern->bson), intern->bson->len, &zv);
#if PHP_VERSION_ID >= 70000
		ADD_ASSOC_ZVAL_EX(&retval, "command", &zv);
#else
		ADD_ASSOC_ZVAL_EX(&retval, "command", zv);
#endif
	} else {
		ADD_ASSOC_NULL_EX(&retval, "command");
	}
	return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
/* Registers the MongoDB\Driver\Command class during MINIT. */
void php_phongo_command_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
	zend_class_entry ce;
	INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Command", php_phongo_command_me);
	php_phongo_command_ce = zend_register_internal_class(&ce TSRMLS_CC);
	php_phongo_command_ce->create_object = php_phongo_command_create_object;
	PHONGO_CE_FINAL(php_phongo_command_ce);
	PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_command_ce);
	/* Install custom handlers on top of the standard object handlers. */
	memcpy(&php_phongo_handler_command, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
	php_phongo_handler_command.get_debug_info = php_phongo_command_get_debug_info;
#if PHP_VERSION_ID >= 70000
	php_phongo_handler_command.free_obj = php_phongo_command_free_object;
	php_phongo_handler_command.offset = XtOffsetOf(php_phongo_command_t, std);
#endif
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/MongoDB/Cursor.c b/mongodb-1.4.2/src/MongoDB/Cursor.c
similarity index 96%
rename from mongodb-1.3.4/src/MongoDB/Cursor.c
rename to mongodb-1.4.2/src/MongoDB/Cursor.c
index 49cb25c7..68ecae4b 100644
--- a/mongodb-1.3.4/src/MongoDB/Cursor.c
+++ b/mongodb-1.4.2/src/MongoDB/Cursor.c
@@ -1,535 +1,553 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include <ext/spl/spl_iterators.h>
#include "phongo_compat.h"
#include "php_phongo.h"
#include "php_bson.h"
zend_class_entry *php_phongo_cursor_ce;
/* Frees the cursor's current element, if any, and marks it undefined. */
static void php_phongo_cursor_free_current(php_phongo_cursor_t *cursor) /* {{{ */
{
	if (!Z_ISUNDEF(cursor->visitor_data.zchild)) {
		zval_ptr_dtor(&cursor->visitor_data.zchild);
		ZVAL_UNDEF(&cursor->visitor_data.zchild);
	}
} /* }}} */
/* {{{ MongoDB\Driver\Cursor iterator handlers */
/* Releases the iterator's reference to the Cursor object. */
static void php_phongo_cursor_iterator_dtor(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
	php_phongo_cursor_iterator *cursor_it = (php_phongo_cursor_iterator *)iter;
	if (!Z_ISUNDEF(cursor_it->intern.data)) {
#if PHP_VERSION_ID >= 70000
		zval_ptr_dtor(&cursor_it->intern.data);
#else
		zval_ptr_dtor((zval**)&cursor_it->intern.data);
		cursor_it->intern.data = NULL;
#endif
	}
#if PHP_VERSION_ID < 70000
	/* On PHP 5, the iterator struct allocated in get_iterator is freed here. */
	efree(cursor_it);
#endif
} /* }}} */
/* Reports whether the iterator currently holds a document to yield. */
static int php_phongo_cursor_iterator_valid(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
	php_phongo_cursor_iterator *cursor_it = (php_phongo_cursor_iterator *)iter;
	return Z_ISUNDEF(cursor_it->cursor->visitor_data.zchild) ? FAILURE : SUCCESS;
} /* }}} */
/* Exposes the zero-based iteration position as the foreach key. */
static void php_phongo_cursor_iterator_get_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) /* {{{ */
{
	php_phongo_cursor_t *cursor = ((php_phongo_cursor_iterator *)iter)->cursor;
	ZVAL_LONG(key, cursor->current);
} /* }}} */
#if PHP_VERSION_ID < 70000
/* Yields the cursor's current document (PHP 5 calling convention). */
static void php_phongo_cursor_iterator_get_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) /* {{{ */
{
	php_phongo_cursor_t *cursor = ((php_phongo_cursor_iterator *)iter)->cursor;
	*data = &cursor->visitor_data.zchild;
} /* }}} */
#else
/* Yields the cursor's current document (PHP 7 calling convention). */
static zval* php_phongo_cursor_iterator_get_current_data(zend_object_iterator *iter) /* {{{ */
{
	php_phongo_cursor_t *cursor = ((php_phongo_cursor_iterator *)iter)->cursor;
	return &cursor->visitor_data.zchild;
} /* }}} */
#endif
static void php_phongo_cursor_iterator_move_forward(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
php_phongo_cursor_iterator *cursor_it = (php_phongo_cursor_iterator *)iter;
php_phongo_cursor_t *cursor = cursor_it->cursor;
const bson_t *doc;
php_phongo_cursor_free_current(cursor);
- cursor->current++;
+
+ /* If the cursor has already advanced, increment its position. Otherwise,
+ * the first call to mongoc_cursor_next() will be made below and we should
+ * leave its position at zero. */
+ if (cursor->advanced) {
+ cursor->current++;
+ } else {
+ cursor->advanced = true;
+ }
if (mongoc_cursor_next(cursor->cursor, &doc)) {
php_phongo_bson_to_zval_ex(bson_get_data(doc), doc->len, &cursor->visitor_data);
} else {
bson_error_t error;
if (mongoc_cursor_error(cursor->cursor, &error)) {
/* Intentionally not destroying the cursor as it will happen
* naturally now that there are no more results */
phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
}
}
} /* }}} */
static void php_phongo_cursor_iterator_rewind(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
php_phongo_cursor_iterator *cursor_it = (php_phongo_cursor_iterator *)iter;
php_phongo_cursor_t *cursor = cursor_it->cursor;
const bson_t *doc;
+ /* If the cursor was never advanced (e.g. command cursor), do so now */
+ if (!cursor->advanced) {
+ cursor->advanced = true;
+
+ if (!phongo_cursor_advance_and_check_for_error(cursor->cursor TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ return;
+ }
+ }
+
if (cursor->current > 0) {
phongo_throw_exception(PHONGO_ERROR_LOGIC TSRMLS_CC, "Cursors cannot rewind after starting iteration");
return;
}
php_phongo_cursor_free_current(cursor);
doc = mongoc_cursor_current(cursor->cursor);
if (doc) {
php_phongo_bson_to_zval_ex(bson_get_data(doc), doc->len, &cursor->visitor_data);
}
} /* }}} */
/* Iterator handler table installed on MongoDB\Driver\Cursor iterators. */
static zend_object_iterator_funcs php_phongo_cursor_iterator_funcs = {
	php_phongo_cursor_iterator_dtor,
	php_phongo_cursor_iterator_valid,
	php_phongo_cursor_iterator_get_current_data,
	php_phongo_cursor_iterator_get_current_key,
	php_phongo_cursor_iterator_move_forward,
	php_phongo_cursor_iterator_rewind,
	NULL /* invalidate_current is not used */
};
/* get_iterator handler: creates the foreach iterator for a Cursor object. */
static zend_object_iterator *php_phongo_cursor_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) /* {{{ */
{
	php_phongo_cursor_iterator *cursor_it = NULL;
	php_phongo_cursor_t *cursor = Z_CURSOR_OBJ_P(object);
	if (by_ref) {
		zend_error(E_ERROR, "An iterator cannot be used with foreach by reference");
	}
	/* Cursors are forward-only; a second iterator would observe a partially
	 * consumed result set, so only one iterator may ever be created. */
	if (cursor->got_iterator) {
		phongo_throw_exception(PHONGO_ERROR_LOGIC TSRMLS_CC, "Cursors cannot yield multiple iterators");
		return NULL;
	}
	cursor->got_iterator = 1;
	cursor_it = ecalloc(1, sizeof(php_phongo_cursor_iterator));
#if PHP_VERSION_ID >= 70000
	zend_iterator_init(&cursor_it->intern);
#endif
#if PHP_VERSION_ID >= 70000
	ZVAL_COPY(&cursor_it->intern.data, object);
#else
	Z_ADDREF_P(object);
	cursor_it->intern.data = (void*)object;
#endif
	cursor_it->intern.funcs = &php_phongo_cursor_iterator_funcs;
	cursor_it->cursor = cursor;
	/* cursor_it->current should already be allocated to zero */
	php_phongo_cursor_free_current(cursor_it->cursor);
	return &cursor_it->intern;
} /* }}} */
/* }}} */
/* {{{ proto void MongoDB\Driver\Cursor::setTypeMap(array $typemap)
   Sets a type map to use for BSON unserialization */
static PHP_METHOD(Cursor, setTypeMap)
{
	php_phongo_cursor_t *intern;
	php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
	zval *typemap = NULL;
	bool restore_current_element = false;
	SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
	intern = Z_CURSOR_OBJ_P(getThis());
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a!", &typemap) == FAILURE) {
		return;
	}
	/* Validation failures leave the cursor's existing type map untouched. */
	if (!php_phongo_bson_typemap_to_state(typemap, &state.map TSRMLS_CC)) {
		return;
	}
	/* Check if the existing element needs to be freed before we overwrite
	 * visitor_data, which contains the only reference to it. */
	if (!Z_ISUNDEF(intern->visitor_data.zchild)) {
		php_phongo_cursor_free_current(intern);
		restore_current_element = true;
	}
	intern->visitor_data = state;
	/* If the cursor has a current element, we just freed it and should restore
	 * it with a new type map applied. */
	if (restore_current_element && mongoc_cursor_current(intern->cursor)) {
		const bson_t *doc = mongoc_cursor_current(intern->cursor);
		php_phongo_bson_to_zval_ex(bson_get_data(doc), doc->len, &intern->visitor_data);
	}
} /* }}} */
/* spl_iterator_apply callback: appends the iterator's current document to the
 * array passed via puser. Stops iteration on error or missing data. */
static int php_phongo_cursor_to_array_apply(zend_object_iterator *iter, void *puser TSRMLS_DC) /* {{{ */
{
#if PHP_VERSION_ID >= 70000
	zval *data;
	zval *return_value = (zval*)puser;
	data = iter->funcs->get_current_data(iter TSRMLS_CC);
	if (EG(exception)) {
		return ZEND_HASH_APPLY_STOP;
	}
	if (Z_ISUNDEF_P(data)) {
		return ZEND_HASH_APPLY_STOP;
	}
	/* Add a reference before handing the document to the result array. */
	Z_TRY_ADDREF_P(data);
	add_next_index_zval(return_value, data);
#else
	zval **data;
	zval *return_value = (zval*)puser;
	iter->funcs->get_current_data(iter, &data TSRMLS_CC);
	if (EG(exception)) {
		return ZEND_HASH_APPLY_STOP;
	}
	if (data == NULL || *data == NULL) {
		return ZEND_HASH_APPLY_STOP;
	}
	Z_ADDREF_PP(data);
	add_next_index_zval(return_value, *data);
#endif
	return ZEND_HASH_APPLY_KEEP;
} /* }}} */
/* {{{ proto array MongoDB\Driver\Cursor::toArray()
   Returns an array of all result documents for this cursor */
static PHP_METHOD(Cursor, toArray)
{
	SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	array_init(return_value);
	/* Drain the cursor via SPL; on failure, discard any partial results. */
	if (spl_iterator_apply(getThis(), php_phongo_cursor_to_array_apply, (void*)return_value TSRMLS_CC) != SUCCESS) {
		zval_dtor(return_value);
		RETURN_NULL();
	}
} /* }}} */
/* {{{ proto MongoDB\Driver\CursorId MongoDB\Driver\Cursor::getId()
   Returns the CursorId for this cursor */
static PHP_METHOD(Cursor, getId)
{
	php_phongo_cursor_t *intern;
	SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
	intern = Z_CURSOR_OBJ_P(getThis());
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	/* Wraps the server-side cursor id in a CursorId object. */
	php_phongo_cursor_id_new_from_id(return_value, mongoc_cursor_get_id(intern->cursor) TSRMLS_CC);
} /* }}} */
/* {{{ proto MongoDB\Driver\Server MongoDB\Driver\Cursor::getServer()
   Returns the Server object to which this cursor is attached */
static PHP_METHOD(Cursor, getServer)
{
	php_phongo_cursor_t *intern;
	SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
	intern = Z_CURSOR_OBJ_P(getThis());
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	/* Builds a Server object from the client and stored server id. */
	phongo_server_init(return_value, intern->client, intern->server_id TSRMLS_CC);
} /* }}} */
/* {{{ proto boolean MongoDB\Driver\Cursor::isDead()
   Checks if a cursor is still alive */
static PHP_METHOD(Cursor, isDead)
{
	php_phongo_cursor_t *intern;

	SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

	/* No arguments are accepted */
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	intern = Z_CURSOR_OBJ_P(getThis());

	/* libmongoc reports liveness; invert it to answer "is dead" */
	RETURN_BOOL(!mongoc_cursor_is_alive(intern->cursor));
} /* }}} */
/* {{{ MongoDB\Driver\Cursor function entries */
/* setTypeMap() takes exactly one required array argument */
ZEND_BEGIN_ARG_INFO_EX(ai_Cursor_setTypeMap, 0, 0, 1)
ZEND_ARG_ARRAY_INFO(0, typemap, 0)
ZEND_END_ARG_INFO()
/* Shared arginfo for all zero-argument Cursor methods */
ZEND_BEGIN_ARG_INFO_EX(ai_Cursor_void, 0, 0, 0)
ZEND_END_ARG_INFO()
/* Method table for MongoDB\Driver\Cursor; all methods are final, and the
 * constructor is private so instances can only be created internally */
static zend_function_entry php_phongo_cursor_me[] = {
PHP_ME(Cursor, setTypeMap, ai_Cursor_setTypeMap, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Cursor, toArray, ai_Cursor_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Cursor, getId, ai_Cursor_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Cursor, getServer, ai_Cursor_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Cursor, isDead, ai_Cursor_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Cursor_void, ZEND_ACC_PRIVATE|ZEND_ACC_FINAL)
ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Cursor_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\Cursor object handlers */
static zend_object_handlers php_phongo_handler_cursor;
/* Frees a Cursor object: destroys the libmongoc cursor, releases the owned
 * namespace strings and zvals, and frees the current-document state. */
static void php_phongo_cursor_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
php_phongo_cursor_t *intern = Z_OBJ_CURSOR(object);
/* Tear down standard object storage (properties) first */
zend_object_std_dtor(&intern->std TSRMLS_CC);
if (intern->cursor) {
mongoc_cursor_destroy(intern->cursor);
}
/* database/collection are efree'd strings owned by this object */
if (intern->database) {
efree(intern->database);
}
if (intern->collection) {
efree(intern->collection);
}
/* Release the references held on the originating query/command and the
 * read preference, if any were set */
if (!Z_ISUNDEF(intern->query)) {
zval_ptr_dtor(&intern->query);
}
if (!Z_ISUNDEF(intern->command)) {
zval_ptr_dtor(&intern->command);
}
if (!Z_ISUNDEF(intern->read_preference)) {
zval_ptr_dtor(&intern->read_preference);
}
php_phongo_cursor_free_current(intern);
#if PHP_VERSION_ID < 70000
/* PHP 5: object memory is allocated/freed manually */
efree(intern);
#endif
} /* }}} */
/* create_object handler for MongoDB\Driver\Cursor: allocates the internal
 * php_phongo_cursor_t, initializes standard object storage, and installs the
 * custom object handlers. */
static phongo_create_object_retval php_phongo_cursor_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
php_phongo_cursor_t *intern = NULL;
intern = PHONGO_ALLOC_OBJECT_T(php_phongo_cursor_t, class_type);
zend_object_std_init(&intern->std, class_type TSRMLS_CC);
object_properties_init(&intern->std, class_type);
#if PHP_VERSION_ID >= 70000
/* PHP 7: handlers live on the embedded zend_object */
intern->std.handlers = &php_phongo_handler_cursor;
return &intern->std;
#else
/* PHP 5: register with the object store and return handle + handlers */
{
zend_object_value retval;
retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_cursor_free_object, NULL TSRMLS_CC);
retval.handlers = &php_phongo_handler_cursor;
return retval;
}
#endif
} /* }}} */
/* get_debug_info handler for MongoDB\Driver\Cursor (used by var_dump() etc.).
 *
 * Builds a temporary array exposing the cursor's namespace, originating
 * query/command, read preference, liveness, iteration position, current
 * document, and the Server it is attached to. "*is_temp = 1" tells the engine
 * to free the returned HashTable after use. */
static HashTable *php_phongo_cursor_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
php_phongo_cursor_t *intern;
#if PHP_VERSION_ID >= 70000
zval retval;
#else
zval retval = zval_used_for_init;
#endif
*is_temp = 1;
intern = Z_CURSOR_OBJ_P(object);
/* Nine keys are added below: database, collection, query, command,
 * readPreference, isDead, currentIndex, currentDocument, server */
array_init_size(&retval, 9);
if (intern->database) {
ADD_ASSOC_STRING(&retval, "database", intern->database);
} else {
ADD_ASSOC_NULL_EX(&retval, "database");
}
if (intern->collection) {
ADD_ASSOC_STRING(&retval, "collection", intern->collection);
} else {
ADD_ASSOC_NULL_EX(&retval, "collection");
}
/* For each stored zval, add a reference since the debug array shares it */
if (!Z_ISUNDEF(intern->query)) {
#if PHP_VERSION_ID >= 70000
ADD_ASSOC_ZVAL_EX(&retval, "query", &intern->query);
Z_ADDREF(intern->query);
#else
ADD_ASSOC_ZVAL_EX(&retval, "query", intern->query);
Z_ADDREF_P(intern->query);
#endif
} else {
ADD_ASSOC_NULL_EX(&retval, "query");
}
if (!Z_ISUNDEF(intern->command)) {
#if PHP_VERSION_ID >= 70000
ADD_ASSOC_ZVAL_EX(&retval, "command", &intern->command);
Z_ADDREF(intern->command);
#else
ADD_ASSOC_ZVAL_EX(&retval, "command", intern->command);
Z_ADDREF_P(intern->command);
#endif
} else {
ADD_ASSOC_NULL_EX(&retval, "command");
}
if (!Z_ISUNDEF(intern->read_preference)) {
#if PHP_VERSION_ID >= 70000
ADD_ASSOC_ZVAL_EX(&retval, "readPreference", &intern->read_preference);
Z_ADDREF(intern->read_preference);
#else
ADD_ASSOC_ZVAL_EX(&retval, "readPreference", intern->read_preference);
Z_ADDREF_P(intern->read_preference);
#endif
} else {
ADD_ASSOC_NULL_EX(&retval, "readPreference");
}
ADD_ASSOC_BOOL_EX(&retval, "isDead", !mongoc_cursor_is_alive(intern->cursor));
ADD_ASSOC_LONG_EX(&retval, "currentIndex", intern->current);
if (!Z_ISUNDEF(intern->visitor_data.zchild)) {
#if PHP_VERSION_ID >= 70000
ADD_ASSOC_ZVAL_EX(&retval, "currentDocument", &intern->visitor_data.zchild);
/* NOTE(review): unlike the zvals above, no addref is taken here on PHP 7
 * (the call below is deliberately commented out) — presumably the add
 * macro already accounts for the reference; confirm against upstream. */
/*Z_ADDREF(intern->visitor_data.zchild);*/
#else
ADD_ASSOC_ZVAL_EX(&retval, "currentDocument", intern->visitor_data.zchild);
Z_ADDREF_P(intern->visitor_data.zchild);
#endif
} else {
ADD_ASSOC_NULL_EX(&retval, "currentDocument");
}
/* Create a fresh Server object for the server this cursor is attached to */
{
#if PHP_VERSION_ID >= 70000
zval server;
phongo_server_init(&server, intern->client, intern->server_id TSRMLS_CC);
ADD_ASSOC_ZVAL_EX(&retval, "server", &server);
#else
zval *server = NULL;
MAKE_STD_ZVAL(server);
phongo_server_init(server, intern->client, intern->server_id TSRMLS_CC);
ADD_ASSOC_ZVAL_EX(&retval, "server", server);
#endif
}
return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
/* MINIT helper: registers the MongoDB\Driver\Cursor class, installs its
 * object handlers (create/free/debug-info), disables serialization, wires the
 * custom iterator, and marks the class as implementing Traversable. */
void php_phongo_cursor_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
zend_class_entry ce;
INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Cursor", php_phongo_cursor_me);
php_phongo_cursor_ce = zend_register_internal_class(&ce TSRMLS_CC);
php_phongo_cursor_ce->create_object = php_phongo_cursor_create_object;
PHONGO_CE_FINAL(php_phongo_cursor_ce);
PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_cursor_ce);
/* foreach over a Cursor uses the driver's own iterator implementation */
php_phongo_cursor_ce->get_iterator = php_phongo_cursor_get_iterator;
/* Start from the standard handlers and override what Cursor customizes */
memcpy(&php_phongo_handler_cursor, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
php_phongo_handler_cursor.get_debug_info = php_phongo_cursor_get_debug_info;
#if PHP_VERSION_ID >= 70000
php_phongo_handler_cursor.free_obj = php_phongo_cursor_free_object;
/* PHP 7 embeds zend_object inside the struct; record its offset */
php_phongo_handler_cursor.offset = XtOffsetOf(php_phongo_cursor_t, std);
#endif
zend_class_implements(php_phongo_cursor_ce TSRMLS_CC, 1, zend_ce_traversable);
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/MongoDB/CursorId.c b/mongodb-1.4.2/src/MongoDB/CursorId.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/CursorId.c
rename to mongodb-1.4.2/src/MongoDB/CursorId.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/AuthenticationException.c b/mongodb-1.4.2/src/MongoDB/Exception/AuthenticationException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/AuthenticationException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/AuthenticationException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/BulkWriteException.c b/mongodb-1.4.2/src/MongoDB/Exception/BulkWriteException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/BulkWriteException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/BulkWriteException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/ConnectionException.c b/mongodb-1.4.2/src/MongoDB/Exception/ConnectionException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/ConnectionException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/ConnectionException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/ConnectionTimeoutException.c b/mongodb-1.4.2/src/MongoDB/Exception/ConnectionTimeoutException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/ConnectionTimeoutException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/ConnectionTimeoutException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/Exception.c b/mongodb-1.4.2/src/MongoDB/Exception/Exception.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/Exception.c
rename to mongodb-1.4.2/src/MongoDB/Exception/Exception.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/ExecutionTimeoutException.c b/mongodb-1.4.2/src/MongoDB/Exception/ExecutionTimeoutException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/ExecutionTimeoutException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/ExecutionTimeoutException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/InvalidArgumentException.c b/mongodb-1.4.2/src/MongoDB/Exception/InvalidArgumentException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/InvalidArgumentException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/InvalidArgumentException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/LogicException.c b/mongodb-1.4.2/src/MongoDB/Exception/LogicException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/LogicException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/LogicException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/RuntimeException.c b/mongodb-1.4.2/src/MongoDB/Exception/RuntimeException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/RuntimeException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/RuntimeException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/SSLConnectionException.c b/mongodb-1.4.2/src/MongoDB/Exception/SSLConnectionException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/SSLConnectionException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/SSLConnectionException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/UnexpectedValueException.c b/mongodb-1.4.2/src/MongoDB/Exception/UnexpectedValueException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/UnexpectedValueException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/UnexpectedValueException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Exception/WriteException.c b/mongodb-1.4.2/src/MongoDB/Exception/WriteException.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Exception/WriteException.c
rename to mongodb-1.4.2/src/MongoDB/Exception/WriteException.c
diff --git a/mongodb-1.3.4/src/MongoDB/Manager.c b/mongodb-1.4.2/src/MongoDB/Manager.c
similarity index 67%
rename from mongodb-1.3.4/src/MongoDB/Manager.c
rename to mongodb-1.4.2/src/MongoDB/Manager.c
index 5b03aa63..1f7c3331 100644
--- a/mongodb-1.3.4/src/MongoDB/Manager.c
+++ b/mongodb-1.4.2/src/MongoDB/Manager.c
@@ -1,648 +1,864 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_hash.h>
#include <Zend/zend_interfaces.h>
#include <ext/standard/file.h>
#include "php_array_api.h"
#include "phongo_compat.h"
#include "php_phongo.h"
#define PHONGO_MANAGER_URI_DEFAULT "mongodb://127.0.0.1/"
/**
* Manager abstracts a cluster of Server objects (i.e. socket connections).
*
* Typically, users will connect to a cluster using a URI, and the Manager will
* perform tasks such as replica set discovery and create the necessary Server
* objects. That said, it is also possible to create a Manager with an arbitrary
* collection of Server objects using the static factory method (this can be
* useful for testing or administration).
*
* Operation methods do not take socket-level options (e.g. socketTimeoutMS).
* Those options should be specified during construction.
*/
zend_class_entry *php_phongo_manager_ce;
/* Checks if driverOptions contains a stream context resource in the "context"
* key and incorporates any of its SSL options into the base array that did not
* already exist (i.e. array union). The "context" key is then unset from the
* base array.
*
* This handles the merging of any legacy SSL context options and also makes
* driverOptions suitable for serialization by removing the resource zval. */
static bool php_phongo_manager_merge_context_options(zval *zdriverOptions TSRMLS_DC) /* {{{ */
{
php_stream_context *context;
zval *zcontext, *zcontextOptions;
if (!php_array_existsc(zdriverOptions, "context")) {
return true;
}
zcontext = php_array_fetchc(zdriverOptions, "context");
context = php_stream_context_from_zval(zcontext, 1);
if (!context) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "\"context\" driver option is not a valid Stream-Context resource");
return false;
}
#if PHP_VERSION_ID >= 70000
zcontextOptions = php_array_fetchc_array(&context->options, "ssl");
#else
zcontextOptions = php_array_fetchc_array(context->options, "ssl");
#endif
if (!zcontextOptions) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Stream-Context resource does not contain \"ssl\" options array");
return false;
}
/* Perform array union (see: add_function() in zend_operators.c) */
#if PHP_VERSION_ID >= 70000
zend_hash_merge(Z_ARRVAL_P(zdriverOptions), Z_ARRVAL_P(zcontextOptions), zval_add_ref, 0);
#else
{
zval *tmp;
zend_hash_merge(Z_ARRVAL_P(zdriverOptions), Z_ARRVAL_P(zcontextOptions), (void (*)(void *pData)) zval_add_ref, (void *) &tmp, sizeof(zval *), 0);
}
#endif
php_array_unsetc(zdriverOptions, "context");
return true;
} /* }}} */
/* Prepare authMechanismProperties for BSON encoding by converting a boolean
* value for the "CANONICALIZE_HOST_NAME" option to a string.
*
* Note: URI options are case-insensitive, so we must iterate through the
* HashTable in order to detect options. */
static void php_phongo_manager_prep_authmechanismproperties(zval *properties TSRMLS_DC) /* {{{ */
{
HashTable *ht_data;
if (Z_TYPE_P(properties) != IS_ARRAY && Z_TYPE_P(properties) != IS_OBJECT) {
return;
}
ht_data = HASH_OF(properties);
#if PHP_VERSION_ID >= 70000
{
zend_string *string_key = NULL;
zend_ulong num_key = 0;
zval *property;
ZEND_HASH_FOREACH_KEY_VAL(ht_data, num_key, string_key, property) {
if (!string_key) {
continue;
}
/* URI options are case-insensitive */
if (!strcasecmp(ZSTR_VAL(string_key), "CANONICALIZE_HOST_NAME")) {
ZVAL_DEREF(property);
if (Z_TYPE_P(property) != IS_STRING && zend_is_true(property)) {
SEPARATE_ZVAL_NOREF(property);
ZVAL_NEW_STR(property, zend_string_init(ZEND_STRL("true"), 0));
}
}
} ZEND_HASH_FOREACH_END();
}
#else
{
HashPosition pos;
zval **property;
for (zend_hash_internal_pointer_reset_ex(ht_data, &pos);
zend_hash_get_current_data_ex(ht_data, (void **) &property, &pos) == SUCCESS;
zend_hash_move_forward_ex(ht_data, &pos)) {
char *string_key = NULL;
uint string_key_len = 0;
ulong num_key = 0;
if (HASH_KEY_IS_STRING != zend_hash_get_current_key_ex(ht_data, &string_key, &string_key_len, &num_key, 0, &pos)) {
continue;
}
/* URI options are case-insensitive */
if (!strcasecmp(string_key, "CANONICALIZE_HOST_NAME")) {
if (Z_TYPE_PP(property) != IS_STRING && zend_is_true(*property)) {
SEPARATE_ZVAL_IF_NOT_REF(property);
Z_TYPE_PP(property) = IS_STRING;
Z_STRVAL_PP(property) = estrndup("true", sizeof("true")-1);
Z_STRLEN_PP(property) = sizeof("true")-1;
}
}
}
}
#endif
return;
} /* }}} */
/* Prepare URI options for BSON encoding.
*
* Read preference tag sets must be an array of documents. In order to ensure
* that empty arrays serialize as empty documents, array elements will be
* converted to objects. php_phongo_read_preference_tags_are_valid() handles
* actual validation of the tag set structure.
*
* Auth mechanism properties must have string values, so a boolean true value
* for the "CANONICALIZE_HOST_NAME" property will be converted to "true".
*
* Note: URI options are case-insensitive, so we must iterate through the
* HashTable in order to detect options. */
static void php_phongo_manager_prep_uri_options(zval *options TSRMLS_DC) /* {{{ */
{
HashTable *ht_data;
if (Z_TYPE_P(options) != IS_ARRAY) {
return;
}
ht_data = HASH_OF(options);
#if PHP_VERSION_ID >= 70000
{
zend_string *string_key = NULL;
zend_ulong num_key = 0;
zval *option;
ZEND_HASH_FOREACH_KEY_VAL(ht_data, num_key, string_key, option) {
if (!string_key) {
continue;
}
if (!strcasecmp(ZSTR_VAL(string_key), MONGOC_URI_READPREFERENCETAGS)) {
ZVAL_DEREF(option);
SEPARATE_ZVAL_NOREF(option);
php_phongo_read_preference_prep_tagsets(option TSRMLS_CC);
continue;
}
if (!strcasecmp(ZSTR_VAL(string_key), MONGOC_URI_AUTHMECHANISMPROPERTIES)) {
ZVAL_DEREF(option);
SEPARATE_ZVAL_NOREF(option);
php_phongo_manager_prep_authmechanismproperties(option TSRMLS_CC);
continue;
}
} ZEND_HASH_FOREACH_END();
}
#else
{
HashPosition pos;
zval **option;
for (zend_hash_internal_pointer_reset_ex(ht_data, &pos);
zend_hash_get_current_data_ex(ht_data, (void **) &option, &pos) == SUCCESS;
zend_hash_move_forward_ex(ht_data, &pos)) {
char *string_key = NULL;
uint string_key_len = 0;
ulong num_key = 0;
if (HASH_KEY_IS_STRING != zend_hash_get_current_key_ex(ht_data, &string_key, &string_key_len, &num_key, 0, &pos)) {
continue;
}
if (!strcasecmp(string_key, MONGOC_URI_READPREFERENCETAGS)) {
SEPARATE_ZVAL_IF_NOT_REF(option);
php_phongo_read_preference_prep_tagsets(*option TSRMLS_CC);
continue;
}
if (!strcasecmp(string_key, MONGOC_URI_AUTHMECHANISMPROPERTIES)) {
SEPARATE_ZVAL_IF_NOT_REF(option);
php_phongo_manager_prep_authmechanismproperties(*option TSRMLS_CC);
continue;
}
}
}
#endif
return;
} /* }}} */
+/* Selects a server for an execute method. If "for_writes" is true, a primary
+ * will be selected. Otherwise, a read preference will be used to select the
+ * server. If zreadPreference is NULL, the client's read preference will be
+ * used.
+ *
+ * On success, server_id will be set and the function will return true;
+ * otherwise, false is returned and an exception is thrown. */
+static bool php_phongo_manager_select_server(bool for_writes, zval *zreadPreference, mongoc_client_t *client, uint32_t *server_id TSRMLS_DC) /* {{{ */
+{
+ const mongoc_read_prefs_t *read_preference = NULL;
+ mongoc_server_description_t *selected_server;
+ bson_error_t error;
+
+ if (!for_writes) {
+ read_preference = zreadPreference ? phongo_read_preference_from_zval(zreadPreference TSRMLS_CC) : mongoc_client_get_read_prefs(client);
+ }
+
+ selected_server = mongoc_client_select_server(client, for_writes, read_preference, &error);
+
+ if (selected_server) {
+ *server_id = mongoc_server_description_id(selected_server);
+ mongoc_server_description_destroy(selected_server);
+
+ return true;
+ }
+
+ /* Check for connection related exceptions */
+ if (!EG(exception)) {
+ phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
+ }
+
+ return false;
+} /* }}} */
+
/* {{{ proto void MongoDB\Driver\Manager::__construct([string $uri = "mongodb://127.0.0.1/"[, array $options = array()[, array $driverOptions = array()]]])
Constructs a new Manager */
static PHP_METHOD(Manager, __construct)
{
php_phongo_manager_t *intern;
zend_error_handling error_handling;
char *uri_string = NULL;
phongo_zpp_char_len uri_string_len = 0;
zval *options = NULL;
zval *driverOptions = NULL;
SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
intern = Z_MANAGER_OBJ_P(getThis());
/* Separate the options and driverOptions zvals, since we may end up
* modifying them in php_phongo_manager_prep_uri_options() and
* php_phongo_manager_merge_context_options() below, respectively. */
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s!a/!a/!", &uri_string, &uri_string_len, &options, &driverOptions) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
if (options) {
php_phongo_manager_prep_uri_options(options TSRMLS_CC);
}
if (driverOptions && !php_phongo_manager_merge_context_options(driverOptions TSRMLS_CC)) {
/* Exception should already have been thrown */
return;
}
phongo_manager_init(intern, uri_string ? uri_string : PHONGO_MANAGER_URI_DEFAULT, options, driverOptions TSRMLS_CC);
if (intern->client) {
php_phongo_set_monitoring_callbacks(intern->client);
}
} /* }}} */
-/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeCommand(string $db, MongoDB\Driver\Command $command[, MongoDB\Driver\ReadPreference $readPreference = null])
+/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeCommand(string $db, MongoDB\Driver\Command $command[, array $options = null])
Execute a Command */
static PHP_METHOD(Manager, executeCommand)
{
php_phongo_manager_t *intern;
char *db;
phongo_zpp_char_len db_len;
zval *command;
- zval *readPreference = NULL;
+ zval *options = NULL;
+ bool free_options = false;
+ zval *zreadPreference = NULL;
+ uint32_t server_id = 0;
+ DECLARE_RETURN_VALUE_USED
+ SUPPRESS_UNUSED_WARNING(return_value_ptr)
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) {
+ return;
+ }
+
+ intern = Z_MANAGER_OBJ_P(getThis());
+
+ options = php_phongo_prep_legacy_option(options, "readPreference", &free_options TSRMLS_CC);
+
+ if (!phongo_parse_read_preference(options, &zreadPreference TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ goto cleanup;
+ }
+
+ if (!php_phongo_manager_select_server(false, zreadPreference, intern->client, &server_id TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ goto cleanup;
+ }
+
+ phongo_execute_command(intern->client, PHONGO_COMMAND_RAW, db, command, options, server_id, return_value, return_value_used TSRMLS_CC);
+
+cleanup:
+ if (free_options) {
+ php_phongo_prep_legacy_option_free(options TSRMLS_CC);
+ }
+} /* }}} */
+
+/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeReadCommand(string $db, MongoDB\Driver\Command $command[, array $options = null])
+ Execute a ReadCommand */
+static PHP_METHOD(Manager, executeReadCommand)
+{
+ php_phongo_manager_t *intern;
+ char *db;
+ phongo_zpp_char_len db_len;
+ zval *command;
+ zval *options = NULL;
+ zval *zreadPreference = NULL;
+ uint32_t server_id = 0;
+ DECLARE_RETURN_VALUE_USED
+ SUPPRESS_UNUSED_WARNING(return_value_ptr)
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) {
+ return;
+ }
+
+ intern = Z_MANAGER_OBJ_P(getThis());
+
+ if (!phongo_parse_read_preference(options, &zreadPreference TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ return;
+ }
+
+ if (!php_phongo_manager_select_server(false, zreadPreference, intern->client, &server_id TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ return;
+ }
+
+ phongo_execute_command(intern->client, PHONGO_COMMAND_READ, db, command, options, server_id, return_value, return_value_used TSRMLS_CC);
+} /* }}} */
+
+/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeWriteCommand(string $db, MongoDB\Driver\Command $command[, array $options = null])
+ Execute a WriteCommand */
+static PHP_METHOD(Manager, executeWriteCommand)
+{
+ php_phongo_manager_t *intern;
+ char *db;
+ phongo_zpp_char_len db_len;
+ zval *command;
+ zval *options = NULL;
+ uint32_t server_id = 0;
+ DECLARE_RETURN_VALUE_USED
+ SUPPRESS_UNUSED_WARNING(return_value_ptr)
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) {
+ return;
+ }
+
+ intern = Z_MANAGER_OBJ_P(getThis());
+
+ if (!php_phongo_manager_select_server(true, NULL, intern->client, &server_id TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ return;
+ }
+
+ phongo_execute_command(intern->client, PHONGO_COMMAND_WRITE, db, command, options, server_id, return_value, return_value_used TSRMLS_CC);
+} /* }}} */
+
+/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeReadWriteCommand(string $db, MongoDB\Driver\Command $command[, array $options = null])
+ Execute a ReadWriteCommand */
+static PHP_METHOD(Manager, executeReadWriteCommand)
+{
+ php_phongo_manager_t *intern;
+ char *db;
+ phongo_zpp_char_len db_len;
+ zval *command;
+ zval *options = NULL;
+ uint32_t server_id = 0;
DECLARE_RETURN_VALUE_USED
SUPPRESS_UNUSED_WARNING(return_value_ptr)
- if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|O!", &db, &db_len, &command, php_phongo_command_ce, &readPreference, php_phongo_readpreference_ce) == FAILURE) {
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) {
return;
}
intern = Z_MANAGER_OBJ_P(getThis());
- phongo_execute_command(intern->client, db, command, readPreference, -1, return_value, return_value_used TSRMLS_CC);
+ if (!php_phongo_manager_select_server(true, NULL, intern->client, &server_id TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ return;
+ }
+
+ phongo_execute_command(intern->client, PHONGO_COMMAND_READ_WRITE, db, command, options, server_id, return_value, return_value_used TSRMLS_CC);
} /* }}} */
-/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeQuery(string $namespace, MongoDB\Driver\Query $query[, MongoDB\Driver\ReadPreference $readPreference = null])
+/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeQuery(string $namespace, MongoDB\Driver\Query $query[, array $options = null])
Execute a Query */
static PHP_METHOD(Manager, executeQuery)
{
php_phongo_manager_t *intern;
char *namespace;
phongo_zpp_char_len namespace_len;
zval *query;
- zval *readPreference = NULL;
+ zval *options = NULL;
+ bool free_options = false;
+ zval *zreadPreference = NULL;
+ uint32_t server_id = 0;
DECLARE_RETURN_VALUE_USED
SUPPRESS_UNUSED_WARNING(return_value_ptr)
- if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|O!", &namespace, &namespace_len, &query, php_phongo_query_ce, &readPreference, php_phongo_readpreference_ce) == FAILURE) {
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &namespace, &namespace_len, &query, php_phongo_query_ce, &options) == FAILURE) {
return;
}
intern = Z_MANAGER_OBJ_P(getThis());
- phongo_execute_query(intern->client, namespace, query, readPreference, -1, return_value, return_value_used TSRMLS_CC);
+ options = php_phongo_prep_legacy_option(options, "readPreference", &free_options TSRMLS_CC);
+
+ if (!phongo_parse_read_preference(options, &zreadPreference TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ goto cleanup;
+ }
+
+ if (!php_phongo_manager_select_server(false, zreadPreference, intern->client, &server_id TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ goto cleanup;
+ }
+
+ phongo_execute_query(intern->client, namespace, query, options, server_id, return_value, return_value_used TSRMLS_CC);
+
+cleanup:
+ if (free_options) {
+ php_phongo_prep_legacy_option_free(options TSRMLS_CC);
+ }
} /* }}} */
-/* {{{ proto MongoDB\Driver\WriteResult MongoDB\Driver\Manager::executeBulkWrite(string $namespace, MongoDB\Driver\BulkWrite $zbulk[, MongoDB\Driver\WriteConcern $writeConcern = null])
+/* {{{ proto MongoDB\Driver\WriteResult MongoDB\Driver\Manager::executeBulkWrite(string $namespace, MongoDB\Driver\BulkWrite $zbulk[, array $options = null])
Executes a BulkWrite (i.e. any number of insert, update, and delete ops) */
static PHP_METHOD(Manager, executeBulkWrite)
{
php_phongo_manager_t *intern;
char *namespace;
phongo_zpp_char_len namespace_len;
zval *zbulk;
- zval *zwrite_concern = NULL;
php_phongo_bulkwrite_t *bulk;
+ zval *options = NULL;
+ bool free_options = false;
+ uint32_t server_id = 0;
DECLARE_RETURN_VALUE_USED
SUPPRESS_UNUSED_WARNING(return_value_ptr)
- if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|O!", &namespace, &namespace_len, &zbulk, php_phongo_bulkwrite_ce, &zwrite_concern, php_phongo_writeconcern_ce) == FAILURE) {
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &namespace, &namespace_len, &zbulk, php_phongo_bulkwrite_ce, &options) == FAILURE) {
return;
}
intern = Z_MANAGER_OBJ_P(getThis());
bulk = Z_BULKWRITE_OBJ_P(zbulk);
- phongo_execute_write(intern->client, namespace, bulk, phongo_write_concern_from_zval(zwrite_concern TSRMLS_CC), -1, return_value, return_value_used TSRMLS_CC);
+ options = php_phongo_prep_legacy_option(options, "writeConcern", &free_options TSRMLS_CC);
+
+ if (!php_phongo_manager_select_server(true, NULL, intern->client, &server_id TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ goto cleanup;
+ }
+
+ phongo_execute_bulk_write(intern->client, namespace, bulk, options, server_id, return_value, return_value_used TSRMLS_CC);
+
+cleanup:
+ if (free_options) {
+ php_phongo_prep_legacy_option_free(options TSRMLS_CC);
+ }
} /* }}} */
/* {{{ proto MongoDB\Driver\ReadConcern MongoDB\Driver\Manager::getReadConcern()
Returns the ReadConcern associated with this Manager */
static PHP_METHOD(Manager, getReadConcern)
{
php_phongo_manager_t *intern;
DECLARE_RETURN_VALUE_USED
SUPPRESS_UNUSED_WARNING(return_value_ptr)
intern = Z_MANAGER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (return_value_used) {
phongo_readconcern_init(return_value, mongoc_client_get_read_concern(intern->client) TSRMLS_CC);
}
} /* }}} */
/* {{{ proto MongoDB\Driver\ReadPreference MongoDB\Driver\Manager::getReadPreference()
Returns the ReadPreference associated with this Manager */
static PHP_METHOD(Manager, getReadPreference)
{
php_phongo_manager_t *intern;
DECLARE_RETURN_VALUE_USED
SUPPRESS_UNUSED_WARNING(return_value_ptr)
intern = Z_MANAGER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (return_value_used) {
phongo_readpreference_init(return_value, mongoc_client_get_read_prefs(intern->client) TSRMLS_CC);
}
} /* }}} */
/* {{{ proto MongoDB\Driver\Server[] MongoDB\Driver\Manager::getServers()
Returns the Servers associated with this Manager */
static PHP_METHOD(Manager, getServers)
{
php_phongo_manager_t *intern;
mongoc_server_description_t **sds;
size_t i, n = 0;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_MANAGER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
sds = mongoc_client_get_server_descriptions(intern->client, &n);
array_init_size(return_value, n);
for (i = 0; i < n; i++) {
#if PHP_VERSION_ID >= 70000
zval obj;
phongo_server_init(&obj, intern->client, mongoc_server_description_id(sds[i]) TSRMLS_CC);
add_next_index_zval(return_value, &obj);
#else
zval *obj = NULL;
MAKE_STD_ZVAL(obj);
phongo_server_init(obj, intern->client, mongoc_server_description_id(sds[i]) TSRMLS_CC);
add_next_index_zval(return_value, obj);
#endif
}
mongoc_server_descriptions_destroy_all(sds, n);
} /* }}} */
/* {{{ proto MongoDB\Driver\WriteConcern MongoDB\Driver\Manager::getWriteConcern()
Returns the WriteConcern associated with this Manager */
static PHP_METHOD(Manager, getWriteConcern)
{
php_phongo_manager_t *intern;
DECLARE_RETURN_VALUE_USED
SUPPRESS_UNUSED_WARNING(return_value_ptr)
intern = Z_MANAGER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (return_value_used) {
phongo_writeconcern_init(return_value, mongoc_client_get_write_concern(intern->client) TSRMLS_CC);
}
} /* }}} */
/* {{{ proto MongoDB\Driver\Server MongoDB\Driver\Manager::selectServers(MongoDB\Driver\ReadPreference $readPreference)
Returns a suitable Server for the given ReadPreference */
static PHP_METHOD(Manager, selectServer)
{
php_phongo_manager_t *intern;
zval *zreadPreference = NULL;
- const mongoc_read_prefs_t *readPreference;
- bson_error_t error;
- mongoc_server_description_t *selected_server = NULL;
+ uint32_t server_id = 0;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_MANAGER_OBJ_P(getThis());
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &zreadPreference, php_phongo_readpreference_ce) == FAILURE) {
return;
}
- readPreference = phongo_read_preference_from_zval(zreadPreference TSRMLS_CC);
- selected_server = mongoc_client_select_server(intern->client, false, readPreference, &error);
- if (selected_server) {
- phongo_server_init(return_value, intern->client, mongoc_server_description_id(selected_server) TSRMLS_CC);
- mongoc_server_description_destroy(selected_server);
- } else {
- /* Check for connection related exceptions */
- if (EG(exception)) {
- return;
- }
+ if (!php_phongo_manager_select_server(false, zreadPreference, intern->client, &server_id TSRMLS_CC)) {
+ /* Exception should already have been thrown */
+ return;
+ }
+ phongo_server_init(return_value, intern->client, server_id TSRMLS_CC);
+} /* }}} */
+
+/* {{{ proto MongoDB\Driver\Session MongoDB\Driver\Manager::startSession([array $options = null])
+ Returns a new client session */
+static PHP_METHOD(Manager, startSession)
+{
+ php_phongo_manager_t *intern;
+ zval *options = NULL;
+ mongoc_session_opt_t *cs_opts = NULL;
+ mongoc_client_session_t *cs;
+ bson_error_t error;
+ SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
+
+
+ intern = Z_MANAGER_OBJ_P(getThis());
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|a!", &options) == FAILURE) {
+ return;
+ }
+
+ if (options && php_array_exists(options, "causalConsistency")) {
+ cs_opts = mongoc_session_opts_new();
+ mongoc_session_opts_set_causal_consistency(cs_opts, php_array_fetchc_bool(options, "causalConsistency"));
+ }
+
+ cs = mongoc_client_start_session(intern->client, cs_opts, &error);
+
+ if (cs_opts) {
+ mongoc_session_opts_destroy(cs_opts);
+ }
+
+ if (cs) {
+ phongo_session_init(return_value, cs TSRMLS_CC);
+ } else {
phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC);
}
} /* }}} */
/* {{{ MongoDB\Driver\Manager function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_Manager___construct, 0, 0, 0)
ZEND_ARG_INFO(0, uri)
ZEND_ARG_ARRAY_INFO(0, options, 0)
ZEND_ARG_ARRAY_INFO(0, driverOptions, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Manager_executeCommand, 0, 0, 2)
ZEND_ARG_INFO(0, db)
ZEND_ARG_OBJ_INFO(0, command, MongoDB\\Driver\\Command, 0)
- ZEND_ARG_OBJ_INFO(0, readPreference, MongoDB\\Driver\\ReadPreference, 1)
+ ZEND_ARG_INFO(0, options)
+ZEND_END_ARG_INFO()
+
+ZEND_BEGIN_ARG_INFO_EX(ai_Manager_executeRWCommand, 0, 0, 2)
+ ZEND_ARG_INFO(0, db)
+ ZEND_ARG_OBJ_INFO(0, command, MongoDB\\Driver\\Command, 0)
+ ZEND_ARG_ARRAY_INFO(0, options, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Manager_executeQuery, 0, 0, 2)
ZEND_ARG_INFO(0, namespace)
ZEND_ARG_OBJ_INFO(0, zquery, MongoDB\\Driver\\Query, 0)
- ZEND_ARG_OBJ_INFO(0, readPreference, MongoDB\\Driver\\ReadPreference, 1)
+ ZEND_ARG_INFO(0, options)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Manager_executeBulkWrite, 0, 0, 2)
ZEND_ARG_INFO(0, namespace)
ZEND_ARG_OBJ_INFO(0, zbulk, MongoDB\\Driver\\BulkWrite, 0)
- ZEND_ARG_OBJ_INFO(0, writeConcern, MongoDB\\Driver\\WriteConcern, 1)
+ ZEND_ARG_INFO(0, options)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Manager_selectServer, 0, 0, 1)
ZEND_ARG_OBJ_INFO(0, readPreference, MongoDB\\Driver\\ReadPreference, 1)
ZEND_END_ARG_INFO()
+ZEND_BEGIN_ARG_INFO_EX(ai_Manager_startSession, 0, 0, 0)
+ ZEND_ARG_ARRAY_INFO(0, options, 1)
+ZEND_END_ARG_INFO()
+
ZEND_BEGIN_ARG_INFO_EX(ai_Manager_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_manager_me[] = {
PHP_ME(Manager, __construct, ai_Manager___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Manager, executeCommand, ai_Manager_executeCommand, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Manager, executeReadCommand, ai_Manager_executeRWCommand, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Manager, executeWriteCommand, ai_Manager_executeRWCommand, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Manager, executeReadWriteCommand, ai_Manager_executeCommand, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Manager, executeQuery, ai_Manager_executeQuery, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Manager, executeBulkWrite, ai_Manager_executeBulkWrite, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Manager, getReadConcern, ai_Manager_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Manager, getReadPreference, ai_Manager_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Manager, getServers, ai_Manager_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Manager, getWriteConcern, ai_Manager_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Manager, selectServer, ai_Manager_selectServer, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Manager, startSession, ai_Manager_startSession, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Manager_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\Manager object handlers */
static zend_object_handlers php_phongo_handler_manager;
static void php_phongo_manager_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
    /* Destructor for Manager wrapper objects. */
    php_phongo_manager_t *intern = Z_OBJ_MANAGER(object);

    zend_object_std_dtor(&intern->std TSRMLS_CC);

    /* The client is persistent and shared across Manager instances, so it is
     * intentionally only detached here, never destroyed. */
    if (intern->client) {
        MONGOC_DEBUG("Not destroying persistent client for Manager");

        intern->client = NULL;
    }

#if PHP_VERSION_ID < 70000
    /* PHP 5: the wrapper struct itself was allocated separately. */
    efree(intern);
#endif
} /* }}} */
static phongo_create_object_retval php_phongo_manager_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
    /* Allocates and initializes the internal Manager object, handling both
     * the PHP 5 and PHP 7 object models. */
    php_phongo_manager_t *intern = NULL;

    intern = PHONGO_ALLOC_OBJECT_T(php_phongo_manager_t, class_type);

    zend_object_std_init(&intern->std, class_type TSRMLS_CC);
    object_properties_init(&intern->std, class_type);

#if PHP_VERSION_ID >= 70000
    /* PHP 7: handlers are attached to the embedded zend_object. */
    intern->std.handlers = &php_phongo_handler_manager;

    return &intern->std;
#else
    {
        zend_object_value retval;

        /* PHP 5: register the object in the store with its free callback. */
        retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_manager_free_object, NULL TSRMLS_CC);
        retval.handlers = &php_phongo_handler_manager;

        return retval;
    }
#endif
} /* }}} */
static HashTable *php_phongo_manager_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
    /* Debug-info handler (var_dump output): exposes the connection URI and a
     * "cluster" array describing each known server. */
    php_phongo_manager_t *intern;
    mongoc_server_description_t **sds;
    size_t i, n = 0;
#if PHP_VERSION_ID >= 70000
    zval retval, cluster;
#else
    zval retval = zval_used_for_init;
    zval *cluster = NULL;
#endif

    /* The returned HashTable is temporary; the engine will free it. */
    *is_temp = 1;
    intern = Z_MANAGER_OBJ_P(object);

    array_init_size(&retval, 2);

    ADD_ASSOC_STRING(&retval, "uri", mongoc_uri_get_string(mongoc_client_get_uri(intern->client)));

    sds = mongoc_client_get_server_descriptions(intern->client, &n);

#if PHP_VERSION_ID >= 70000
    array_init_size(&cluster, n);

    for (i = 0; i < n; i++) {
        zval obj;

        php_phongo_server_to_zval(&obj, sds[i]);
        add_next_index_zval(&cluster, &obj);
    }

    ADD_ASSOC_ZVAL_EX(&retval, "cluster", &cluster);
#else
    /* PHP 5: build the cluster array on the heap. */
    MAKE_STD_ZVAL(cluster);
    array_init_size(cluster, n);

    for (i = 0; i < n; i++) {
        zval *obj = NULL;

        MAKE_STD_ZVAL(obj);
        php_phongo_server_to_zval(obj, sds[i]);
        add_next_index_zval(cluster, obj);
    }

    ADD_ASSOC_ZVAL_EX(&retval, "cluster", cluster);
#endif
    mongoc_server_descriptions_destroy_all(sds, n);

    return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
void php_phongo_manager_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
    /* Registers the MongoDB\Driver\Manager class entry and installs its
     * custom object handlers. Called once at module startup. */
    zend_class_entry ce;

    INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Manager", php_phongo_manager_me);
    php_phongo_manager_ce = zend_register_internal_class(&ce TSRMLS_CC);
    php_phongo_manager_ce->create_object = php_phongo_manager_create_object;
    PHONGO_CE_FINAL(php_phongo_manager_ce);
    PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_manager_ce);

    /* Start from the standard handlers and override debug info (plus the
     * PHP 7 free/offset hooks). */
    memcpy(&php_phongo_handler_manager, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
    php_phongo_handler_manager.get_debug_info = php_phongo_manager_get_debug_info;
#if PHP_VERSION_ID >= 70000
    php_phongo_handler_manager.free_obj = php_phongo_manager_free_object;
    php_phongo_handler_manager.offset = XtOffsetOf(php_phongo_manager_t, std);
#endif
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/MongoDB/Monitoring/CommandFailedEvent.c b/mongodb-1.4.2/src/MongoDB/Monitoring/CommandFailedEvent.c
similarity index 99%
rename from mongodb-1.3.4/src/MongoDB/Monitoring/CommandFailedEvent.c
rename to mongodb-1.4.2/src/MongoDB/Monitoring/CommandFailedEvent.c
index a336d49f..c79abd6a 100644
--- a/mongodb-1.3.4/src/MongoDB/Monitoring/CommandFailedEvent.c
+++ b/mongodb-1.4.2/src/MongoDB/Monitoring/CommandFailedEvent.c
@@ -1,280 +1,280 @@
/*
* Copyright 2016-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include "phongo_compat.h"
#include "php_phongo.h"
zend_class_entry *php_phongo_commandfailedevent_ce;
/* {{{ proto string CommandFailedEvent::getCommandName()
Returns the command name for this event */
PHP_METHOD(CommandFailedEvent, getCommandName)
{
    php_phongo_commandfailedevent_t *event;

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    event = Z_COMMANDFAILEDEVENT_OBJ_P(getThis());

    /* Return a copy of the command name captured when the event fired. */
    PHONGO_RETVAL_STRING(event->command_name);
} /* }}} */
/* {{{ proto int CommandFailedEvent::getDurationMicros()
Returns the event's duration in microseconds */
PHP_METHOD(CommandFailedEvent, getDurationMicros)
{
    php_phongo_commandfailedevent_t *event;

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    event = Z_COMMANDFAILEDEVENT_OBJ_P(getThis());

    /* Duration was recorded when the command failed; expose it as an int. */
    RETURN_LONG(event->duration_micros);
} /* }}} */
/* {{{ proto Exception CommandFailedEvent::getError()
Returns the error document associated with the event */
PHP_METHOD(CommandFailedEvent, getError)
{
    /* Returns the exception object captured for the failed command. */
    php_phongo_commandfailedevent_t *intern;

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    intern = Z_COMMANDFAILEDEVENT_OBJ_P(getThis());

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* Copy flag (second arg) is 1 so the event keeps its own reference to
     * the stored zval. */
#if PHP_VERSION_ID >= 70000
    RETURN_ZVAL(&intern->z_error, 1, 0);
#else
    RETURN_ZVAL(intern->z_error, 1, 0);
#endif
} /* }}} */
/* {{{ proto string CommandFailedEvent::getOperationId()
Returns the event's operation ID */
PHP_METHOD(CommandFailedEvent, getOperationId)
{
    /* Returns the event's operation ID, formatted as a string so 64-bit
     * values survive on 32-bit platforms. */
    php_phongo_commandfailedevent_t *intern;
    /* 24 bytes comfortably holds any 64-bit integer, including the sign and
     * NUL ("-9223372036854775808" needs 21 bytes). The previous 20-byte
     * buffer could overflow for the most negative value — assumes
     * operation_id is a 64-bit integer (PHONGO_LONG_FORMAT); confirm in
     * php_phongo.h. */
    char int_as_string[24];

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    intern = Z_COMMANDFAILEDEVENT_OBJ_P(getThis());

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* snprintf makes the buffer bound explicit, unlike the former sprintf. */
    snprintf(int_as_string, sizeof int_as_string, "%" PHONGO_LONG_FORMAT, intern->operation_id);
    PHONGO_RETVAL_STRING(int_as_string);
} /* }}} */
/* {{{ proto string CommandFailedEvent::getRequestId()
Returns the event's request ID */
PHP_METHOD(CommandFailedEvent, getRequestId)
{
    /* Returns the event's request ID, formatted as a string so 64-bit
     * values survive on 32-bit platforms. */
    php_phongo_commandfailedevent_t *intern;
    /* 24 bytes holds any 64-bit integer incl. sign and NUL; the previous
     * 20-byte buffer could overflow for the most negative value — assumes
     * request_id is a 64-bit integer (PHONGO_LONG_FORMAT); confirm in
     * php_phongo.h. */
    char int_as_string[24];

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    intern = Z_COMMANDFAILEDEVENT_OBJ_P(getThis());

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* snprintf makes the buffer bound explicit, unlike the former sprintf. */
    snprintf(int_as_string, sizeof int_as_string, "%" PHONGO_LONG_FORMAT, intern->request_id);
    PHONGO_RETVAL_STRING(int_as_string);
} /* }}} */
/* {{{ proto MongoDB\Driver\Server CommandFailedEvent::getServer()
Returns the Server from which the event originated */
PHP_METHOD(CommandFailedEvent, getServer)
{
    php_phongo_commandfailedevent_t *event;

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    event = Z_COMMANDFAILEDEVENT_OBJ_P(getThis());

    /* Wrap the stored client/server-id pair in a new Server object. */
    phongo_server_init(return_value, event->client, event->server_id TSRMLS_CC);
} /* }}} */
/**
* Event thrown when a command has failed to execute.
*
* This class is only constructed internally.
*/
/* {{{ MongoDB\Driver\Monitoring\CommandFailedEvent function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_CommandFailedEvent_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_commandfailedevent_me[] = {
ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_CommandFailedEvent_void, ZEND_ACC_PRIVATE|ZEND_ACC_FINAL)
PHP_ME(CommandFailedEvent, getCommandName, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(CommandFailedEvent, getError, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(CommandFailedEvent, getDurationMicros, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(CommandFailedEvent, getOperationId, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(CommandFailedEvent, getRequestId, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(CommandFailedEvent, getServer, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\Monitoring\CommandFailedEvent object handlers */
static zend_object_handlers php_phongo_handler_commandfailedevent;
static void php_phongo_commandfailedevent_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
    /* Destructor: releases the stored error zval and the duplicated command
     * name. */
    php_phongo_commandfailedevent_t *intern = Z_OBJ_COMMANDFAILEDEVENT(object);

    zend_object_std_dtor(&intern->std TSRMLS_CC);

    if (!Z_ISUNDEF(intern->z_error)) {
        zval_ptr_dtor(&intern->z_error);
    }

    if (intern->command_name) {
        efree(intern->command_name);
    }

#if PHP_VERSION_ID < 70000
    /* PHP 5: the wrapper struct itself was allocated separately. */
    efree(intern);
#endif
} /* }}} */
static phongo_create_object_retval php_phongo_commandfailedevent_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
    /* Allocates and initializes a CommandFailedEvent object for both the
     * PHP 5 and PHP 7 object models. */
    php_phongo_commandfailedevent_t *intern = NULL;

    intern = PHONGO_ALLOC_OBJECT_T(php_phongo_commandfailedevent_t, class_type);

    zend_object_std_init(&intern->std, class_type TSRMLS_CC);
    object_properties_init(&intern->std, class_type);

#if PHP_VERSION_ID >= 70000
    /* PHP 7: handlers are attached to the embedded zend_object. */
    intern->std.handlers = &php_phongo_handler_commandfailedevent;

    return &intern->std;
#else
    {
        zend_object_value retval;

        /* PHP 5: register the object in the store with its free callback. */
        retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_commandfailedevent_free_object, NULL TSRMLS_CC);
        retval.handlers = &php_phongo_handler_commandfailedevent;

        return retval;
    }
#endif
} /* }}} */
static HashTable *php_phongo_commandfailedevent_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
php_phongo_commandfailedevent_t *intern;
#if PHP_VERSION_ID >= 70000
zval retval;
#else
zval retval = zval_used_for_init;
#endif
char operation_id[20], request_id[20];
intern = Z_COMMANDFAILEDEVENT_OBJ_P(object);
*is_temp = 1;
array_init_size(&retval, 6);
ADD_ASSOC_STRING(&retval, "commandName", intern->command_name);
- ADD_ASSOC_INT64(&retval, "durationMicros", intern->duration_micros);
+ ADD_ASSOC_INT64(&retval, "durationMicros", (int64_t) intern->duration_micros);
#if PHP_VERSION_ID >= 70000
ADD_ASSOC_ZVAL_EX(&retval, "error", &intern->z_error);
Z_ADDREF(intern->z_error);
#else
ADD_ASSOC_ZVAL_EX(&retval, "error", intern->z_error);
Z_ADDREF_P(intern->z_error);
#endif
sprintf(operation_id, "%" PHONGO_LONG_FORMAT, intern->operation_id);
ADD_ASSOC_STRING(&retval, "operationId", operation_id);
sprintf(request_id, "%" PHONGO_LONG_FORMAT, intern->request_id);
ADD_ASSOC_STRING(&retval, "requestId", request_id);
{
#if PHP_VERSION_ID >= 70000
zval server;
phongo_server_init(&server, intern->client, intern->server_id TSRMLS_CC);
ADD_ASSOC_ZVAL_EX(&retval, "server", &server);
#else
zval *server = NULL;
MAKE_STD_ZVAL(server);
phongo_server_init(server, intern->client, intern->server_id TSRMLS_CC);
ADD_ASSOC_ZVAL_EX(&retval, "server", server);
#endif
}
return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
void php_phongo_commandfailedevent_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
    /* Registers the MongoDB\Driver\Monitoring\CommandFailedEvent class and
     * installs its object handlers. Called once at module startup. */
    zend_class_entry ce;
    /* Silence unused-parameter warnings for the INIT_FUNC_ARGS macros. */
    (void)type;(void)module_number;

    INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Monitoring", "CommandFailedEvent", php_phongo_commandfailedevent_me);
    php_phongo_commandfailedevent_ce = zend_register_internal_class(&ce TSRMLS_CC);
    php_phongo_commandfailedevent_ce->create_object = php_phongo_commandfailedevent_create_object;
    PHONGO_CE_FINAL(php_phongo_commandfailedevent_ce);
    PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_commandfailedevent_ce);

    /* Start from the standard handlers and override debug info (plus the
     * PHP 7 free/offset hooks). */
    memcpy(&php_phongo_handler_commandfailedevent, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
    php_phongo_handler_commandfailedevent.get_debug_info = php_phongo_commandfailedevent_get_debug_info;
#if PHP_VERSION_ID >= 70000
    php_phongo_handler_commandfailedevent.free_obj = php_phongo_commandfailedevent_free_object;
    php_phongo_handler_commandfailedevent.offset = XtOffsetOf(php_phongo_commandfailedevent_t, std);
#endif

    return;
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/MongoDB/Monitoring/CommandStartedEvent.c b/mongodb-1.4.2/src/MongoDB/Monitoring/CommandStartedEvent.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Monitoring/CommandStartedEvent.c
rename to mongodb-1.4.2/src/MongoDB/Monitoring/CommandStartedEvent.c
diff --git a/mongodb-1.3.4/src/MongoDB/Monitoring/CommandSubscriber.c b/mongodb-1.4.2/src/MongoDB/Monitoring/CommandSubscriber.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Monitoring/CommandSubscriber.c
rename to mongodb-1.4.2/src/MongoDB/Monitoring/CommandSubscriber.c
diff --git a/mongodb-1.3.4/src/MongoDB/Monitoring/CommandSucceededEvent.c b/mongodb-1.4.2/src/MongoDB/Monitoring/CommandSucceededEvent.c
similarity index 99%
rename from mongodb-1.3.4/src/MongoDB/Monitoring/CommandSucceededEvent.c
rename to mongodb-1.4.2/src/MongoDB/Monitoring/CommandSucceededEvent.c
index b1a4c5fd..fe6d0d4d 100644
--- a/mongodb-1.3.4/src/MongoDB/Monitoring/CommandSucceededEvent.c
+++ b/mongodb-1.4.2/src/MongoDB/Monitoring/CommandSucceededEvent.c
@@ -1,281 +1,281 @@
/*
* Copyright 2016-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include "phongo_compat.h"
#include "php_phongo.h"
zend_class_entry *php_phongo_commandsucceededevent_ce;
/* {{{ proto string CommandSucceededEvent::getCommandName()
Returns the command name for this event */
PHP_METHOD(CommandSucceededEvent, getCommandName)
{
    php_phongo_commandsucceededevent_t *event;

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    event = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis());

    /* Return a copy of the command name captured when the event fired. */
    PHONGO_RETVAL_STRING(event->command_name);
} /* }}} */
/* {{{ proto int CommandSucceededEvent::getDurationMicros()
Returns the event's duration in microseconds */
PHP_METHOD(CommandSucceededEvent, getDurationMicros)
{
    php_phongo_commandsucceededevent_t *event;

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    event = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis());

    /* Duration was recorded when the command completed; expose as an int. */
    RETURN_LONG(event->duration_micros);
} /* }}} */
/* {{{ proto string CommandSucceededEvent::getOperationId()
Returns the event's operation ID */
PHP_METHOD(CommandSucceededEvent, getOperationId)
{
    /* Returns the event's operation ID, formatted as a string so 64-bit
     * values survive on 32-bit platforms. */
    php_phongo_commandsucceededevent_t *intern;
    /* 24 bytes holds any 64-bit integer incl. sign and NUL; the previous
     * 20-byte buffer could overflow for the most negative value — assumes
     * operation_id is a 64-bit integer (PHONGO_LONG_FORMAT); confirm in
     * php_phongo.h. */
    char int_as_string[24];

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis());

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* snprintf makes the buffer bound explicit, unlike the former sprintf. */
    snprintf(int_as_string, sizeof int_as_string, "%" PHONGO_LONG_FORMAT, intern->operation_id);
    PHONGO_RETVAL_STRING(int_as_string);
} /* }}} */
/* {{{ proto stdClass CommandSucceededEvent::getReply()
Returns the reply document associated with the event */
PHP_METHOD(CommandSucceededEvent, getReply)
{
    /* Converts the stored raw BSON reply into a PHP value and returns it. */
    php_phongo_commandsucceededevent_t *intern;
    php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis());

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    php_phongo_bson_to_zval_ex(bson_get_data(intern->reply), intern->reply->len, &state);

    /* Transfer ownership of the freshly-built zval to the return value
     * (copy=0, dtor=1). */
#if PHP_VERSION_ID >= 70000
    RETURN_ZVAL(&state.zchild, 0, 1);
#else
    RETURN_ZVAL(state.zchild, 0, 1);
#endif
} /* }}} */
/* {{{ proto string CommandsucceededEvent::getRequestId()
Returns the event's request ID */
PHP_METHOD(CommandSucceededEvent, getRequestId)
{
    /* Returns the event's request ID, formatted as a string so 64-bit
     * values survive on 32-bit platforms. */
    php_phongo_commandsucceededevent_t *intern;
    /* 24 bytes holds any 64-bit integer incl. sign and NUL; the previous
     * 20-byte buffer could overflow for the most negative value — assumes
     * request_id is a 64-bit integer (PHONGO_LONG_FORMAT); confirm in
     * php_phongo.h. */
    char int_as_string[24];

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis());

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* snprintf makes the buffer bound explicit, unlike the former sprintf. */
    snprintf(int_as_string, sizeof int_as_string, "%" PHONGO_LONG_FORMAT, intern->request_id);
    PHONGO_RETVAL_STRING(int_as_string);
} /* }}} */
/* {{{ proto MongoDB\Driver\Server CommandSucceededEvent::getServer()
Returns the Server from which the event originated */
PHP_METHOD(CommandSucceededEvent, getServer)
{
    php_phongo_commandsucceededevent_t *event;

    SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)

    /* This method takes no arguments. */
    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    event = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis());

    /* Wrap the stored client/server-id pair in a new Server object. */
    phongo_server_init(return_value, event->client, event->server_id TSRMLS_CC);
} /* }}} */
/**
* Event thrown when a command has succeeded to execute.
*
* This class is only constructed internally.
*/
/* {{{ MongoDB\Driver\Monitoring\CommandSucceededEvent function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_CommandSucceededEvent_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_commandsucceededevent_me[] = {
ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_CommandSucceededEvent_void, ZEND_ACC_PRIVATE|ZEND_ACC_FINAL)
PHP_ME(CommandSucceededEvent, getCommandName, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(CommandSucceededEvent, getDurationMicros, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(CommandSucceededEvent, getOperationId, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(CommandSucceededEvent, getReply, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(CommandSucceededEvent, getRequestId, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(CommandSucceededEvent, getServer, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\Monitoring\CommandSucceededEvent object handlers */
static zend_object_handlers php_phongo_handler_commandsucceededevent;
static void php_phongo_commandsucceededevent_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
    /* Destructor: releases the stored BSON reply and the duplicated command
     * name. */
    php_phongo_commandsucceededevent_t *intern = Z_OBJ_COMMANDSUCCEEDEDEVENT(object);

    zend_object_std_dtor(&intern->std TSRMLS_CC);

    if (intern->reply) {
        bson_destroy(intern->reply);
    }

    if (intern->command_name) {
        efree(intern->command_name);
    }

#if PHP_VERSION_ID < 70000
    /* PHP 5: the wrapper struct itself was allocated separately. */
    efree(intern);
#endif
} /* }}} */
static phongo_create_object_retval php_phongo_commandsucceededevent_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
    /* Allocates and initializes a CommandSucceededEvent object for both the
     * PHP 5 and PHP 7 object models. */
    php_phongo_commandsucceededevent_t *intern = NULL;

    intern = PHONGO_ALLOC_OBJECT_T(php_phongo_commandsucceededevent_t, class_type);

    zend_object_std_init(&intern->std, class_type TSRMLS_CC);
    object_properties_init(&intern->std, class_type);

#if PHP_VERSION_ID >= 70000
    /* PHP 7: handlers are attached to the embedded zend_object. */
    intern->std.handlers = &php_phongo_handler_commandsucceededevent;

    return &intern->std;
#else
    {
        zend_object_value retval;

        /* PHP 5: register the object in the store with its free callback. */
        retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_commandsucceededevent_free_object, NULL TSRMLS_CC);
        retval.handlers = &php_phongo_handler_commandsucceededevent;

        return retval;
    }
#endif
} /* }}} */
static HashTable *php_phongo_commandsucceededevent_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
php_phongo_commandsucceededevent_t *intern;
#if PHP_VERSION_ID >= 70000
zval retval;
#else
zval retval = zval_used_for_init;
#endif
char operation_id[20], request_id[20];
php_phongo_bson_state reply_state = PHONGO_BSON_STATE_INITIALIZER;
intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(object);
*is_temp = 1;
array_init_size(&retval, 6);
ADD_ASSOC_STRING(&retval, "commandName", intern->command_name);
- ADD_ASSOC_INT64(&retval, "durationMicros", intern->duration_micros);
+ ADD_ASSOC_INT64(&retval, "durationMicros", (int64_t) intern->duration_micros);
sprintf(operation_id, "%" PHONGO_LONG_FORMAT, intern->operation_id);
ADD_ASSOC_STRING(&retval, "operationId", operation_id);
php_phongo_bson_to_zval_ex(bson_get_data(intern->reply), intern->reply->len, &reply_state);
#if PHP_VERSION_ID >= 70000
ADD_ASSOC_ZVAL(&retval, "reply", &reply_state.zchild);
#else
ADD_ASSOC_ZVAL(&retval, "reply", reply_state.zchild);
#endif
sprintf(request_id, "%" PHONGO_LONG_FORMAT, intern->request_id);
ADD_ASSOC_STRING(&retval, "requestId", request_id);
{
#if PHP_VERSION_ID >= 70000
zval server;
phongo_server_init(&server, intern->client, intern->server_id TSRMLS_CC);
ADD_ASSOC_ZVAL_EX(&retval, "server", &server);
#else
zval *server = NULL;
MAKE_STD_ZVAL(server);
phongo_server_init(server, intern->client, intern->server_id TSRMLS_CC);
ADD_ASSOC_ZVAL_EX(&retval, "server", server);
#endif
}
return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
void php_phongo_commandsucceededevent_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
    /* Registers the MongoDB\Driver\Monitoring\CommandSucceededEvent class
     * and installs its object handlers. Called once at module startup. */
    zend_class_entry ce;
    /* Silence unused-parameter warnings for the INIT_FUNC_ARGS macros. */
    (void)type;(void)module_number;

    INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Monitoring", "CommandSucceededEvent", php_phongo_commandsucceededevent_me);
    php_phongo_commandsucceededevent_ce = zend_register_internal_class(&ce TSRMLS_CC);
    php_phongo_commandsucceededevent_ce->create_object = php_phongo_commandsucceededevent_create_object;
    PHONGO_CE_FINAL(php_phongo_commandsucceededevent_ce);
    PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_commandsucceededevent_ce);

    /* Start from the standard handlers and override debug info (plus the
     * PHP 7 free/offset hooks). */
    memcpy(&php_phongo_handler_commandsucceededevent, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
    php_phongo_handler_commandsucceededevent.get_debug_info = php_phongo_commandsucceededevent_get_debug_info;
#if PHP_VERSION_ID >= 70000
    php_phongo_handler_commandsucceededevent.free_obj = php_phongo_commandsucceededevent_free_object;
    php_phongo_handler_commandsucceededevent.offset = XtOffsetOf(php_phongo_commandsucceededevent_t, std);
#endif

    return;
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/MongoDB/Monitoring/Subscriber.c b/mongodb-1.4.2/src/MongoDB/Monitoring/Subscriber.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Monitoring/Subscriber.c
rename to mongodb-1.4.2/src/MongoDB/Monitoring/Subscriber.c
diff --git a/mongodb-1.3.4/src/MongoDB/Monitoring/functions.c b/mongodb-1.4.2/src/MongoDB/Monitoring/functions.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Monitoring/functions.c
rename to mongodb-1.4.2/src/MongoDB/Monitoring/functions.c
diff --git a/mongodb-1.3.4/src/MongoDB/Monitoring/functions.h b/mongodb-1.4.2/src/MongoDB/Monitoring/functions.h
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/Monitoring/functions.h
rename to mongodb-1.4.2/src/MongoDB/Monitoring/functions.h
diff --git a/mongodb-1.3.4/src/MongoDB/Query.c b/mongodb-1.4.2/src/MongoDB/Query.c
similarity index 98%
rename from mongodb-1.3.4/src/MongoDB/Query.c
rename to mongodb-1.4.2/src/MongoDB/Query.c
index 0a5cf54a..e0624fd5 100644
--- a/mongodb-1.3.4/src/MongoDB/Query.c
+++ b/mongodb-1.4.2/src/MongoDB/Query.c
@@ -1,516 +1,516 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include "php_array_api.h"
#include "phongo_compat.h"
#include "php_phongo.h"
#include "php_bson.h"
zend_class_entry *php_phongo_query_ce;
/* Appends a string field into the BSON options. Returns true on success;
* otherwise, false is returned and an exception is thrown. */
static bool php_phongo_query_opts_append_string(bson_t *opts, const char *opts_key, zval *zarr, const char *zarr_key TSRMLS_DC) /* {{{ */
{
zval *value = php_array_fetch(zarr, zarr_key);
if (Z_TYPE_P(value) != IS_STRING) {
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"%s\" %s to be string, %s given", zarr_key, zarr_key[0] == '$' ? "modifier" : "option", zend_get_type_by_const(Z_TYPE_P(value)));
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"%s\" %s to be string, %s given", zarr_key, zarr_key[0] == '$' ? "modifier" : "option", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(value));
return false;
}
if (!bson_append_utf8(opts, opts_key, strlen(opts_key), Z_STRVAL_P(value), Z_STRLEN_P(value))) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", opts_key);
return false;
}
return true;
} /* }}} */
/* Appends a document field for the given opts document and key. Returns true on
* success; otherwise, false is returned and an exception is thrown. */
static bool php_phongo_query_opts_append_document(bson_t *opts, const char *opts_key, zval *zarr, const char *zarr_key TSRMLS_DC) /* {{{ */
{
zval *value = php_array_fetch(zarr, zarr_key);
bson_t b = BSON_INITIALIZER;
if (Z_TYPE_P(value) != IS_OBJECT && Z_TYPE_P(value) != IS_ARRAY) {
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"%s\" %s to be array or object, %s given", zarr_key, zarr_key[0] == '$' ? "modifier" : "option", zend_get_type_by_const(Z_TYPE_P(value)));
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"%s\" %s to be array or object, %s given", zarr_key, zarr_key[0] == '$' ? "modifier" : "option", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(value));
return false;
}
php_phongo_zval_to_bson(value, PHONGO_BSON_NONE, &b, NULL TSRMLS_CC);
if (EG(exception)) {
bson_destroy(&b);
return false;
}
if (!bson_validate(&b, BSON_VALIDATE_EMPTY_KEYS, NULL)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Cannot use empty keys in \"%s\" %s", zarr_key, zarr_key[0] == '$' ? "modifier" : "option");
bson_destroy(&b);
return false;
}
if (!BSON_APPEND_DOCUMENT(opts, opts_key, &b)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", opts_key);
bson_destroy(&b);
return false;
}
bson_destroy(&b);
return true;
} /* }}} */
#define PHONGO_QUERY_OPT_BOOL(opt, zarr, key) \
if ((zarr) && php_array_existsc((zarr), (key))) { \
if (!BSON_APPEND_BOOL(intern->opts, (opt), php_array_fetchc_bool((zarr), (key)))) { \
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", (opt)); \
return false; \
} \
}
#define PHONGO_QUERY_OPT_DOCUMENT(opt, zarr, key) \
if ((zarr) && php_array_existsc((zarr), (key))) { \
if (!php_phongo_query_opts_append_document(intern->opts, (opt), (zarr), (key) TSRMLS_CC)) { \
return false; \
} \
}
/* Note: handling of integer options will depend on SIZEOF_ZEND_LONG and we
* are not converting strings to 64-bit integers for 32-bit platforms. */
#define PHONGO_QUERY_OPT_INT64(opt, zarr, key) \
if ((zarr) && php_array_existsc((zarr), (key))) { \
if (!BSON_APPEND_INT64(intern->opts, (opt), php_array_fetchc_long((zarr), (key)))) { \
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", (opt)); \
return false; \
} \
}
#define PHONGO_QUERY_OPT_STRING(opt, zarr, key) \
if ((zarr) && php_array_existsc((zarr), (key))) { \
if (!php_phongo_query_opts_append_string(intern->opts, (opt), (zarr), (key) TSRMLS_CC)) { \
return false; \
} \
}
/* Initialize the "hint" option. Returns true on success; otherwise, false is
* returned and an exception is thrown.
*
* The "hint" option (or "$hint" modifier) must be a string or document. Check
* for both types and merge into BSON options accordingly. */
static bool php_phongo_query_init_hint(php_phongo_query_t *intern, zval *options, zval *modifiers TSRMLS_DC) /* {{{ */
{
/* The "hint" option (or "$hint" modifier) must be a string or document.
* Check for both types and merge into BSON options accordingly. */
if (php_array_existsc(options, "hint")) {
zend_uchar type = Z_TYPE_P(php_array_fetchc(options, "hint"));
if (type == IS_STRING) {
PHONGO_QUERY_OPT_STRING("hint", options, "hint");
} else if (type == IS_OBJECT || type == IS_ARRAY) {
PHONGO_QUERY_OPT_DOCUMENT("hint", options, "hint");
} else {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"hint\" option to be string, array, or object, %s given", zend_get_type_by_const(type));
return false;
}
} else if (modifiers && php_array_existsc(modifiers, "$hint")) {
zend_uchar type = Z_TYPE_P(php_array_fetchc(modifiers, "$hint"));
if (type == IS_STRING) {
PHONGO_QUERY_OPT_STRING("hint", modifiers, "$hint");
} else if (type == IS_OBJECT || type == IS_ARRAY) {
PHONGO_QUERY_OPT_DOCUMENT("hint", modifiers, "$hint");
} else {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"$hint\" modifier to be string, array, or object, %s given", zend_get_type_by_const(type));
return false;
}
}
return true;
} /* }}} */
/* Initialize the "limit" and "singleBatch" options. Returns true on success;
* otherwise, false is returned and an exception is thrown.
*
* mongoc_collection_find_with_opts() requires a non-negative limit. For
* backwards compatibility, a negative limit should be set as a positive value
* and default singleBatch to true. */
static bool php_phongo_query_init_limit_and_singlebatch(php_phongo_query_t *intern, zval *options TSRMLS_DC) /* {{{ */
{
if (php_array_existsc(options, "limit") && php_array_fetchc_long(options, "limit") < 0) {
phongo_long limit = php_array_fetchc_long(options, "limit");
if (!BSON_APPEND_INT64(intern->opts, "limit", -limit)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"limit\" option");
return false;
}
if (php_array_existsc(options, "singleBatch") && !php_array_fetchc_bool(options, "singleBatch")) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Negative \"limit\" option conflicts with false \"singleBatch\" option");
return false;
} else {
if (!BSON_APPEND_BOOL(intern->opts, "singleBatch", true)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"singleBatch\" option");
return false;
}
}
} else {
PHONGO_QUERY_OPT_INT64("limit", options, "limit");
PHONGO_QUERY_OPT_BOOL("singleBatch", options, "singleBatch");
}
return true;
} /* }}} */
/* Initialize the "readConcern" option. Returns true on success; otherwise,
* false is returned and an exception is thrown.
*
* The "readConcern" option should be a MongoDB\Driver\ReadConcern instance,
* which must be converted to a mongoc_read_concern_t. */
static bool php_phongo_query_init_readconcern(php_phongo_query_t *intern, zval *options TSRMLS_DC) /* {{{ */
{
if (php_array_existsc(options, "readConcern")) {
zval *read_concern = php_array_fetchc(options, "readConcern");
if (Z_TYPE_P(read_concern) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(read_concern), php_phongo_readconcern_ce TSRMLS_CC)) {
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"readConcern\" option to be %s, %s given", ZSTR_VAL(php_phongo_readconcern_ce->name), zend_get_type_by_const(Z_TYPE_P(read_concern)));
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"readConcern\" option to be %s, %s given", ZSTR_VAL(php_phongo_readconcern_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(read_concern));
return false;
}
intern->read_concern = mongoc_read_concern_copy(phongo_read_concern_from_zval(read_concern TSRMLS_CC));
}
return true;
} /* }}} */
/* Initialize the "maxAwaitTimeMS" option. Returns true on success; otherwise,
* false is returned and an exception is thrown.
*
* The "maxAwaitTimeMS" option is assigned to the cursor after query execution
* via mongoc_cursor_set_max_await_time_ms(). */
static bool php_phongo_query_init_max_await_time_ms(php_phongo_query_t *intern, zval *options TSRMLS_DC) /* {{{ */
{
if (php_array_existsc(options, "maxAwaitTimeMS")) {
int64_t max_await_time_ms = php_array_fetchc_long(options, "maxAwaitTimeMS");
if (max_await_time_ms < 0) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"maxAwaitTimeMS\" option to be >= 0, %" PRId64 " given", max_await_time_ms);
return false;
}
if (max_await_time_ms > UINT32_MAX) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"maxAwaitTimeMS\" option to be <= %" PRIu32 ", %" PRId64 " given", UINT32_MAX, max_await_time_ms);
return false;
}
intern->max_await_time_ms = (uint32_t) max_await_time_ms;
}
return true;
} /* }}} */
/* Initializes the php_phongo_query_t from filter and options arguments. This
* function will fall back to a modifier in the absence of a top-level option
* (where applicable). */
static bool php_phongo_query_init(php_phongo_query_t *intern, zval *filter, zval *options TSRMLS_DC) /* {{{ */
{
zval *modifiers = NULL;
intern->filter = bson_new();
intern->opts = bson_new();
php_phongo_zval_to_bson(filter, PHONGO_BSON_NONE, intern->filter, NULL TSRMLS_CC);
/* Note: if any exceptions are thrown, we can simply return as PHP will
* invoke php_phongo_query_free_object to destruct the object. */
if (EG(exception)) {
return false;
}
if (!bson_validate(intern->filter, BSON_VALIDATE_EMPTY_KEYS, NULL)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Cannot use empty keys in filter document");
return false;
}
if (!options) {
return true;
}
if (php_array_existsc(options, "modifiers")) {
modifiers = php_array_fetchc(options, "modifiers");
if (Z_TYPE_P(modifiers) != IS_ARRAY) {
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"modifiers\" option to be array, %s given", zend_get_type_by_const(Z_TYPE_P(modifiers)));
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"modifiers\" option to be array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(modifiers));
return false;
}
}
PHONGO_QUERY_OPT_BOOL("allowPartialResults", options, "allowPartialResults")
else PHONGO_QUERY_OPT_BOOL("allowPartialResults", options, "partial");
PHONGO_QUERY_OPT_BOOL("awaitData", options, "awaitData");
PHONGO_QUERY_OPT_INT64("batchSize", options, "batchSize");
PHONGO_QUERY_OPT_DOCUMENT("collation", options, "collation");
PHONGO_QUERY_OPT_STRING("comment", options, "comment")
else PHONGO_QUERY_OPT_STRING("comment", modifiers, "$comment");
PHONGO_QUERY_OPT_BOOL("exhaust", options, "exhaust");
PHONGO_QUERY_OPT_DOCUMENT("max", options, "max")
else PHONGO_QUERY_OPT_DOCUMENT("max", modifiers, "$max");
PHONGO_QUERY_OPT_INT64("maxScan", options, "maxScan")
else PHONGO_QUERY_OPT_INT64("maxScan", modifiers, "$maxScan");
PHONGO_QUERY_OPT_INT64("maxTimeMS", options, "maxTimeMS")
else PHONGO_QUERY_OPT_INT64("maxTimeMS", modifiers, "$maxTimeMS");
PHONGO_QUERY_OPT_DOCUMENT("min", options, "min")
else PHONGO_QUERY_OPT_DOCUMENT("min", modifiers, "$min");
PHONGO_QUERY_OPT_BOOL("noCursorTimeout", options, "noCursorTimeout");
PHONGO_QUERY_OPT_BOOL("oplogReplay", options, "oplogReplay");
PHONGO_QUERY_OPT_DOCUMENT("projection", options, "projection");
PHONGO_QUERY_OPT_BOOL("returnKey", options, "returnKey")
else PHONGO_QUERY_OPT_BOOL("returnKey", modifiers, "$returnKey");
PHONGO_QUERY_OPT_BOOL("showRecordId", options, "showRecordId")
else PHONGO_QUERY_OPT_BOOL("showRecordId", modifiers, "$showDiskLoc");
PHONGO_QUERY_OPT_INT64("skip", options, "skip");
PHONGO_QUERY_OPT_DOCUMENT("sort", options, "sort")
else PHONGO_QUERY_OPT_DOCUMENT("sort", modifiers, "$orderby");
PHONGO_QUERY_OPT_BOOL("snapshot", options, "snapshot")
else PHONGO_QUERY_OPT_BOOL("snapshot", modifiers, "$snapshot");
PHONGO_QUERY_OPT_BOOL("tailable", options, "tailable");
/* The "$explain" modifier should be converted to an "explain" option, which
* libmongoc will later convert back to a modifier for the OP_QUERY code
* path. This modifier will be ignored for the find command code path. */
PHONGO_QUERY_OPT_BOOL("explain", modifiers, "$explain");
if (!php_phongo_query_init_hint(intern, options, modifiers TSRMLS_CC)) {
return false;
}
if (!php_phongo_query_init_limit_and_singlebatch(intern, options TSRMLS_CC)) {
return false;
}
if (!php_phongo_query_init_readconcern(intern, options TSRMLS_CC)) {
return false;
}
if (!php_phongo_query_init_max_await_time_ms(intern, options TSRMLS_CC)) {
return false;
}
return true;
} /* }}} */
#undef PHONGO_QUERY_OPT_BOOL
#undef PHONGO_QUERY_OPT_DOCUMENT
#undef PHONGO_QUERY_OPT_INT64
#undef PHONGO_QUERY_OPT_STRING
/* {{{ proto void MongoDB\Driver\Query::__construct(array|object $filter[, array $options = array()])
Constructs a new Query */
static PHP_METHOD(Query, __construct)
{
php_phongo_query_t *intern;
zend_error_handling error_handling;
zval *filter;
zval *options = NULL;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_used)
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
intern = Z_QUERY_OBJ_P(getThis());
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A|a!", &filter, &options) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
php_phongo_query_init(intern, filter, options TSRMLS_CC);
} /* }}} */
/* {{{ MongoDB\Driver\Query function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_Query___construct, 0, 0, 1)
ZEND_ARG_INFO(0, filter)
ZEND_ARG_ARRAY_INFO(0, options, 1)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Query_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_query_me[] = {
PHP_ME(Query, __construct, ai_Query___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Query_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\Query object handlers */
static zend_object_handlers php_phongo_handler_query;
static void php_phongo_query_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
php_phongo_query_t *intern = Z_OBJ_QUERY(object);
zend_object_std_dtor(&intern->std TSRMLS_CC);
if (intern->filter) {
bson_clear(&intern->filter);
}
if (intern->opts) {
bson_clear(&intern->opts);
}
if (intern->read_concern) {
mongoc_read_concern_destroy(intern->read_concern);
}
#if PHP_VERSION_ID < 70000
efree(intern);
#endif
} /* }}} */
static phongo_create_object_retval php_phongo_query_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
php_phongo_query_t *intern = NULL;
intern = PHONGO_ALLOC_OBJECT_T(php_phongo_query_t, class_type);
zend_object_std_init(&intern->std, class_type TSRMLS_CC);
object_properties_init(&intern->std, class_type);
#if PHP_VERSION_ID >= 70000
intern->std.handlers = &php_phongo_handler_query;
return &intern->std;
#else
{
zend_object_value retval;
retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_query_free_object, NULL TSRMLS_CC);
retval.handlers = &php_phongo_handler_query;
return retval;
}
#endif
} /* }}} */
static HashTable *php_phongo_query_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
php_phongo_query_t *intern;
#if PHP_VERSION_ID >= 70000
zval retval;
#else
zval retval = zval_used_for_init;
#endif
*is_temp = 1;
intern = Z_QUERY_OBJ_P(object);
array_init_size(&retval, 3);
/* Avoid using PHONGO_TYPEMAP_NATIVE_ARRAY for decoding filter and opts
* documents so that users can differentiate BSON arrays and documents. */
if (intern->filter) {
#if PHP_VERSION_ID >= 70000
zval zv;
#else
zval *zv;
#endif
php_phongo_bson_to_zval(bson_get_data(intern->filter), intern->filter->len, &zv);
#if PHP_VERSION_ID >= 70000
ADD_ASSOC_ZVAL_EX(&retval, "filter", &zv);
#else
ADD_ASSOC_ZVAL_EX(&retval, "filter", zv);
#endif
} else {
ADD_ASSOC_NULL_EX(&retval, "filter");
}
if (intern->opts) {
#if PHP_VERSION_ID >= 70000
zval zv;
#else
zval *zv;
#endif
php_phongo_bson_to_zval(bson_get_data(intern->opts), intern->opts->len, &zv);
#if PHP_VERSION_ID >= 70000
ADD_ASSOC_ZVAL_EX(&retval, "options", &zv);
#else
ADD_ASSOC_ZVAL_EX(&retval, "options", zv);
#endif
} else {
ADD_ASSOC_NULL_EX(&retval, "options");
}
if (intern->read_concern) {
#if PHP_VERSION_ID >= 70000
zval read_concern;
php_phongo_read_concern_to_zval(&read_concern, intern->read_concern);
ADD_ASSOC_ZVAL_EX(&retval, "readConcern", &read_concern);
#else
zval *read_concern = NULL;
MAKE_STD_ZVAL(read_concern);
php_phongo_read_concern_to_zval(read_concern, intern->read_concern);
ADD_ASSOC_ZVAL_EX(&retval, "readConcern", read_concern);
#endif
} else {
ADD_ASSOC_NULL_EX(&retval, "readConcern");
}
return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
void php_phongo_query_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
zend_class_entry ce;
INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Query", php_phongo_query_me);
php_phongo_query_ce = zend_register_internal_class(&ce TSRMLS_CC);
php_phongo_query_ce->create_object = php_phongo_query_create_object;
PHONGO_CE_FINAL(php_phongo_query_ce);
PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_query_ce);
memcpy(&php_phongo_handler_query, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
php_phongo_handler_query.get_debug_info = php_phongo_query_get_debug_info;
#if PHP_VERSION_ID >= 70000
php_phongo_handler_query.free_obj = php_phongo_query_free_object;
php_phongo_handler_query.offset = XtOffsetOf(php_phongo_query_t, std);
#endif
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/MongoDB/ReadConcern.c b/mongodb-1.4.2/src/MongoDB/ReadConcern.c
similarity index 98%
rename from mongodb-1.3.4/src/MongoDB/ReadConcern.c
rename to mongodb-1.4.2/src/MongoDB/ReadConcern.c
index e14c8543..9f174ba5 100644
--- a/mongodb-1.3.4/src/MongoDB/ReadConcern.c
+++ b/mongodb-1.4.2/src/MongoDB/ReadConcern.c
@@ -1,220 +1,221 @@
/*
* Copyright 2015-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include "phongo_compat.h"
#include "php_phongo.h"
zend_class_entry *php_phongo_readconcern_ce;
/* {{{ proto void MongoDB\Driver\ReadConcern::__construct([string $level])
Constructs a new ReadConcern */
static PHP_METHOD(ReadConcern, __construct)
{
php_phongo_readconcern_t *intern;
zend_error_handling error_handling;
char *level = NULL;
phongo_zpp_char_len level_len = 0;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_used)
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
intern = Z_READCONCERN_OBJ_P(getThis());
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s!", &level, &level_len) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
intern->read_concern = mongoc_read_concern_new();
if (level) {
mongoc_read_concern_set_level(intern->read_concern, level);
}
} /* }}} */
/* {{{ proto string|null MongoDB\Driver\ReadConcern::getLevel()
Returns the ReadConcern "level" option */
static PHP_METHOD(ReadConcern, getLevel)
{
php_phongo_readconcern_t *intern;
const char *level;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_READCONCERN_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
level = mongoc_read_concern_get_level(intern->read_concern);
if (level) {
PHONGO_RETURN_STRING(level);
}
RETURN_NULL();
} /* }}} */
/* {{{ proto boolean MongoDB\Driver\ReadConcern::isDefault()
Returns whether the read concern has not been modified (i.e. constructed
without a level or from a Manager with no read concern URI options). */
static PHP_METHOD(ReadConcern, isDefault)
{
php_phongo_readconcern_t *intern;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_READCONCERN_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_BOOL(mongoc_read_concern_is_default(intern->read_concern));
} /* }}} */
/* {{{ proto array MongoDB\Driver\ReadConcern::bsonSerialize()
*/
static PHP_METHOD(ReadConcern, bsonSerialize)
{
const mongoc_read_concern_t *read_concern = phongo_read_concern_from_zval(getThis() TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
php_phongo_read_concern_to_zval(return_value, read_concern);
convert_to_object(return_value);
} /* }}} */
/* {{{ MongoDB\Driver\ReadConcern function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_ReadConcern___construct, 0, 0, 0)
ZEND_ARG_INFO(0, level)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_ReadConcern_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_readconcern_me[] = {
PHP_ME(ReadConcern, __construct, ai_ReadConcern___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ReadConcern, getLevel, ai_ReadConcern_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ReadConcern, isDefault, ai_ReadConcern_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ReadConcern, bsonSerialize, ai_ReadConcern_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\ReadConcern object handlers */
static zend_object_handlers php_phongo_handler_readconcern;
static void php_phongo_readconcern_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
php_phongo_readconcern_t *intern = Z_OBJ_READCONCERN(object);
zend_object_std_dtor(&intern->std TSRMLS_CC);
if (intern->read_concern) {
mongoc_read_concern_destroy(intern->read_concern);
}
#if PHP_VERSION_ID < 70000
efree(intern);
#endif
} /* }}} */
static phongo_create_object_retval php_phongo_readconcern_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
php_phongo_readconcern_t *intern = NULL;
intern = PHONGO_ALLOC_OBJECT_T(php_phongo_readconcern_t, class_type);
zend_object_std_init(&intern->std, class_type TSRMLS_CC);
object_properties_init(&intern->std, class_type);
#if PHP_VERSION_ID >= 70000
intern->std.handlers = &php_phongo_handler_readconcern;
return &intern->std;
#else
{
zend_object_value retval;
retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_readconcern_free_object, NULL TSRMLS_CC);
retval.handlers = &php_phongo_handler_readconcern;
return retval;
}
#endif
} /* }}} */
static HashTable *php_phongo_readconcern_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
#if PHP_VERSION_ID >= 70000
zval retval;
#else
zval retval = zval_used_for_init;
#endif
const mongoc_read_concern_t *read_concern = phongo_read_concern_from_zval(object TSRMLS_CC);
*is_temp = 1;
php_phongo_read_concern_to_zval(&retval, read_concern);
return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
void php_phongo_readconcern_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
zend_class_entry ce;
INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "ReadConcern", php_phongo_readconcern_me);
php_phongo_readconcern_ce = zend_register_internal_class(&ce TSRMLS_CC);
php_phongo_readconcern_ce->create_object = php_phongo_readconcern_create_object;
PHONGO_CE_FINAL(php_phongo_readconcern_ce);
PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_readconcern_ce);
zend_class_implements(php_phongo_readconcern_ce TSRMLS_CC, 1, php_phongo_serializable_ce);
memcpy(&php_phongo_handler_readconcern, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
php_phongo_handler_readconcern.get_debug_info = php_phongo_readconcern_get_debug_info;
#if PHP_VERSION_ID >= 70000
php_phongo_handler_readconcern.free_obj = php_phongo_readconcern_free_object;
php_phongo_handler_readconcern.offset = XtOffsetOf(php_phongo_readconcern_t, std);
#endif
zend_declare_class_constant_stringl(php_phongo_readconcern_ce, ZEND_STRL("LOCAL"), ZEND_STRL(MONGOC_READ_CONCERN_LEVEL_LOCAL) TSRMLS_CC);
zend_declare_class_constant_stringl(php_phongo_readconcern_ce, ZEND_STRL("MAJORITY"), ZEND_STRL(MONGOC_READ_CONCERN_LEVEL_MAJORITY) TSRMLS_CC);
zend_declare_class_constant_stringl(php_phongo_readconcern_ce, ZEND_STRL("LINEARIZABLE"), ZEND_STRL(MONGOC_READ_CONCERN_LEVEL_LINEARIZABLE) TSRMLS_CC);
+ zend_declare_class_constant_stringl(php_phongo_readconcern_ce, ZEND_STRL("AVAILABLE"), ZEND_STRL("available") TSRMLS_CC);
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/MongoDB/ReadPreference.c b/mongodb-1.4.2/src/MongoDB/ReadPreference.c
similarity index 99%
rename from mongodb-1.3.4/src/MongoDB/ReadPreference.c
rename to mongodb-1.4.2/src/MongoDB/ReadPreference.c
index 7bc0ff25..49df8f98 100644
--- a/mongodb-1.3.4/src/MongoDB/ReadPreference.c
+++ b/mongodb-1.4.2/src/MongoDB/ReadPreference.c
@@ -1,330 +1,330 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include "php_array_api.h"
#include "phongo_compat.h"
#include "php_phongo.h"
#include "php_bson.h"
zend_class_entry *php_phongo_readpreference_ce;
/* {{{ proto void MongoDB\Driver\ReadPreference::__construct(int|string $mode[, array $tagSets = array()[, array $options = array()]])
Constructs a new ReadPreference */
static PHP_METHOD(ReadPreference, __construct)
{
php_phongo_readpreference_t *intern;
zend_error_handling error_handling;
zval *mode;
zval *tagSets = NULL;
zval *options = NULL;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_used)
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
intern = Z_READPREFERENCE_OBJ_P(getThis());
/* Separate the tagSets zval, since we may end up modifying it in
* php_phongo_read_preference_prep_tagsets() below. */
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z|a/!a!", &mode, &tagSets, &options) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
if (Z_TYPE_P(mode) == IS_LONG) {
switch(Z_LVAL_P(mode)) {
case MONGOC_READ_PRIMARY:
case MONGOC_READ_SECONDARY:
case MONGOC_READ_PRIMARY_PREFERRED:
case MONGOC_READ_SECONDARY_PREFERRED:
case MONGOC_READ_NEAREST:
intern->read_preference = mongoc_read_prefs_new(Z_LVAL_P(mode));
break;
default:
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Invalid mode: %" PHONGO_LONG_FORMAT, Z_LVAL_P(mode));
return;
}
} else if (Z_TYPE_P(mode) == IS_STRING) {
if (strcasecmp(Z_STRVAL_P(mode), "primary") == 0) {
intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_PRIMARY);
} else if (strcasecmp(Z_STRVAL_P(mode), "primaryPreferred") == 0) {
intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_PRIMARY_PREFERRED);
} else if (strcasecmp(Z_STRVAL_P(mode), "secondary") == 0) {
intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_SECONDARY);
} else if (strcasecmp(Z_STRVAL_P(mode), "secondaryPreferred") == 0) {
intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_SECONDARY_PREFERRED);
} else if (strcasecmp(Z_STRVAL_P(mode), "nearest") == 0) {
intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_NEAREST);
} else {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Invalid mode: '%s'", Z_STRVAL_P(mode));
return;
}
} else {
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected mode to be integer or string, %s given", zend_get_type_by_const(Z_TYPE_P(mode)));
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected mode to be integer or string, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(mode));
return;
}
if (tagSets) {
bson_t *tags = bson_new();
php_phongo_read_preference_prep_tagsets(tagSets TSRMLS_CC);
php_phongo_zval_to_bson(tagSets, PHONGO_BSON_NONE, (bson_t *)tags, NULL TSRMLS_CC);
if (!php_phongo_read_preference_tags_are_valid(tags)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "tagSets must be an array of zero or more documents");
bson_destroy(tags);
return;
}
if (!bson_empty(tags) && (mongoc_read_prefs_get_mode(intern->read_preference) == MONGOC_READ_PRIMARY)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "tagSets may not be used with primary mode");
bson_destroy(tags);
return;
}
mongoc_read_prefs_set_tags(intern->read_preference, tags);
bson_destroy(tags);
}
if (options && php_array_exists(options, "maxStalenessSeconds")) {
phongo_long maxStalenessSeconds = php_array_fetchc_long(options, "maxStalenessSeconds");
if (maxStalenessSeconds != MONGOC_NO_MAX_STALENESS) {
if (maxStalenessSeconds < MONGOC_SMALLEST_MAX_STALENESS_SECONDS) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected maxStalenessSeconds to be >= %d, %" PHONGO_LONG_FORMAT " given", MONGOC_SMALLEST_MAX_STALENESS_SECONDS, maxStalenessSeconds);
return;
}
if (maxStalenessSeconds > INT32_MAX) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected maxStalenessSeconds to be <= %" PRId32 ", %" PHONGO_LONG_FORMAT " given", INT32_MAX, maxStalenessSeconds);
return;
}
if (mongoc_read_prefs_get_mode(intern->read_preference) == MONGOC_READ_PRIMARY) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "maxStalenessSeconds may not be used with primary mode");
return;
}
}
mongoc_read_prefs_set_max_staleness_seconds(intern->read_preference, maxStalenessSeconds);
}
if (!mongoc_read_prefs_is_valid(intern->read_preference)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Read preference is not valid");
return;
}
} /* }}} */
/* {{{ proto integer MongoDB\Driver\ReadPreference::getMaxStalenessSeconds()
Returns the ReadPreference maxStalenessSeconds value */
static PHP_METHOD(ReadPreference, getMaxStalenessSeconds)
{
php_phongo_readpreference_t *intern;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_READPREFERENCE_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_LONG(mongoc_read_prefs_get_max_staleness_seconds(intern->read_preference));
} /* }}} */
/* {{{ proto integer MongoDB\Driver\ReadPreference::getMode()
Returns the ReadPreference mode */
static PHP_METHOD(ReadPreference, getMode)
{
php_phongo_readpreference_t *intern;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_READPREFERENCE_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_LONG(mongoc_read_prefs_get_mode(intern->read_preference));
} /* }}} */
/* {{{ proto array MongoDB\Driver\ReadPreference::getTagSets()
Returns the ReadPreference tag sets */
static PHP_METHOD(ReadPreference, getTagSets)
{
php_phongo_readpreference_t *intern;
const bson_t *tags;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_READPREFERENCE_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
tags = mongoc_read_prefs_get_tags(intern->read_preference);
if (tags->len) {
php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
/* Use native arrays for debugging output */
state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
php_phongo_bson_to_zval_ex(bson_get_data(tags), tags->len, &state);
#if PHP_VERSION_ID >= 70000
RETURN_ZVAL(&state.zchild, 0, 1);
#else
RETURN_ZVAL(state.zchild, 0, 1);
#endif
} else {
RETURN_NULL();
}
} /* }}} */
/* {{{ proto array MongoDB\Driver\ReadPreference::bsonSerialize()
*/
static PHP_METHOD(ReadPreference, bsonSerialize)
{
const mongoc_read_prefs_t *read_preference = phongo_read_preference_from_zval(getThis() TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
php_phongo_read_preference_to_zval(return_value, read_preference);
convert_to_object(return_value);
} /* }}} */
/* {{{ MongoDB\Driver\ReadPreference function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_ReadPreference___construct, 0, 0, 1)
ZEND_ARG_INFO(0, mode)
ZEND_ARG_ARRAY_INFO(0, tagSets, 1)
ZEND_ARG_ARRAY_INFO(0, options, 1)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_ReadPreference_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_readpreference_me[] = {
PHP_ME(ReadPreference, __construct, ai_ReadPreference___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ReadPreference, getMaxStalenessSeconds, ai_ReadPreference_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ReadPreference, getMode, ai_ReadPreference_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ReadPreference, getTagSets, ai_ReadPreference_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(ReadPreference, bsonSerialize, ai_ReadPreference_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\ReadPreference object handlers */
static zend_object_handlers php_phongo_handler_readpreference;
static void php_phongo_readpreference_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
php_phongo_readpreference_t *intern = Z_OBJ_READPREFERENCE(object);
zend_object_std_dtor(&intern->std TSRMLS_CC);
if (intern->read_preference) {
mongoc_read_prefs_destroy(intern->read_preference);
}
#if PHP_VERSION_ID < 70000
efree(intern);
#endif
} /* }}} */
static phongo_create_object_retval php_phongo_readpreference_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
php_phongo_readpreference_t *intern = NULL;
intern = PHONGO_ALLOC_OBJECT_T(php_phongo_readpreference_t, class_type);
zend_object_std_init(&intern->std, class_type TSRMLS_CC);
object_properties_init(&intern->std, class_type);
#if PHP_VERSION_ID >= 70000
intern->std.handlers = &php_phongo_handler_readpreference;
return &intern->std;
#else
{
zend_object_value retval;
retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_readpreference_free_object, NULL TSRMLS_CC);
retval.handlers = &php_phongo_handler_readpreference;
return retval;
}
#endif
} /* }}} */
static HashTable *php_phongo_readpreference_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
#if PHP_VERSION_ID >= 70000
zval retval;
#else
zval retval = zval_used_for_init;
#endif
const mongoc_read_prefs_t *read_prefs = phongo_read_preference_from_zval(object TSRMLS_CC);
*is_temp = 1;
php_phongo_read_preference_to_zval(&retval, read_prefs);
return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
void php_phongo_readpreference_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
zend_class_entry ce;
INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "ReadPreference", php_phongo_readpreference_me);
php_phongo_readpreference_ce = zend_register_internal_class(&ce TSRMLS_CC);
php_phongo_readpreference_ce->create_object = php_phongo_readpreference_create_object;
PHONGO_CE_FINAL(php_phongo_readpreference_ce);
PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_readpreference_ce);
zend_class_implements(php_phongo_readpreference_ce TSRMLS_CC, 1, php_phongo_serializable_ce);
memcpy(&php_phongo_handler_readpreference, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
php_phongo_handler_readpreference.get_debug_info = php_phongo_readpreference_get_debug_info;
#if PHP_VERSION_ID >= 70000
php_phongo_handler_readpreference.free_obj = php_phongo_readpreference_free_object;
php_phongo_handler_readpreference.offset = XtOffsetOf(php_phongo_readpreference_t, std);
#endif
zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("RP_PRIMARY"), MONGOC_READ_PRIMARY TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("RP_PRIMARY_PREFERRED"), MONGOC_READ_PRIMARY_PREFERRED TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("RP_SECONDARY"), MONGOC_READ_SECONDARY TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("RP_SECONDARY_PREFERRED"), MONGOC_READ_SECONDARY_PREFERRED TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("RP_NEAREST"), MONGOC_READ_NEAREST TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("NO_MAX_STALENESS"), MONGOC_NO_MAX_STALENESS TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("SMALLEST_MAX_STALENESS_SECONDS"), MONGOC_SMALLEST_MAX_STALENESS_SECONDS TSRMLS_CC);
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/MongoDB/Server.c b/mongodb-1.4.2/src/MongoDB/Server.c
similarity index 79%
rename from mongodb-1.3.4/src/MongoDB/Server.c
rename to mongodb-1.4.2/src/MongoDB/Server.c
index b83b3454..c63bf784 100644
--- a/mongodb-1.3.4/src/MongoDB/Server.c
+++ b/mongodb-1.4.2/src/MongoDB/Server.c
@@ -1,587 +1,683 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include "phongo_compat.h"
#include "php_phongo.h"
#include "php_bson.h"
zend_class_entry *php_phongo_server_ce;
-/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeCommand(string $db, MongoDB\Driver\Command $command[, MongoDB\Driver\ReadPreference $readPreference = null]))
+/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeCommand(string $db, MongoDB\Driver\Command $command[, array $options = null]))
Executes a Command on this Server */
static PHP_METHOD(Server, executeCommand)
{
php_phongo_server_t *intern;
char *db;
phongo_zpp_char_len db_len;
zval *command;
- zval *readPreference = NULL;
+ zval *options = NULL;
+ bool free_options = false;
DECLARE_RETURN_VALUE_USED
SUPPRESS_UNUSED_WARNING(return_value_ptr)
intern = Z_SERVER_OBJ_P(getThis());
- if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|O!", &db, &db_len, &command, php_phongo_command_ce, &readPreference, php_phongo_readpreference_ce) == FAILURE) {
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) {
return;
}
- phongo_execute_command(intern->client, db, command, readPreference, intern->server_id, return_value, return_value_used TSRMLS_CC);
+ options = php_phongo_prep_legacy_option(options, "readPreference", &free_options TSRMLS_CC);
+
+ phongo_execute_command(intern->client, PHONGO_COMMAND_RAW, db, command, options, intern->server_id, return_value, return_value_used TSRMLS_CC);
+
+ if (free_options) {
+ php_phongo_prep_legacy_option_free(options TSRMLS_CC);
+ }
} /* }}} */
-/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeQuery(string $namespace, MongoDB\Driver\Query $query[, MongoDB\Driver\ReadPreference $readPreference = null]))
+/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeReadCommand(string $db, MongoDB\Driver\Command $command[, array $options = null]))
+ Executes a ReadCommand on this Server */
+static PHP_METHOD(Server, executeReadCommand)
+{
+ php_phongo_server_t *intern;
+ char *db;
+ phongo_zpp_char_len db_len;
+ zval *command;
+ zval *options = NULL;
+ DECLARE_RETURN_VALUE_USED
+ SUPPRESS_UNUSED_WARNING(return_value_ptr)
+
+
+ intern = Z_SERVER_OBJ_P(getThis());
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) {
+ return;
+ }
+
+ phongo_execute_command(intern->client, PHONGO_COMMAND_READ, db, command, options, intern->server_id, return_value, return_value_used TSRMLS_CC);
+} /* }}} */
+
+/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeWriteCommand(string $db, MongoDB\Driver\Command $command[, array $options = null]))
+ Executes a WriteCommand on this Server */
+static PHP_METHOD(Server, executeWriteCommand)
+{
+ php_phongo_server_t *intern;
+ char *db;
+ phongo_zpp_char_len db_len;
+ zval *command;
+ zval *options = NULL;
+ DECLARE_RETURN_VALUE_USED
+ SUPPRESS_UNUSED_WARNING(return_value_ptr)
+
+
+ intern = Z_SERVER_OBJ_P(getThis());
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) {
+ return;
+ }
+
+ phongo_execute_command(intern->client, PHONGO_COMMAND_WRITE, db, command, options, intern->server_id, return_value, return_value_used TSRMLS_CC);
+} /* }}} */
+
+/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeReadWriteCommand(string $db, MongoDB\Driver\Command $command[, array $options = null]))
+ Executes a ReadWriteCommand on this Server */
+static PHP_METHOD(Server, executeReadWriteCommand)
+{
+ php_phongo_server_t *intern;
+ char *db;
+ phongo_zpp_char_len db_len;
+ zval *command;
+ zval *options = NULL;
+ DECLARE_RETURN_VALUE_USED
+ SUPPRESS_UNUSED_WARNING(return_value_ptr)
+
+
+ intern = Z_SERVER_OBJ_P(getThis());
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) {
+ return;
+ }
+
+ phongo_execute_command(intern->client, PHONGO_COMMAND_READ_WRITE, db, command, options, intern->server_id, return_value, return_value_used TSRMLS_CC);
+} /* }}} */
+
+/* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeQuery(string $namespace, MongoDB\Driver\Query $query[, array $options = null]))
Executes a Query on this Server */
static PHP_METHOD(Server, executeQuery)
{
php_phongo_server_t *intern;
char *namespace;
phongo_zpp_char_len namespace_len;
zval *query;
- zval *readPreference = NULL;
+ zval *options = NULL;
+ bool free_options = false;
DECLARE_RETURN_VALUE_USED
SUPPRESS_UNUSED_WARNING(return_value_ptr)
intern = Z_SERVER_OBJ_P(getThis());
- if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|O!", &namespace, &namespace_len, &query, php_phongo_query_ce, &readPreference, php_phongo_readpreference_ce) == FAILURE) {
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &namespace, &namespace_len, &query, php_phongo_query_ce, &options) == FAILURE) {
return;
}
- phongo_execute_query(intern->client, namespace, query, readPreference, intern->server_id, return_value, return_value_used TSRMLS_CC);
+ options = php_phongo_prep_legacy_option(options, "readPreference", &free_options TSRMLS_CC);
+
+ phongo_execute_query(intern->client, namespace, query, options, intern->server_id, return_value, return_value_used TSRMLS_CC);
+
+ if (free_options) {
+ php_phongo_prep_legacy_option_free(options TSRMLS_CC);
+ }
} /* }}} */
-/* {{{ proto MongoDB\Driver\WriteResult MongoDB\Driver\Server::executeBulkWrite(string $namespace, MongoDB\Driver\BulkWrite $zbulk[, MongoDB\Driver\WriteConcern $writeConcern = null])
+/* {{{ proto MongoDB\Driver\WriteResult MongoDB\Driver\Server::executeBulkWrite(string $namespace, MongoDB\Driver\BulkWrite $zbulk[, array $options = null])
Executes a BulkWrite (i.e. any number of insert, update, and delete ops) on
this Server */
static PHP_METHOD(Server, executeBulkWrite)
{
php_phongo_server_t *intern;
char *namespace;
phongo_zpp_char_len namespace_len;
zval *zbulk;
- zval *zwrite_concern = NULL;
php_phongo_bulkwrite_t *bulk;
+ zval *options = NULL;
+ bool free_options = false;
DECLARE_RETURN_VALUE_USED
SUPPRESS_UNUSED_WARNING(return_value_ptr)
intern = Z_SERVER_OBJ_P(getThis());
- if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|O!", &namespace, &namespace_len, &zbulk, php_phongo_bulkwrite_ce, &zwrite_concern, php_phongo_writeconcern_ce) == FAILURE) {
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &namespace, &namespace_len, &zbulk, php_phongo_bulkwrite_ce, &options, php_phongo_writeconcern_ce) == FAILURE) {
return;
}
bulk = Z_BULKWRITE_OBJ_P(zbulk);
- phongo_execute_write(intern->client, namespace, bulk, phongo_write_concern_from_zval(zwrite_concern TSRMLS_CC), intern->server_id, return_value, return_value_used TSRMLS_CC);
+ options = php_phongo_prep_legacy_option(options, "writeConcern", &free_options TSRMLS_CC);
+
+ phongo_execute_bulk_write(intern->client, namespace, bulk, options, intern->server_id, return_value, return_value_used TSRMLS_CC);
+
+ if (free_options) {
+ php_phongo_prep_legacy_option_free(options TSRMLS_CC);
+ }
} /* }}} */
/* {{{ proto string MongoDB\Driver\Server::getHost()
Returns the hostname for this Server */
static PHP_METHOD(Server, getHost)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
PHONGO_RETVAL_STRING(mongoc_server_description_host(sd)->host);
mongoc_server_description_destroy(sd);
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ proto array MongoDB\Driver\Server::getTags()
Returns the currently configured tags for this Server */
static PHP_METHOD(Server, getTags)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
const bson_t *is_master = mongoc_server_description_ismaster(sd);
bson_iter_t iter;
if (bson_iter_init_find(&iter, is_master, "tags") && BSON_ITER_HOLDS_DOCUMENT(&iter)) {
const uint8_t *bytes;
uint32_t len;
php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
bson_iter_document(&iter, &len, &bytes);
if (!php_phongo_bson_to_zval_ex(bytes, len, &state)) {
/* Exception should already have been thrown */
zval_ptr_dtor(&state.zchild);
mongoc_server_description_destroy(sd);
return;
}
mongoc_server_description_destroy(sd);
#if PHP_VERSION_ID >= 70000
RETURN_ZVAL(&state.zchild, 0, 1);
#else
RETURN_ZVAL(state.zchild, 0, 1);
#endif
}
array_init(return_value);
mongoc_server_description_destroy(sd);
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ proto array MongoDB\Driver\Server::getInfo()
Returns the last isMaster result document for this Server */
static PHP_METHOD(Server, getInfo)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
const bson_t *is_master = mongoc_server_description_ismaster(sd);
php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
if (!php_phongo_bson_to_zval_ex(bson_get_data(is_master), is_master->len, &state)) {
/* Exception should already have been thrown */
zval_ptr_dtor(&state.zchild);
mongoc_server_description_destroy(sd);
return;
}
mongoc_server_description_destroy(sd);
#if PHP_VERSION_ID >= 70000
RETURN_ZVAL(&state.zchild, 0, 1);
#else
RETURN_ZVAL(state.zchild, 0, 1);
#endif
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ proto integer MongoDB\Driver\Server::getLatency()
Returns the last measured latency for this Server */
static PHP_METHOD(Server, getLatency)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
RETVAL_LONG((phongo_long) mongoc_server_description_round_trip_time(sd));
mongoc_server_description_destroy(sd);
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ proto integer MongoDB\Driver\Server::getPort()
Returns the port for this Server */
static PHP_METHOD(Server, getPort)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
RETVAL_LONG(mongoc_server_description_host(sd)->port);
mongoc_server_description_destroy(sd);
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ proto integer MongoDB\Driver\Server::getType()
Returns the node type of this Server */
static PHP_METHOD(Server, getType)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
RETVAL_LONG(php_phongo_server_description_type(sd));
mongoc_server_description_destroy(sd);
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ proto boolean MongoDB\Driver\Server::isPrimary()
Returns whether this Server is a primary member of a replica set */
static PHP_METHOD(Server, isPrimary)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
RETVAL_BOOL(!strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_PRIMARY].name));
mongoc_server_description_destroy(sd);
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ proto boolean MongoDB\Driver\Server::isSecondary()
Returns whether this Server is a secondary member of a replica set */
static PHP_METHOD(Server, isSecondary)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
RETVAL_BOOL(!strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_SECONDARY].name));
mongoc_server_description_destroy(sd);
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ proto boolean MongoDB\Driver\Server::isArbiter()
Returns whether this Server is an arbiter member of a replica set */
static PHP_METHOD(Server, isArbiter)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
RETVAL_BOOL(!strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_ARBITER].name));
mongoc_server_description_destroy(sd);
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ proto boolean MongoDB\Driver\Server::isHidden()
Returns whether this Server is a hidden member of a replica set */
static PHP_METHOD(Server, isHidden)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
bson_iter_t iter;
RETVAL_BOOL(bson_iter_init_find_case(&iter, mongoc_server_description_ismaster(sd), "hidden") && bson_iter_as_bool(&iter));
mongoc_server_description_destroy(sd);
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ proto boolean MongoDB\Driver\Server::isPassive()
Returns whether this Server is a passive member of a replica set */
static PHP_METHOD(Server, isPassive)
{
php_phongo_server_t *intern;
mongoc_server_description_t *sd;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_SERVER_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
bson_iter_t iter;
RETVAL_BOOL(bson_iter_init_find_case(&iter, mongoc_server_description_ismaster(sd), "passive") && bson_iter_as_bool(&iter));
mongoc_server_description_destroy(sd);
return;
}
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
} /* }}} */
/* {{{ MongoDB\Driver\Server function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_Server_executeCommand, 0, 0, 2)
ZEND_ARG_INFO(0, db)
ZEND_ARG_OBJ_INFO(0, command, MongoDB\\Driver\\Command, 0)
- ZEND_ARG_OBJ_INFO(0, readPreference, MongoDB\\Driver\\ReadPreference, 1)
+ ZEND_ARG_INFO(0, options)
+ZEND_END_ARG_INFO()
+
+ZEND_BEGIN_ARG_INFO_EX(ai_Server_executeRWCommand, 0, 0, 2)
+ ZEND_ARG_INFO(0, db)
+ ZEND_ARG_OBJ_INFO(0, command, MongoDB\\Driver\\Command, 0)
+ ZEND_ARG_ARRAY_INFO(0, options, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Server_executeQuery, 0, 0, 2)
ZEND_ARG_INFO(0, namespace)
ZEND_ARG_OBJ_INFO(0, zquery, MongoDB\\Driver\\Query, 0)
- ZEND_ARG_OBJ_INFO(0, readPreference, MongoDB\\Driver\\ReadPreference, 1)
+ ZEND_ARG_INFO(0, options)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Server_executeBulkWrite, 0, 0, 2)
ZEND_ARG_INFO(0, namespace)
ZEND_ARG_OBJ_INFO(0, zbulk, MongoDB\\Driver\\BulkWrite, 0)
- ZEND_ARG_OBJ_INFO(0, writeConcern, MongoDB\\Driver\\WriteConcern, 1)
+ ZEND_ARG_INFO(0, options)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_Server_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_server_me[] = {
PHP_ME(Server, executeCommand, ai_Server_executeCommand, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Server, executeReadCommand, ai_Server_executeRWCommand, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Server, executeWriteCommand, ai_Server_executeRWCommand, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Server, executeReadWriteCommand, ai_Server_executeRWCommand, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, executeQuery, ai_Server_executeQuery, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, executeBulkWrite, ai_Server_executeBulkWrite, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, getHost, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, getTags, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, getInfo, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, getLatency, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, getPort, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, getType, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, isPrimary, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, isSecondary, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, isArbiter, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, isHidden, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(Server, isPassive, ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Server_void, ZEND_ACC_PRIVATE|ZEND_ACC_FINAL)
ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Server_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\Server object handlers */
static zend_object_handlers php_phongo_handler_server;
static int php_phongo_server_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */
{
php_phongo_server_t *intern1, *intern2;
mongoc_server_description_t *sd1, *sd2;
int retval = 0;
intern1 = Z_SERVER_OBJ_P(o1);
intern2 = Z_SERVER_OBJ_P(o2);
sd1 = mongoc_client_get_server_description(intern1->client, intern1->server_id);
sd2 = mongoc_client_get_server_description(intern2->client, intern2->server_id);
if (sd1 && sd2) {
retval = strcasecmp(mongoc_server_description_host(sd1)->host_and_port, mongoc_server_description_host(sd2)->host_and_port);
} else {
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description(s)");
}
if (sd1) {
mongoc_server_description_destroy(sd1);
}
if (sd2) {
mongoc_server_description_destroy(sd2);
}
return retval;
} /* }}} */
static void php_phongo_server_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
php_phongo_server_t *intern = Z_OBJ_SERVER(object);
zend_object_std_dtor(&intern->std TSRMLS_CC);
#if PHP_VERSION_ID < 70000
efree(intern);
#endif
} /* }}} */
static phongo_create_object_retval php_phongo_server_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
php_phongo_server_t *intern = NULL;
intern = PHONGO_ALLOC_OBJECT_T(php_phongo_server_t, class_type);
zend_object_std_init(&intern->std, class_type TSRMLS_CC);
object_properties_init(&intern->std, class_type);
#if PHP_VERSION_ID >= 70000
intern->std.handlers = &php_phongo_handler_server;
return &intern->std;
#else
{
zend_object_value retval;
retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_server_free_object, NULL TSRMLS_CC);
retval.handlers = &php_phongo_handler_server;
return retval;
}
#endif
} /* }}} */
static HashTable *php_phongo_server_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
php_phongo_server_t *intern = NULL;
#if PHP_VERSION_ID >= 70000
zval retval;
#else
zval retval = zval_used_for_init;
#endif
mongoc_server_description_t *sd;
*is_temp = 1;
intern = Z_SERVER_OBJ_P(object);
if (!(sd = mongoc_client_get_server_description(intern->client, intern->server_id))) {
phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description");
return NULL;
}
php_phongo_server_to_zval(&retval, sd);
mongoc_server_description_destroy(sd);
return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
void php_phongo_server_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
zend_class_entry ce;
INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Server", php_phongo_server_me);
php_phongo_server_ce = zend_register_internal_class(&ce TSRMLS_CC);
php_phongo_server_ce->create_object = php_phongo_server_create_object;
PHONGO_CE_FINAL(php_phongo_server_ce);
PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_server_ce);
memcpy(&php_phongo_handler_server, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
php_phongo_handler_server.compare_objects = php_phongo_server_compare_objects;
php_phongo_handler_server.get_debug_info = php_phongo_server_get_debug_info;
#if PHP_VERSION_ID >= 70000
php_phongo_handler_server.free_obj = php_phongo_server_free_object;
php_phongo_handler_server.offset = XtOffsetOf(php_phongo_server_t, std);
#endif
zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_UNKNOWN"), PHONGO_SERVER_UNKNOWN TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_STANDALONE"), PHONGO_SERVER_STANDALONE TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_MONGOS"), PHONGO_SERVER_MONGOS TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_POSSIBLE_PRIMARY"), PHONGO_SERVER_POSSIBLE_PRIMARY TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_RS_PRIMARY"), PHONGO_SERVER_RS_PRIMARY TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_RS_SECONDARY"), PHONGO_SERVER_RS_SECONDARY TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_RS_ARBITER"), PHONGO_SERVER_RS_ARBITER TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_RS_OTHER"), PHONGO_SERVER_RS_OTHER TSRMLS_CC);
zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_RS_GHOST"), PHONGO_SERVER_RS_GHOST TSRMLS_CC);
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.4.2/src/MongoDB/Session.c b/mongodb-1.4.2/src/MongoDB/Session.c
new file mode 100644
index 00000000..1ee78c9b
--- /dev/null
+++ b/mongodb-1.4.2/src/MongoDB/Session.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright 2017 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <php.h>
+#include <Zend/zend_interfaces.h>
+
+#include "phongo_compat.h"
+#include "php_phongo.h"
+#include "php_bson.h"
+
+zend_class_entry *php_phongo_session_ce;
+
+static bool php_phongo_session_get_timestamp_parts(zval *obj, uint32_t *timestamp, uint32_t *increment TSRMLS_DC)
+{
+ bool retval = false;
+#if PHP_VERSION_ID >= 70000
+ zval ztimestamp;
+ zval zincrement;
+
+ zend_call_method_with_0_params(obj, NULL, NULL, "getTimestamp", &ztimestamp);
+
+ if (Z_ISUNDEF(ztimestamp) || EG(exception)) {
+ goto cleanup;
+ }
+
+ zend_call_method_with_0_params(obj, NULL, NULL, "getIncrement", &zincrement);
+
+ if (Z_ISUNDEF(zincrement) || EG(exception)) {
+ goto cleanup;
+ }
+
+ *timestamp = Z_LVAL(ztimestamp);
+ *increment = Z_LVAL(zincrement);
+#else
+ zval *ztimestamp = NULL;
+ zval *zincrement = NULL;
+
+ zend_call_method_with_0_params(&obj, NULL, NULL, "getTimestamp", &ztimestamp);
+
+ if (Z_ISUNDEF(ztimestamp) || EG(exception)) {
+ goto cleanup;
+ }
+
+ zend_call_method_with_0_params(&obj, NULL, NULL, "getIncrement", &zincrement);
+
+ if (Z_ISUNDEF(zincrement) || EG(exception)) {
+ goto cleanup;
+ }
+
+ *timestamp = Z_LVAL_P(ztimestamp);
+ *increment = Z_LVAL_P(zincrement);
+#endif
+
+ retval = true;
+
+cleanup:
+ if (!Z_ISUNDEF(ztimestamp)) {
+ zval_ptr_dtor(&ztimestamp);
+ }
+
+ if (!Z_ISUNDEF(zincrement)) {
+ zval_ptr_dtor(&zincrement);
+ }
+
+ return retval;
+}
+
+/* {{{ proto void MongoDB\Driver\Session::advanceClusterTime(array|object $clusterTime)
+ Advances the cluster time for this Session */
+static PHP_METHOD(Session, advanceClusterTime)
+{
+ php_phongo_session_t *intern;
+ zval *zcluster_time;
+ bson_t cluster_time = BSON_INITIALIZER;
+ SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
+
+
+ intern = Z_SESSION_OBJ_P(getThis());
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A", &zcluster_time) == FAILURE) {
+ return;
+ }
+
+ php_phongo_zval_to_bson(zcluster_time, PHONGO_BSON_NONE, &cluster_time, NULL TSRMLS_CC);
+
+ /* An exception may be thrown during BSON conversion */
+ if (EG(exception)) {
+ goto cleanup;
+ }
+
+ mongoc_client_session_advance_cluster_time(intern->client_session, &cluster_time);
+
+cleanup:
+ bson_destroy(&cluster_time);
+} /* }}} */
+
+/* {{{ proto void MongoDB\Driver\Session::advanceOperationTime(MongoDB\BSON\TimestampInterface $timestamp)
+ Advances the operation time for this Session */
+static PHP_METHOD(Session, advanceOperationTime)
+{
+ php_phongo_session_t *intern;
+ zval *ztimestamp;
+ uint32_t timestamp = 0;
+ uint32_t increment = 0;
+ SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
+
+
+ intern = Z_SESSION_OBJ_P(getThis());
+
+ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &ztimestamp, php_phongo_timestamp_interface_ce) == FAILURE) {
+ return;
+ }
+
+ if (!php_phongo_session_get_timestamp_parts(ztimestamp, &timestamp, &increment TSRMLS_CC)) {
+ return;
+ }
+
+ mongoc_client_session_advance_operation_time(intern->client_session, timestamp, increment);
+} /* }}} */
+
+/* {{{ proto object|null MongoDB\Driver\Session::getClusterTime()
+ Returns the cluster time for this Session */
+static PHP_METHOD(Session, getClusterTime)
+{
+ php_phongo_session_t *intern;
+ const bson_t *cluster_time;
+ php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
+ SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
+
+
+ intern = Z_SESSION_OBJ_P(getThis());
+
+ if (zend_parse_parameters_none() == FAILURE) {
+ return;
+ }
+
+ cluster_time = mongoc_client_session_get_cluster_time(intern->client_session);
+
+ if (!cluster_time) {
+ RETURN_NULL();
+ }
+
+ if (!php_phongo_bson_to_zval_ex(bson_get_data(cluster_time), cluster_time->len, &state)) {
+ /* Exception should already have been thrown */
+ zval_ptr_dtor(&state.zchild);
+ return;
+ }
+
+#if PHP_VERSION_ID >= 70000
+ RETURN_ZVAL(&state.zchild, 0, 1);
+#else
+ RETURN_ZVAL(state.zchild, 0, 1);
+#endif
+} /* }}} */
+
+/* {{{ proto object MongoDB\Driver\Session::getLogicalSessionId()
+ Returns the logical session ID for this Session */
+static PHP_METHOD(Session, getLogicalSessionId)
+{
+ php_phongo_session_t *intern;
+ const bson_t *lsid;
+ php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
+ SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
+
+
+ intern = Z_SESSION_OBJ_P(getThis());
+
+ if (zend_parse_parameters_none() == FAILURE) {
+ return;
+ }
+
+ lsid = mongoc_client_session_get_lsid(intern->client_session);
+
+
+ if (!php_phongo_bson_to_zval_ex(bson_get_data(lsid), lsid->len, &state)) {
+ /* Exception should already have been thrown */
+ zval_ptr_dtor(&state.zchild);
+ return;
+ }
+
+#if PHP_VERSION_ID >= 70000
+ RETURN_ZVAL(&state.zchild, 0, 1);
+#else
+ RETURN_ZVAL(state.zchild, 0, 1);
+#endif
+} /* }}} */
+
+/* {{{ proto MongoDB\BSON\Timestamp|null MongoDB\Driver\Session::getOperationTime()
+ Returns the operation time for this Session */
+static PHP_METHOD(Session, getOperationTime)
+{
+ php_phongo_session_t *intern;
+ uint32_t timestamp, increment;
+ SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
+
+
+ intern = Z_SESSION_OBJ_P(getThis());
+
+ if (zend_parse_parameters_none() == FAILURE) {
+ return;
+ }
+
+ mongoc_client_session_get_operation_time(intern->client_session, &timestamp, &increment);
+
+ /* mongoc_client_session_get_operation_time() returns 0 for both parts if
+ * the session has not been used. According to the causal consistency spec,
+ * the operation time for an unused session is null. */
+ if (timestamp == 0 && increment == 0) {
+ RETURN_NULL();
+ }
+
+ php_phongo_new_timestamp_from_increment_and_timestamp(return_value, increment, timestamp TSRMLS_CC);
+} /* }}} */
+
+/* {{{ MongoDB\Driver\Session function entries */
+ZEND_BEGIN_ARG_INFO_EX(ai_Session_advanceClusterTime, 0, 0, 1)
+ ZEND_ARG_INFO(0, clusterTime)
+ZEND_END_ARG_INFO()
+
+ZEND_BEGIN_ARG_INFO_EX(ai_Session_advanceOperationTime, 0, 0, 1)
+ ZEND_ARG_INFO(0, timestamp)
+ZEND_END_ARG_INFO()
+
+ZEND_BEGIN_ARG_INFO_EX(ai_Session_void, 0, 0, 0)
+ZEND_END_ARG_INFO()
+
+static zend_function_entry php_phongo_session_me[] = {
+ PHP_ME(Session, advanceClusterTime, ai_Session_advanceClusterTime, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Session, advanceOperationTime, ai_Session_advanceOperationTime, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Session, getClusterTime, ai_Session_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Session, getLogicalSessionId, ai_Session_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_ME(Session, getOperationTime, ai_Session_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Session_void, ZEND_ACC_PRIVATE|ZEND_ACC_FINAL)
+ ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Session_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
+ PHP_FE_END
+};
+/* }}} */
+
+/* {{{ MongoDB\Driver\Session object handlers */
+static zend_object_handlers php_phongo_handler_session;
+
+static void php_phongo_session_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
+{
+ php_phongo_session_t *intern = Z_OBJ_SESSION(object);
+
+ zend_object_std_dtor(&intern->std TSRMLS_CC);
+
+ if (intern->client_session) {
+ mongoc_client_session_destroy(intern->client_session);
+ }
+
+#if PHP_VERSION_ID < 70000
+ efree(intern);
+#endif
+} /* }}} */
+
+static phongo_create_object_retval php_phongo_session_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
+{
+ php_phongo_session_t *intern = NULL;
+
+ intern = PHONGO_ALLOC_OBJECT_T(php_phongo_session_t, class_type);
+
+ zend_object_std_init(&intern->std, class_type TSRMLS_CC);
+ object_properties_init(&intern->std, class_type);
+
+#if PHP_VERSION_ID >= 70000
+ intern->std.handlers = &php_phongo_handler_session;
+
+ return &intern->std;
+#else
+ {
+ zend_object_value retval;
+ retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_session_free_object, NULL TSRMLS_CC);
+ retval.handlers = &php_phongo_handler_session;
+
+ return retval;
+ }
+#endif
+} /* }}} */
+
+static HashTable *php_phongo_session_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
+{
+ php_phongo_session_t *intern = NULL;
+ const mongoc_session_opt_t *cs_opts;
+#if PHP_VERSION_ID >= 70000
+ zval retval;
+#else
+ zval retval = zval_used_for_init;
+#endif
+
+ *is_temp = 1;
+ intern = Z_SESSION_OBJ_P(object);
+
+ array_init(&retval);
+
+ {
+ const bson_t *lsid;
+
+ php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
+ /* Use native arrays for debugging output */
+ state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
+ state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
+
+ lsid = mongoc_client_session_get_lsid(intern->client_session);
+
+ php_phongo_bson_to_zval_ex(bson_get_data(lsid), lsid->len, &state);
+
+#if PHP_VERSION_ID >= 70000
+ ADD_ASSOC_ZVAL_EX(&retval, "logicalSessionId", &state.zchild);
+#else
+ ADD_ASSOC_ZVAL_EX(&retval, "logicalSessionId", state.zchild);
+#endif
+ }
+
+ {
+ const bson_t *cluster_time;
+
+ php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
+ /* Use native arrays for debugging output */
+ state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
+ state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY;
+
+ cluster_time = mongoc_client_session_get_cluster_time(intern->client_session);
+
+ if (cluster_time) {
+ php_phongo_bson_to_zval_ex(bson_get_data(cluster_time), cluster_time->len, &state);
+
+#if PHP_VERSION_ID >= 70000
+ ADD_ASSOC_ZVAL_EX(&retval, "clusterTime", &state.zchild);
+#else
+ ADD_ASSOC_ZVAL_EX(&retval, "clusterTime", state.zchild);
+#endif
+ } else {
+ ADD_ASSOC_NULL_EX(&retval, "clusterTime");
+ }
+ }
+
+ cs_opts = mongoc_client_session_get_opts(intern->client_session);
+ ADD_ASSOC_BOOL_EX(&retval, "causalConsistency", mongoc_session_opts_get_causal_consistency(cs_opts));
+
+ {
+ uint32_t timestamp, increment;
+
+ mongoc_client_session_get_operation_time(intern->client_session, &timestamp, &increment);
+
+ if (timestamp && increment) {
+#if PHP_VERSION_ID >= 70000
+ zval ztimestamp;
+
+ php_phongo_new_timestamp_from_increment_and_timestamp(&ztimestamp, increment, timestamp TSRMLS_CC);
+ ADD_ASSOC_ZVAL_EX(&retval, "operationTime", &ztimestamp);
+#else
+ zval *ztimestamp;
+
+ MAKE_STD_ZVAL(ztimestamp);
+ php_phongo_new_timestamp_from_increment_and_timestamp(ztimestamp, increment, timestamp TSRMLS_CC);
+ ADD_ASSOC_ZVAL_EX(&retval, "operationTime", ztimestamp);
+#endif
+ } else {
+ ADD_ASSOC_NULL_EX(&retval, "operationTime");
+ }
+ }
+
+ return Z_ARRVAL(retval);
+} /* }}} */
+/* }}} */
+
+void php_phongo_session_init_ce(INIT_FUNC_ARGS) /* {{{ */
+{
+ zend_class_entry ce;
+
+ INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Session", php_phongo_session_me);
+ php_phongo_session_ce = zend_register_internal_class(&ce TSRMLS_CC);
+ php_phongo_session_ce->create_object = php_phongo_session_create_object;
+ PHONGO_CE_FINAL(php_phongo_session_ce);
+ PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_session_ce);
+
+ memcpy(&php_phongo_handler_session, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
+ php_phongo_handler_session.get_debug_info = php_phongo_session_get_debug_info;
+#if PHP_VERSION_ID >= 70000
+ php_phongo_handler_session.free_obj = php_phongo_session_free_object;
+ php_phongo_handler_session.offset = XtOffsetOf(php_phongo_session_t, std);
+#endif
+} /* }}} */
+
+/*
+ * Local variables:
+ * tab-width: 4
+ * c-basic-offset: 4
+ * End:
+ * vim600: noet sw=4 ts=4 fdm=marker
+ * vim<600: noet sw=4 ts=4
+ */
diff --git a/mongodb-1.3.4/src/MongoDB/WriteConcern.c b/mongodb-1.4.2/src/MongoDB/WriteConcern.c
similarity index 99%
rename from mongodb-1.3.4/src/MongoDB/WriteConcern.c
rename to mongodb-1.4.2/src/MongoDB/WriteConcern.c
index 730d3707..70c0b610 100644
--- a/mongodb-1.3.4/src/MongoDB/WriteConcern.c
+++ b/mongodb-1.4.2/src/MongoDB/WriteConcern.c
@@ -1,301 +1,301 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <php.h>
#include <Zend/zend_interfaces.h>
#include "phongo_compat.h"
#include "php_phongo.h"
zend_class_entry *php_phongo_writeconcern_ce;
/* {{{ proto void MongoDB\Driver\WriteConcern::__construct(integer|string $w[, integer $wtimeout[, boolean $journal]])
Constructs a new WriteConcern */
static PHP_METHOD(WriteConcern, __construct)
{
php_phongo_writeconcern_t *intern;
zend_error_handling error_handling;
zval *w, *journal;
phongo_long wtimeout = 0;
SUPPRESS_UNUSED_WARNING(return_value) SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC);
intern = Z_WRITECONCERN_OBJ_P(getThis());
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z|lz", &w, &wtimeout, &journal) == FAILURE) {
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
intern->write_concern = mongoc_write_concern_new();
if (Z_TYPE_P(w) == IS_LONG) {
if (Z_LVAL_P(w) < -3) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected w to be >= -3, %ld given", Z_LVAL_P(w));
return;
}
mongoc_write_concern_set_w(intern->write_concern, Z_LVAL_P(w));
} else if (Z_TYPE_P(w) == IS_STRING) {
if (strcmp(Z_STRVAL_P(w), PHONGO_WRITE_CONCERN_W_MAJORITY) == 0) {
mongoc_write_concern_set_w(intern->write_concern, MONGOC_WRITE_CONCERN_W_MAJORITY);
} else {
mongoc_write_concern_set_wtag(intern->write_concern, Z_STRVAL_P(w));
}
} else {
- phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected w to be integer or string, %s given", zend_get_type_by_const(Z_TYPE_P(w)));
+ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected w to be integer or string, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(w));
return;
}
switch(ZEND_NUM_ARGS()) {
case 3:
if (Z_TYPE_P(journal) != IS_NULL) {
#ifdef ZEND_ENGINE_3
mongoc_write_concern_set_journal(intern->write_concern, zend_is_true(journal));
#else
mongoc_write_concern_set_journal(intern->write_concern, Z_BVAL_P(journal));
#endif
}
/* fallthrough */
case 2:
if (wtimeout < 0) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected wtimeout to be >= 0, %" PHONGO_LONG_FORMAT " given", wtimeout);
return;
}
if (wtimeout > INT32_MAX) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected wtimeout to be <= %" PRId32 ", %" PHONGO_LONG_FORMAT " given", INT32_MAX, wtimeout);
return;
}
mongoc_write_concern_set_wtimeout(intern->write_concern, wtimeout);
}
} /* }}} */
/* {{{ proto string|integer|null MongoDB\Driver\WriteConcern::getW()
Returns the WriteConcern "w" option */
static PHP_METHOD(WriteConcern, getW)
{
php_phongo_writeconcern_t *intern;
const char *wtag;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_WRITECONCERN_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
wtag = mongoc_write_concern_get_wtag(intern->write_concern);
if (wtag) {
PHONGO_RETURN_STRING(wtag);
}
if (mongoc_write_concern_get_wmajority(intern->write_concern)) {
PHONGO_RETURN_STRING(PHONGO_WRITE_CONCERN_W_MAJORITY);
}
if (mongoc_write_concern_get_w(intern->write_concern) != MONGOC_WRITE_CONCERN_W_DEFAULT) {
RETURN_LONG(mongoc_write_concern_get_w(intern->write_concern));
}
RETURN_NULL();
} /* }}} */
/* {{{ proto integer MongoDB\Driver\WriteConcern::getWtimeout()
Returns the WriteConcern "wtimeout" option */
static PHP_METHOD(WriteConcern, getWtimeout)
{
php_phongo_writeconcern_t *intern;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_WRITECONCERN_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_LONG(mongoc_write_concern_get_wtimeout(intern->write_concern));
} /* }}} */
/* {{{ proto null|boolean MongoDB\Driver\WriteConcern::getJournal()
Returns the WriteConcern "journal" option */
static PHP_METHOD(WriteConcern, getJournal)
{
php_phongo_writeconcern_t *intern;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_WRITECONCERN_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (mongoc_write_concern_journal_is_set(intern->write_concern)) {
RETURN_BOOL(mongoc_write_concern_get_journal(intern->write_concern));
}
RETURN_NULL();
} /* }}} */
/* {{{ proto boolean MongoDB\Driver\WriteConcern::isDefault()
Returns whether the write concern has not been modified (i.e. from a Manager
with no write concern URI options). */
static PHP_METHOD(WriteConcern, isDefault)
{
php_phongo_writeconcern_t *intern;
SUPPRESS_UNUSED_WARNING(return_value_ptr) SUPPRESS_UNUSED_WARNING(return_value_used)
intern = Z_WRITECONCERN_OBJ_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_BOOL(mongoc_write_concern_is_default(intern->write_concern));
} /* }}} */
/* {{{ proto array MongoDB\Driver\WriteConcern::bsonSerialize()
*/
static PHP_METHOD(WriteConcern, bsonSerialize)
{
const mongoc_write_concern_t *write_concern = phongo_write_concern_from_zval(getThis() TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
php_phongo_write_concern_to_zval(return_value, write_concern);
convert_to_object(return_value);
} /* }}} */
/* {{{ MongoDB\Driver\WriteConcern function entries */
ZEND_BEGIN_ARG_INFO_EX(ai_WriteConcern___construct, 0, 0, 1)
ZEND_ARG_INFO(0, w)
ZEND_ARG_INFO(0, wtimeout)
ZEND_ARG_INFO(0, journal)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(ai_WriteConcern_void, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry php_phongo_writeconcern_me[] = {
PHP_ME(WriteConcern, __construct, ai_WriteConcern___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(WriteConcern, getW, ai_WriteConcern_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(WriteConcern, getWtimeout, ai_WriteConcern_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(WriteConcern, getJournal, ai_WriteConcern_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(WriteConcern, isDefault, ai_WriteConcern_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_ME(WriteConcern, bsonSerialize, ai_WriteConcern_void, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL)
PHP_FE_END
};
/* }}} */
/* {{{ MongoDB\Driver\WriteConcern object handlers */
static zend_object_handlers php_phongo_handler_writeconcern;
static void php_phongo_writeconcern_free_object(phongo_free_object_arg *object TSRMLS_DC) /* {{{ */
{
php_phongo_writeconcern_t *intern = Z_OBJ_WRITECONCERN(object);
zend_object_std_dtor(&intern->std TSRMLS_CC);
if (intern->write_concern) {
mongoc_write_concern_destroy(intern->write_concern);
}
#if PHP_VERSION_ID < 70000
efree(intern);
#endif
} /* }}} */
static phongo_create_object_retval php_phongo_writeconcern_create_object(zend_class_entry *class_type TSRMLS_DC) /* {{{ */
{
php_phongo_writeconcern_t *intern = NULL;
intern = PHONGO_ALLOC_OBJECT_T(php_phongo_writeconcern_t, class_type);
zend_object_std_init(&intern->std, class_type TSRMLS_CC);
object_properties_init(&intern->std, class_type);
#if PHP_VERSION_ID >= 70000
intern->std.handlers = &php_phongo_handler_writeconcern;
return &intern->std;
#else
{
zend_object_value retval;
retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_writeconcern_free_object, NULL TSRMLS_CC);
retval.handlers = &php_phongo_handler_writeconcern;
return retval;
}
#endif
} /* }}} */
static HashTable *php_phongo_writeconcern_get_debug_info(zval *object, int *is_temp TSRMLS_DC) /* {{{ */
{
#if PHP_VERSION_ID >= 70000
zval retval;
#else
zval retval = zval_used_for_init;
#endif
const mongoc_write_concern_t *write_concern = phongo_write_concern_from_zval(object TSRMLS_CC);
*is_temp = 1;
php_phongo_write_concern_to_zval(&retval, write_concern);
return Z_ARRVAL(retval);
} /* }}} */
/* }}} */
void php_phongo_writeconcern_init_ce(INIT_FUNC_ARGS) /* {{{ */
{
zend_class_entry ce;
INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "WriteConcern", php_phongo_writeconcern_me);
php_phongo_writeconcern_ce = zend_register_internal_class(&ce TSRMLS_CC);
php_phongo_writeconcern_ce->create_object = php_phongo_writeconcern_create_object;
PHONGO_CE_FINAL(php_phongo_writeconcern_ce);
PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_writeconcern_ce);
zend_class_implements(php_phongo_writeconcern_ce TSRMLS_CC, 1, php_phongo_serializable_ce);
memcpy(&php_phongo_handler_writeconcern, phongo_get_std_object_handlers(), sizeof(zend_object_handlers));
php_phongo_handler_writeconcern.get_debug_info = php_phongo_writeconcern_get_debug_info;
#if PHP_VERSION_ID >= 70000
php_phongo_handler_writeconcern.free_obj = php_phongo_writeconcern_free_object;
php_phongo_handler_writeconcern.offset = XtOffsetOf(php_phongo_writeconcern_t, std);
#endif
zend_declare_class_constant_stringl(php_phongo_writeconcern_ce, ZEND_STRL("MAJORITY"), ZEND_STRL(PHONGO_WRITE_CONCERN_W_MAJORITY) TSRMLS_CC);
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/MongoDB/WriteConcernError.c b/mongodb-1.4.2/src/MongoDB/WriteConcernError.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/WriteConcernError.c
rename to mongodb-1.4.2/src/MongoDB/WriteConcernError.c
diff --git a/mongodb-1.3.4/src/MongoDB/WriteError.c b/mongodb-1.4.2/src/MongoDB/WriteError.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/WriteError.c
rename to mongodb-1.4.2/src/MongoDB/WriteError.c
diff --git a/mongodb-1.3.4/src/MongoDB/WriteResult.c b/mongodb-1.4.2/src/MongoDB/WriteResult.c
similarity index 100%
rename from mongodb-1.3.4/src/MongoDB/WriteResult.c
rename to mongodb-1.4.2/src/MongoDB/WriteResult.c
diff --git a/mongodb-1.3.4/src/bson-encode.c b/mongodb-1.4.2/src/bson-encode.c
similarity index 93%
rename from mongodb-1.3.4/src/bson-encode.c
rename to mongodb-1.4.2/src/bson-encode.c
index 70524084..4643d5a2 100644
--- a/mongodb-1.3.4/src/bson-encode.c
+++ b/mongodb-1.4.2/src/bson-encode.c
@@ -1,628 +1,637 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <bson.h>
#include <php.h>
#include <Zend/zend_hash.h>
#include <Zend/zend_interfaces.h>
#include "php_phongo.h"
#include "php_bson.h"
#include "phongo_compat.h"
#if SIZEOF_PHONGO_LONG == 8
# define BSON_APPEND_INT(b, key, keylen, val) \
if (val > INT32_MAX || val < INT32_MIN) { \
bson_append_int64(b, key, keylen, val); \
} else { \
bson_append_int32(b, key, keylen, val); \
}
#elif SIZEOF_PHONGO_LONG == 4
# define BSON_APPEND_INT(b, key, keylen, val) \
bson_append_int32(b, key, keylen, val)
#else
# error Unsupported architecture (integers are neither 32-bit nor 64-bit)
#endif
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "PHONGO-BSON"
/* Determines whether the argument should be serialized as a BSON array or
* document. IS_ARRAY is returned if the argument's keys are a sequence of
* integers starting at zero; otherwise, IS_OBJECT is returned. */
static int php_phongo_is_array_or_document(zval *val TSRMLS_DC) /* {{{ */
{
HashTable *ht_data = HASH_OF(val);
int count;
if (Z_TYPE_P(val) != IS_ARRAY) {
return IS_OBJECT;
}
count = ht_data ? zend_hash_num_elements(ht_data) : 0;
if (count > 0) {
#if PHP_VERSION_ID >= 70000
zend_string *key;
zend_ulong index, idx;
idx = 0;
ZEND_HASH_FOREACH_KEY(ht_data, index, key) {
if (key) {
return IS_OBJECT;
} else {
if (index != idx) {
return IS_OBJECT;
}
}
idx++;
} ZEND_HASH_FOREACH_END();
#else
char *key;
unsigned int key_len;
unsigned long index = 0;
unsigned long idx = 0;
int hash_type = 0;
HashPosition pos;
zend_hash_internal_pointer_reset_ex(ht_data, &pos);
for (;; zend_hash_move_forward_ex(ht_data, &pos)) {
hash_type = zend_hash_get_current_key_ex(ht_data, &key, &key_len, &index, 0, &pos);
if (hash_type == HASH_KEY_NON_EXISTENT) {
break;
}
if (hash_type == HASH_KEY_IS_STRING) {
return IS_OBJECT;
} else {
if (index != idx) {
return IS_OBJECT;
}
}
idx++;
}
#endif
} else {
return Z_TYPE_P(val);
}
return IS_ARRAY;
} /* }}} */
/* Appends the array or object argument to the BSON document. If the object is
* an instance of MongoDB\BSON\Serializable, the return value of bsonSerialize()
* will be appended as an embedded document. Other MongoDB\BSON\Type instances
* will be appended as the appropriate BSON type. Other array or object values
* will be appended as an embedded document. */
static void php_phongo_bson_append_object(bson_t *bson, php_phongo_bson_flags_t flags, const char *key, long key_len, zval *object TSRMLS_DC) /* {{{ */
{
if (Z_TYPE_P(object) == IS_OBJECT && instanceof_function(Z_OBJCE_P(object), php_phongo_cursorid_ce TSRMLS_CC)) {
bson_append_int64(bson, key, key_len, Z_CURSORID_OBJ_P(object)->id);
return;
}
if (Z_TYPE_P(object) == IS_OBJECT && instanceof_function(Z_OBJCE_P(object), php_phongo_type_ce TSRMLS_CC)) {
if (instanceof_function(Z_OBJCE_P(object), php_phongo_serializable_ce TSRMLS_CC)) {
#if PHP_VERSION_ID >= 70000
zval obj_data;
#else
zval *obj_data = NULL;
#endif
bson_t child;
#if PHP_VERSION_ID >= 70000
zend_call_method_with_0_params(object, NULL, NULL, BSON_SERIALIZE_FUNC_NAME, &obj_data);
#else
zend_call_method_with_0_params(&object, NULL, NULL, BSON_SERIALIZE_FUNC_NAME, &obj_data);
#endif
if (Z_ISUNDEF(obj_data)) {
/* zend_call_method() failed or bsonSerialize() threw an
* exception. Either way, there is nothing else to do. */
return;
}
#if PHP_VERSION_ID >= 70000
if (Z_TYPE(obj_data) != IS_ARRAY && !(Z_TYPE(obj_data) == IS_OBJECT && instanceof_function(Z_OBJCE(obj_data), zend_standard_class_def TSRMLS_CC))) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC,
"Expected %s::%s() to return an array or stdClass, %s given",
- Z_OBJCE_P(object)->name->val,
+ ZSTR_VAL(Z_OBJCE_P(object)->name),
BSON_SERIALIZE_FUNC_NAME,
- (Z_TYPE(obj_data) == IS_OBJECT
- ? Z_OBJCE(obj_data)->name->val
- : zend_get_type_by_const(Z_TYPE(obj_data))
- )
+ PHONGO_ZVAL_CLASS_OR_TYPE_NAME(obj_data)
);
zval_ptr_dtor(&obj_data);
#else
if (Z_TYPE_P(obj_data) != IS_ARRAY && !(Z_TYPE_P(obj_data) == IS_OBJECT && instanceof_function(Z_OBJCE_P(obj_data), zend_standard_class_def TSRMLS_CC))) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC,
"Expected %s::%s() to return an array or stdClass, %s given",
- Z_OBJCE_P(object)->name,
+ ZSTR_VAL(Z_OBJCE_P(object)->name),
BSON_SERIALIZE_FUNC_NAME,
- (Z_TYPE_P(obj_data) == IS_OBJECT
- ? Z_OBJCE_P(obj_data)->name
- : zend_get_type_by_const(Z_TYPE_P(obj_data))
- )
+ PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(obj_data)
);
zval_ptr_dtor(&obj_data);
#endif
return;
}
/* Persistable objects must always be serialized as BSON documents;
* otherwise, infer based on bsonSerialize()'s return value. */
#if PHP_VERSION_ID >= 70000
if (instanceof_function(Z_OBJCE_P(object), php_phongo_persistable_ce TSRMLS_CC) || php_phongo_is_array_or_document(&obj_data TSRMLS_CC) == IS_OBJECT) {
#else
if (instanceof_function(Z_OBJCE_P(object), php_phongo_persistable_ce TSRMLS_CC) || php_phongo_is_array_or_document(obj_data TSRMLS_CC) == IS_OBJECT) {
#endif
bson_append_document_begin(bson, key, key_len, &child);
if (instanceof_function(Z_OBJCE_P(object), php_phongo_persistable_ce TSRMLS_CC)) {
#if PHP_VERSION_ID >= 70000
bson_append_binary(&child, PHONGO_ODM_FIELD_NAME, -1, 0x80, (const uint8_t *)Z_OBJCE_P(object)->name->val, Z_OBJCE_P(object)->name->len);
#else
bson_append_binary(&child, PHONGO_ODM_FIELD_NAME, -1, 0x80, (const uint8_t *)Z_OBJCE_P(object)->name, strlen(Z_OBJCE_P(object)->name));
#endif
}
#if PHP_VERSION_ID >= 70000
php_phongo_zval_to_bson(&obj_data, flags, &child, NULL TSRMLS_CC);
#else
php_phongo_zval_to_bson(obj_data, flags, &child, NULL TSRMLS_CC);
#endif
bson_append_document_end(bson, &child);
} else {
bson_append_array_begin(bson, key, key_len, &child);
#if PHP_VERSION_ID >= 70000
php_phongo_zval_to_bson(&obj_data, flags, &child, NULL TSRMLS_CC);
#else
php_phongo_zval_to_bson(obj_data, flags, &child, NULL TSRMLS_CC);
#endif
bson_append_array_end(bson, &child);
}
zval_ptr_dtor(&obj_data);
return;
}
if (instanceof_function(Z_OBJCE_P(object), php_phongo_objectid_ce TSRMLS_CC)) {
bson_oid_t oid;
php_phongo_objectid_t *intern = Z_OBJECTID_OBJ_P(object);
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding ObjectId");
bson_oid_init_from_string(&oid, intern->oid);
bson_append_oid(bson, key, key_len, &oid);
return;
}
if (instanceof_function(Z_OBJCE_P(object), php_phongo_utcdatetime_ce TSRMLS_CC)) {
php_phongo_utcdatetime_t *intern = Z_UTCDATETIME_OBJ_P(object);
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding UTCDateTime");
bson_append_date_time(bson, key, key_len, intern->milliseconds);
return;
}
if (instanceof_function(Z_OBJCE_P(object), php_phongo_binary_ce TSRMLS_CC)) {
php_phongo_binary_t *intern = Z_BINARY_OBJ_P(object);
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Binary");
bson_append_binary(bson, key, key_len, intern->type, (const uint8_t *)intern->data, (uint32_t)intern->data_len);
return;
}
if (instanceof_function(Z_OBJCE_P(object), php_phongo_decimal128_ce TSRMLS_CC)) {
php_phongo_decimal128_t *intern = Z_DECIMAL128_OBJ_P(object);
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Decimal128");
bson_append_decimal128(bson, key, key_len, &intern->decimal);
return;
}
if (instanceof_function(Z_OBJCE_P(object), php_phongo_regex_ce TSRMLS_CC)) {
php_phongo_regex_t *intern = Z_REGEX_OBJ_P(object);
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Regex");
bson_append_regex(bson, key, key_len, intern->pattern, intern->flags);
return;
}
if (instanceof_function(Z_OBJCE_P(object), php_phongo_javascript_ce TSRMLS_CC)) {
php_phongo_javascript_t *intern = Z_JAVASCRIPT_OBJ_P(object);
if (intern->scope) {
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Javascript with scope");
bson_append_code_with_scope(bson, key, key_len, intern->code, intern->scope);
} else {
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Javascript without scope");
bson_append_code(bson, key, key_len, intern->code);
}
return;
}
if (instanceof_function(Z_OBJCE_P(object), php_phongo_timestamp_ce TSRMLS_CC)) {
php_phongo_timestamp_t *intern = Z_TIMESTAMP_OBJ_P(object);
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Timestamp");
bson_append_timestamp(bson, key, key_len, intern->timestamp, intern->increment);
return;
}
if (instanceof_function(Z_OBJCE_P(object), php_phongo_maxkey_ce TSRMLS_CC)) {
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding MaxKey");
bson_append_maxkey(bson, key, key_len);
return;
}
if (instanceof_function(Z_OBJCE_P(object), php_phongo_minkey_ce TSRMLS_CC)) {
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding MinKey");
bson_append_minkey(bson, key, key_len);
return;
}
+ /* Deprecated types */
+ if (instanceof_function(Z_OBJCE_P(object), php_phongo_dbpointer_ce TSRMLS_CC)) {
+ bson_oid_t oid;
+ php_phongo_dbpointer_t *intern = Z_DBPOINTER_OBJ_P(object);
+
+ mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding DBPointer");
+ bson_oid_init_from_string(&oid, intern->id);
+ bson_append_dbpointer(bson, key, key_len, intern->ref, &oid);
+ return;
+ }
+ if (instanceof_function(Z_OBJCE_P(object), php_phongo_symbol_ce TSRMLS_CC)) {
+ php_phongo_symbol_t *intern = Z_SYMBOL_OBJ_P(object);
+
+ mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Symbol");
+ bson_append_symbol(bson, key, key_len, intern->symbol, intern->symbol_len);
+ return;
+ }
+ if (instanceof_function(Z_OBJCE_P(object), php_phongo_undefined_ce TSRMLS_CC)) {
+ mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Undefined");
+ bson_append_undefined(bson, key, key_len);
+ return;
+ }
+
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Unexpected %s instance: %s", ZSTR_VAL(php_phongo_type_ce->name), ZSTR_VAL(Z_OBJCE_P(object)->name));
return;
} else {
bson_t child;
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding document");
bson_append_document_begin(bson, key, key_len, &child);
php_phongo_zval_to_bson(object, flags, &child, NULL TSRMLS_CC);
bson_append_document_end(bson, &child);
}
} /* }}} */
/* Appends the zval argument to the BSON document. If the argument is an object,
* or an array that should be serialized as an embedded document, this function
* will defer to php_phongo_bson_append_object(). */
static void php_phongo_bson_append(bson_t *bson, php_phongo_bson_flags_t flags, const char *key, long key_len, zval *entry TSRMLS_DC) /* {{{ */
{
#if PHP_VERSION_ID >= 70000
try_again:
#endif
switch (Z_TYPE_P(entry))
{
case IS_NULL:
bson_append_null(bson, key, key_len);
break;
#if PHP_VERSION_ID >= 70000
case IS_TRUE:
bson_append_bool(bson, key, key_len, true);
break;
case IS_FALSE:
bson_append_bool(bson, key, key_len, false);
break;
#else
case IS_BOOL:
bson_append_bool(bson, key, key_len, Z_BVAL_P(entry));
break;
#endif
case IS_LONG:
BSON_APPEND_INT(bson, key, key_len, Z_LVAL_P(entry));
break;
case IS_DOUBLE:
bson_append_double(bson, key, key_len, Z_DVAL_P(entry));
break;
case IS_STRING:
if (bson_utf8_validate(Z_STRVAL_P(entry), Z_STRLEN_P(entry), true)) {
bson_append_utf8(bson, key, key_len, Z_STRVAL_P(entry), Z_STRLEN_P(entry));
} else {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected invalid UTF-8 for fieldname \"%s\": %s", key, Z_STRVAL_P(entry));
}
break;
case IS_ARRAY:
if (php_phongo_is_array_or_document(entry TSRMLS_CC) == IS_ARRAY) {
bson_t child;
HashTable *tmp_ht = HASH_OF(entry);
if (tmp_ht && ZEND_HASH_GET_APPLY_COUNT(tmp_ht) > 0) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected recursion for fieldname \"%s\"", key);
break;
}
if (tmp_ht && ZEND_HASH_APPLY_PROTECTION(tmp_ht)) {
ZEND_HASH_INC_APPLY_COUNT(tmp_ht);
}
bson_append_array_begin(bson, key, key_len, &child);
php_phongo_zval_to_bson(entry, flags, &child, NULL TSRMLS_CC);
bson_append_array_end(bson, &child);
if (tmp_ht && ZEND_HASH_APPLY_PROTECTION(tmp_ht)) {
ZEND_HASH_DEC_APPLY_COUNT(tmp_ht);
}
break;
}
- /* break intentionally omitted */
+ PHONGO_BREAK_INTENTIONALLY_MISSING
+
case IS_OBJECT: {
HashTable *tmp_ht = HASH_OF(entry);
if (tmp_ht && ZEND_HASH_GET_APPLY_COUNT(tmp_ht) > 0) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected recursion for fieldname \"%s\"", key);
break;
}
if (tmp_ht && ZEND_HASH_APPLY_PROTECTION(tmp_ht)) {
ZEND_HASH_INC_APPLY_COUNT(tmp_ht);
}
php_phongo_bson_append_object(bson, flags, key, key_len, entry TSRMLS_CC);
if (tmp_ht && ZEND_HASH_APPLY_PROTECTION(tmp_ht)) {
ZEND_HASH_DEC_APPLY_COUNT(tmp_ht);
}
break;
}
#if PHP_VERSION_ID >= 70000
case IS_INDIRECT:
php_phongo_bson_append(bson, flags, key, key_len, Z_INDIRECT_P(entry) TSRMLS_DC);
break;
case IS_REFERENCE:
ZVAL_DEREF(entry);
goto try_again;
#endif
default:
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected unsupported PHP type for fieldname \"%s\": %d (%s)", key, Z_TYPE_P(entry), zend_get_type_by_const(Z_TYPE_P(entry)));
}
} /* }}} */
/* Converts the array or object argument to a BSON document. If the object is an
* instance of MongoDB\BSON\Serializable, the return value of bsonSerialize()
* will be used. */
void php_phongo_zval_to_bson(zval *data, php_phongo_bson_flags_t flags, bson_t *bson, bson_t **bson_out TSRMLS_DC) /* {{{ */
{
HashTable *ht_data = NULL;
#if PHP_VERSION_ID >= 70000
zval obj_data;
#else
HashPosition pos;
zval *obj_data = NULL;
#endif
/* If we will be encoding a class that may contain protected and private
* properties, we'll need to filter them out later. */
bool ht_data_from_properties = false;
/* If the object is an instance of MongoDB\BSON\Persistable, we will need to
* inject the PHP class name as a BSON key and ignore any existing key in
* the return value of bsonSerialize(). */
bool skip_odm_field = false;
ZVAL_UNDEF(&obj_data);
switch(Z_TYPE_P(data)) {
case IS_OBJECT:
if (instanceof_function(Z_OBJCE_P(data), php_phongo_serializable_ce TSRMLS_CC)) {
#if PHP_VERSION_ID >= 70000
zend_call_method_with_0_params(data, NULL, NULL, BSON_SERIALIZE_FUNC_NAME, &obj_data);
#else
zend_call_method_with_0_params(&data, NULL, NULL, BSON_SERIALIZE_FUNC_NAME, &obj_data);
#endif
if (Z_ISUNDEF(obj_data)) {
/* zend_call_method() failed or bsonSerialize() threw an
* exception. Either way, there is nothing else to do. */
return;
}
#if PHP_VERSION_ID >= 70000
if (Z_TYPE(obj_data) != IS_ARRAY && !(Z_TYPE(obj_data) == IS_OBJECT && instanceof_function(Z_OBJCE(obj_data), zend_standard_class_def TSRMLS_CC))) {
#else
if (Z_TYPE_P(obj_data) != IS_ARRAY && !(Z_TYPE_P(obj_data) == IS_OBJECT && instanceof_function(Z_OBJCE_P(obj_data), zend_standard_class_def TSRMLS_CC))) {
#endif
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC,
"Expected %s::%s() to return an array or stdClass, %s given",
-#if PHP_VERSION_ID >= 70000
- Z_OBJCE_P(data)->name->val,
-#else
- Z_OBJCE_P(data)->name,
-#endif
+ ZSTR_VAL(Z_OBJCE_P(data)->name),
BSON_SERIALIZE_FUNC_NAME,
#if PHP_VERSION_ID >= 70000
- (Z_TYPE(obj_data) == IS_OBJECT
- ? Z_OBJCE(obj_data)->name->val
- : zend_get_type_by_const(Z_TYPE(obj_data))
+ PHONGO_ZVAL_CLASS_OR_TYPE_NAME(obj_data)
#else
- (Z_TYPE_P(obj_data) == IS_OBJECT
- ? Z_OBJCE_P(obj_data)->name
- : zend_get_type_by_const(Z_TYPE_P(obj_data))
+ PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(obj_data)
#endif
- )
);
goto cleanup;
}
#if PHP_VERSION_ID >= 70000
ht_data = HASH_OF(&obj_data);
#else
ht_data = HASH_OF(obj_data);
#endif
if (instanceof_function(Z_OBJCE_P(data), php_phongo_persistable_ce TSRMLS_CC)) {
#if PHP_VERSION_ID >= 70000
bson_append_binary(bson, PHONGO_ODM_FIELD_NAME, -1, 0x80, (const uint8_t *)Z_OBJCE_P(data)->name->val, Z_OBJCE_P(data)->name->len);
#else
bson_append_binary(bson, PHONGO_ODM_FIELD_NAME, -1, 0x80, (const uint8_t *)Z_OBJCE_P(data)->name, strlen(Z_OBJCE_P(data)->name));
#endif
/* Ensure that we ignore an existing key with the same name
* if one exists in the bsonSerialize() return value. */
skip_odm_field = true;
}
break;
}
if (instanceof_function(Z_OBJCE_P(data), php_phongo_type_ce TSRMLS_CC)) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s instance %s cannot be serialized as a root element", ZSTR_VAL(php_phongo_type_ce->name), ZSTR_VAL(Z_OBJCE_P(data)->name));
return;
}
ht_data = Z_OBJ_HT_P(data)->get_properties(data TSRMLS_CC);
ht_data_from_properties = true;
break;
case IS_ARRAY:
ht_data = HASH_OF(data);
break;
default:
return;
}
#if PHP_VERSION_ID >= 70000
{
zend_string *string_key = NULL;
zend_ulong num_key = 0;
zval *value;
ZEND_HASH_FOREACH_KEY_VAL(ht_data, num_key, string_key, value) {
if (string_key) {
if (ht_data_from_properties) {
/* Skip protected and private properties */
if (ZSTR_VAL(string_key)[0] == '\0' && ZSTR_LEN(string_key) > 0) {
continue;
}
}
if (strlen(ZSTR_VAL(string_key)) != ZSTR_LEN(string_key)) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "BSON keys cannot contain null bytes. Unexpected null byte after \"%s\".", ZSTR_VAL(string_key));
goto cleanup;
}
if (skip_odm_field && !strcmp(ZSTR_VAL(string_key), PHONGO_ODM_FIELD_NAME)) {
continue;
}
if (flags & PHONGO_BSON_ADD_ID) {
if (!strcmp(ZSTR_VAL(string_key), "_id")) {
flags &= ~PHONGO_BSON_ADD_ID;
}
}
}
/* Ensure we're working with a string key */
if (!string_key) {
string_key = zend_long_to_str(num_key);
} else {
zend_string_addref(string_key);
}
php_phongo_bson_append(bson, flags & ~PHONGO_BSON_ADD_ID, ZSTR_VAL(string_key), strlen(ZSTR_VAL(string_key)), value TSRMLS_CC);
zend_string_release(string_key);
} ZEND_HASH_FOREACH_END();
}
#else
zend_hash_internal_pointer_reset_ex(ht_data, &pos);
for (;; zend_hash_move_forward_ex(ht_data, &pos)) {
char *string_key = NULL;
uint string_key_len = 0;
ulong num_key = 0;
zval **value;
int hash_type;
hash_type = zend_hash_get_current_key_ex(ht_data, &string_key, &string_key_len, &num_key, 0, &pos);
if (hash_type == HASH_KEY_NON_EXISTENT) {
break;
}
if (zend_hash_get_current_data_ex(ht_data, (void **) &value, &pos) == FAILURE) {
break;
}
if (hash_type == HASH_KEY_IS_STRING) {
if (ht_data_from_properties) {
/* Skip protected and private properties */
if (string_key[0] == '\0' && string_key_len > 1) {
continue;
}
}
if (strlen(string_key) != string_key_len - 1) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "BSON keys cannot contain null bytes. Unexpected null byte after \"%s\".", ZSTR_VAL(string_key));
goto cleanup;
}
if (skip_odm_field && !strcmp(string_key, PHONGO_ODM_FIELD_NAME)) {
continue;
}
if (flags & PHONGO_BSON_ADD_ID) {
if (!strcmp(string_key, "_id")) {
flags &= ~PHONGO_BSON_ADD_ID;
}
}
}
/* Ensure we're working with a string key */
if (hash_type == HASH_KEY_IS_LONG) {
spprintf(&string_key, 0, "%ld", num_key);
}
php_phongo_bson_append(bson, flags & ~PHONGO_BSON_ADD_ID, string_key, strlen(string_key), *value TSRMLS_CC);
if (hash_type == HASH_KEY_IS_LONG) {
efree(string_key);
}
}
#endif
if (flags & PHONGO_BSON_ADD_ID) {
bson_oid_t oid;
bson_oid_init(&oid, NULL);
bson_append_oid(bson, "_id", strlen("_id"), &oid);
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "Added new _id");
}
if (flags & PHONGO_BSON_RETURN_ID && bson_out) {
bson_iter_t iter;
*bson_out = bson_new();
if (bson_iter_init_find(&iter, bson, "_id") && !bson_append_iter(*bson_out, NULL, 0, &iter)) {
/* This should not be able to happen since we are copying from
* within a valid bson_t. */
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Error copying \"_id\" field from encoded document");
goto cleanup;
}
}
cleanup:
if (!Z_ISUNDEF(obj_data)) {
zval_ptr_dtor(&obj_data);
}
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/bson.c b/mongodb-1.4.2/src/bson.c
similarity index 93%
rename from mongodb-1.3.4/src/bson.c
rename to mongodb-1.4.2/src/bson.c
index c6d553df..3c45b4c6 100644
--- a/mongodb-1.3.4/src/bson.c
+++ b/mongodb-1.4.2/src/bson.c
@@ -1,942 +1,1011 @@
/*
* Copyright 2014-2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <bson.h>
#include <php.h>
#include <Zend/zend_hash.h>
#include <Zend/zend_interfaces.h>
#include "php_phongo.h"
#include "php_bson.h"
#include "phongo_compat.h"
#include "php_array_api.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "PHONGO-BSON"
#define PHONGO_IS_CLASS_INSTANTIATABLE(ce) \
(!(ce->ce_flags & (ZEND_ACC_INTERFACE|ZEND_ACC_IMPLICIT_ABSTRACT_CLASS|ZEND_ACC_EXPLICIT_ABSTRACT_CLASS)))
#if PHP_VERSION_ID >= 70000
# define PHONGO_BSON_STATE_ZCHILD(state) (&((php_phongo_bson_state *)(state))->zchild)
#else
# define PHONGO_BSON_STATE_ZCHILD(state) (((php_phongo_bson_state *)(state))->zchild)
#endif
/* Forward declarations */
static bool php_phongo_bson_visit_document(const bson_iter_t *iter ARG_UNUSED, const char *key, const bson_t *v_document, void *data);
static bool php_phongo_bson_visit_array(const bson_iter_t *iter ARG_UNUSED, const char *key, const bson_t *v_document, void *data);
static void php_phongo_bson_visit_corrupt(const bson_iter_t *iter ARG_UNUSED, void *data ARG_UNUSED) /* {{{ */
{
mongoc_log(MONGOC_LOG_LEVEL_WARNING, MONGOC_LOG_DOMAIN, "Corrupt BSON data detected!");
} /* }}} */
static void php_phongo_bson_visit_unsupported_type(const bson_iter_t *iter ARG_UNUSED, const char *key, uint32_t v_type_code, void *data ARG_UNUSED) /* {{{ */
{
TSRMLS_FETCH();
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected unknown BSON type 0x%02hhx for fieldname \"%s\". Are you using the latest driver?", v_type_code, key);
} /* }}} */
static bool php_phongo_bson_visit_double(const bson_iter_t *iter ARG_UNUSED, const char *key, double v_double, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_double(retval, v_double);
} else {
add_assoc_double(retval, key, v_double);
}
return false;
} /* }}} */
static bool php_phongo_bson_visit_utf8(const bson_iter_t *iter ARG_UNUSED, const char *key, size_t v_utf8_len, const char *v_utf8, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
ADD_NEXT_INDEX_STRINGL(retval, v_utf8, v_utf8_len);
} else {
ADD_ASSOC_STRING_EX(retval, key, strlen(key), v_utf8, v_utf8_len);
}
return false;
} /* }}} */
static bool php_phongo_bson_visit_binary(const bson_iter_t *iter ARG_UNUSED, const char *key, bson_subtype_t v_subtype, size_t v_binary_len, const uint8_t *v_binary, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
TSRMLS_FETCH();
if (v_subtype == 0x80 && strcmp(key, PHONGO_ODM_FIELD_NAME) == 0) {
#if PHP_VERSION_ID >= 70000
zend_string *zs_classname = zend_string_init((const char *)v_binary, v_binary_len, 0);
zend_class_entry *found_ce = zend_fetch_class(zs_classname, ZEND_FETCH_CLASS_AUTO|ZEND_FETCH_CLASS_SILENT TSRMLS_CC);
zend_string_release(zs_classname);
#else
zend_class_entry *found_ce = zend_fetch_class((const char *)v_binary, v_binary_len, ZEND_FETCH_CLASS_AUTO|ZEND_FETCH_CLASS_SILENT TSRMLS_CC);
#endif
if (found_ce && PHONGO_IS_CLASS_INSTANTIATABLE(found_ce) && instanceof_function(found_ce, php_phongo_persistable_ce TSRMLS_CC)) {
((php_phongo_bson_state *)data)->odm = found_ce;
}
}
{
#if PHP_VERSION_ID >= 70000
zval zchild;
php_phongo_new_binary_from_binary_and_type(&zchild, (const char *)v_binary, v_binary_len, v_subtype TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &zchild);
}
#else
zval *zchild = NULL;
MAKE_STD_ZVAL(zchild);
php_phongo_new_binary_from_binary_and_type(zchild, (const char *)v_binary, v_binary_len, v_subtype TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, zchild);
}
#endif
}
return false;
} /* }}} */
static bool php_phongo_bson_visit_undefined(const bson_iter_t *iter, const char *key, void *data) /* {{{ */
{
- mongoc_log(MONGOC_LOG_LEVEL_WARNING, MONGOC_LOG_DOMAIN, "Detected unsupported BSON type 0x06 (undefined) for fieldname \"%s\"", key);
+ zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
+#if PHP_VERSION_ID >= 70000
+ zval zchild;
+
+ object_init_ex(&zchild, php_phongo_undefined_ce);
+
+ if (((php_phongo_bson_state *)data)->is_visiting_array) {
+ add_next_index_zval(retval, &zchild);
+ } else {
+ ADD_ASSOC_ZVAL(retval, key, &zchild);
+ }
+#else
+ zval *zchild = NULL;
+ TSRMLS_FETCH();
+
+ MAKE_STD_ZVAL(zchild);
+ object_init_ex(zchild, php_phongo_undefined_ce);
+
+ if (((php_phongo_bson_state *)data)->is_visiting_array) {
+ add_next_index_zval(retval, zchild);
+ } else {
+ ADD_ASSOC_ZVAL(retval, key, zchild);
+ }
+#endif
return false;
} /* }}} */
static bool php_phongo_bson_visit_oid(const bson_iter_t *iter ARG_UNUSED, const char *key, const bson_oid_t *v_oid, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
#if PHP_VERSION_ID >= 70000
zval zchild;
php_phongo_objectid_new_from_oid(&zchild, v_oid TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &zchild);
}
#else
zval *zchild = NULL;
TSRMLS_FETCH();
MAKE_STD_ZVAL(zchild);
php_phongo_objectid_new_from_oid(zchild, v_oid TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, zchild);
}
#endif
return false;
} /* }}} */
static bool php_phongo_bson_visit_bool(const bson_iter_t *iter ARG_UNUSED, const char *key, bool v_bool, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_bool(retval, v_bool);
} else {
add_assoc_bool(retval, key, v_bool);
}
return false;
} /* }}} */
static bool php_phongo_bson_visit_date_time(const bson_iter_t *iter ARG_UNUSED, const char *key, int64_t msec_since_epoch, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
#if PHP_VERSION_ID >= 70000
zval zchild;
php_phongo_new_utcdatetime_from_epoch(&zchild, msec_since_epoch TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &zchild);
}
#else
zval *zchild = NULL;
TSRMLS_FETCH();
MAKE_STD_ZVAL(zchild);
php_phongo_new_utcdatetime_from_epoch(zchild, msec_since_epoch TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, zchild);
}
#endif
return false;
} /* }}} */
static bool php_phongo_bson_visit_decimal128(const bson_iter_t *iter ARG_UNUSED, const char *key, const bson_decimal128_t *decimal, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
#if PHP_VERSION_ID >= 70000
zval zchild;
php_phongo_new_decimal128(&zchild, decimal TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &zchild);
}
#else
zval *zchild = NULL;
TSRMLS_FETCH();
MAKE_STD_ZVAL(zchild);
php_phongo_new_decimal128(zchild, decimal TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, zchild);
}
#endif
return false;
} /* }}} */
static bool php_phongo_bson_visit_null(const bson_iter_t *iter ARG_UNUSED, const char *key, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_null(retval);
} else {
add_assoc_null(retval, key);
}
return false;
} /* }}} */
static bool php_phongo_bson_visit_regex(const bson_iter_t *iter ARG_UNUSED, const char *key, const char *v_regex, const char *v_options, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
#if PHP_VERSION_ID >= 70000
zval zchild;
php_phongo_new_regex_from_regex_and_options(&zchild, v_regex, v_options TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &zchild);
}
#else
zval *zchild = NULL;
TSRMLS_FETCH();
MAKE_STD_ZVAL(zchild);
php_phongo_new_regex_from_regex_and_options(zchild, v_regex, v_options TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, zchild);
}
#endif
return false;
} /* }}} */
-static bool php_phongo_bson_visit_symbol(const bson_iter_t *iter, const char *key, size_t symbol_len, const char *symbol, void *data) /* {{{ */
+static bool php_phongo_bson_visit_symbol(const bson_iter_t *iter, const char *key, size_t v_symbol_len, const char *v_symbol, void *data) /* {{{ */
{
- mongoc_log(MONGOC_LOG_LEVEL_WARNING, MONGOC_LOG_DOMAIN, "Detected unsupported BSON type 0x0E (symbol) for fieldname \"%s\"", key);
+ zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
+#if PHP_VERSION_ID >= 70000
+ zval zchild;
+
+ php_phongo_new_symbol(&zchild, v_symbol, v_symbol_len TSRMLS_CC);
+
+ if (((php_phongo_bson_state *)data)->is_visiting_array) {
+ add_next_index_zval(retval, &zchild);
+ } else {
+ ADD_ASSOC_ZVAL(retval, key, &zchild);
+ }
+#else
+ zval *zchild = NULL;
+ TSRMLS_FETCH();
+
+ MAKE_STD_ZVAL(zchild);
+ php_phongo_new_symbol(zchild, v_symbol, v_symbol_len TSRMLS_CC);
+
+ if (((php_phongo_bson_state *)data)->is_visiting_array) {
+ add_next_index_zval(retval, zchild);
+ } else {
+ ADD_ASSOC_ZVAL(retval, key, zchild);
+ }
+#endif
return false;
} /* }}} */
static bool php_phongo_bson_visit_code(const bson_iter_t *iter ARG_UNUSED, const char *key, size_t v_code_len, const char *v_code, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
#if PHP_VERSION_ID >= 70000
zval zchild;
php_phongo_new_javascript_from_javascript(1, &zchild, v_code, v_code_len TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &zchild);
}
#else
zval *zchild = NULL;
TSRMLS_FETCH();
MAKE_STD_ZVAL(zchild);
php_phongo_new_javascript_from_javascript(1, zchild, v_code, v_code_len TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, zchild);
}
#endif
return false;
} /* }}} */
-static bool php_phongo_bson_visit_dbpointer(const bson_iter_t *iter, const char *key, size_t collection_len, const char *collection, const bson_oid_t *oid, void *data) /* {{{ */
+static bool php_phongo_bson_visit_dbpointer(const bson_iter_t *iter, const char *key, size_t namespace_len, const char *namespace, const bson_oid_t *oid, void *data) /* {{{ */
{
- mongoc_log(MONGOC_LOG_LEVEL_WARNING, MONGOC_LOG_DOMAIN, "Detected unsupported BSON type 0x0C (DBPointer) for fieldname \"%s\"", key);
+ zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
+#if PHP_VERSION_ID >= 70000
+ zval zchild;
+
+ php_phongo_new_dbpointer(&zchild, namespace, namespace_len, oid TSRMLS_CC);
+
+ if (((php_phongo_bson_state *)data)->is_visiting_array) {
+ add_next_index_zval(retval, &zchild);
+ } else {
+ ADD_ASSOC_ZVAL(retval, key, &zchild);
+ }
+#else
+ zval *zchild = NULL;
+ TSRMLS_FETCH();
+
+ MAKE_STD_ZVAL(zchild);
+ php_phongo_new_dbpointer(zchild, namespace, namespace_len, oid TSRMLS_CC);
+
+ if (((php_phongo_bson_state *)data)->is_visiting_array) {
+ add_next_index_zval(retval, zchild);
+ } else {
+ ADD_ASSOC_ZVAL(retval, key, zchild);
+ }
+#endif
return false;
} /* }}} */
static bool php_phongo_bson_visit_codewscope(const bson_iter_t *iter ARG_UNUSED, const char *key, size_t v_code_len, const char *v_code, const bson_t *v_scope, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
#if PHP_VERSION_ID >= 70000
zval zchild;
php_phongo_new_javascript_from_javascript_and_scope(1, &zchild, v_code, v_code_len, v_scope TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &zchild);
}
#else
zval *zchild = NULL;
TSRMLS_FETCH();
MAKE_STD_ZVAL(zchild);
php_phongo_new_javascript_from_javascript_and_scope(1, zchild, v_code, v_code_len, v_scope TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, zchild);
}
#endif
return false;
} /* }}} */
static bool php_phongo_bson_visit_int32(const bson_iter_t *iter ARG_UNUSED, const char *key, int32_t v_int32, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_long(retval, v_int32);
} else {
add_assoc_long(retval, key, v_int32);
}
return false;
} /* }}} */
static bool php_phongo_bson_visit_timestamp(const bson_iter_t *iter ARG_UNUSED, const char *key, uint32_t v_timestamp, uint32_t v_increment, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
#if PHP_VERSION_ID >= 70000
zval zchild;
php_phongo_new_timestamp_from_increment_and_timestamp(&zchild, v_increment, v_timestamp TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &zchild);
}
#else
zval *zchild = NULL;
TSRMLS_FETCH();
MAKE_STD_ZVAL(zchild);
php_phongo_new_timestamp_from_increment_and_timestamp(zchild, v_increment, v_timestamp TSRMLS_CC);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, zchild);
}
#endif
return false;
} /* }}} */
static bool php_phongo_bson_visit_int64(const bson_iter_t *iter ARG_UNUSED, const char *key, int64_t v_int64, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
#if SIZEOF_PHONGO_LONG == 4
TSRMLS_FETCH();
#endif
if (((php_phongo_bson_state *)data)->is_visiting_array) {
ADD_NEXT_INDEX_INT64(retval, v_int64);
} else {
ADD_ASSOC_INT64(retval, key, v_int64);
}
return false;
} /* }}} */
static bool php_phongo_bson_visit_maxkey(const bson_iter_t *iter ARG_UNUSED, const char *key, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
#if PHP_VERSION_ID >= 70000
zval zchild;
object_init_ex(&zchild, php_phongo_maxkey_ce);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &zchild);
}
#else
zval *zchild = NULL;
TSRMLS_FETCH();
MAKE_STD_ZVAL(zchild);
object_init_ex(zchild, php_phongo_maxkey_ce);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, zchild);
}
#endif
return false;
} /* }}} */
static bool php_phongo_bson_visit_minkey(const bson_iter_t *iter ARG_UNUSED, const char *key, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
#if PHP_VERSION_ID >= 70000
zval zchild;
object_init_ex(&zchild, php_phongo_minkey_ce);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &zchild);
}
#else
zval *zchild = NULL;
TSRMLS_FETCH();
MAKE_STD_ZVAL(zchild);
object_init_ex(zchild, php_phongo_minkey_ce);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, zchild);
}
#endif
return false;
} /* }}} */
static const bson_visitor_t php_bson_visitors = {
NULL /* php_phongo_bson_visit_before*/,
NULL /*php_phongo_bson_visit_after*/,
php_phongo_bson_visit_corrupt,
php_phongo_bson_visit_double,
php_phongo_bson_visit_utf8,
php_phongo_bson_visit_document,
php_phongo_bson_visit_array,
php_phongo_bson_visit_binary,
php_phongo_bson_visit_undefined,
php_phongo_bson_visit_oid,
php_phongo_bson_visit_bool,
php_phongo_bson_visit_date_time,
php_phongo_bson_visit_null,
php_phongo_bson_visit_regex,
php_phongo_bson_visit_dbpointer,
php_phongo_bson_visit_code,
php_phongo_bson_visit_symbol,
php_phongo_bson_visit_codewscope,
php_phongo_bson_visit_int32,
php_phongo_bson_visit_timestamp,
php_phongo_bson_visit_int64,
php_phongo_bson_visit_maxkey,
php_phongo_bson_visit_minkey,
php_phongo_bson_visit_unsupported_type,
php_phongo_bson_visit_decimal128,
{ NULL }
};
static bool php_phongo_bson_visit_document(const bson_iter_t *iter ARG_UNUSED, const char *key, const bson_t *v_document, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
bson_iter_t child;
TSRMLS_FETCH();
if (bson_iter_init(&child, v_document)) {
php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
state.map = ((php_phongo_bson_state *)data)->map;
#if PHP_VERSION_ID >= 70000
array_init(&state.zchild);
#else
MAKE_STD_ZVAL(state.zchild);
array_init(state.zchild);
#endif
if (!bson_iter_visit_all(&child, &php_bson_visitors, &state) && !child.err_off) {
/* If php_phongo_bson_visit_binary() finds an ODM class, it should
* supersede a default type map and named document class. */
if (state.odm && state.map.document_type == PHONGO_TYPEMAP_NONE) {
state.map.document_type = PHONGO_TYPEMAP_CLASS;
}
switch(state.map.document_type) {
case PHONGO_TYPEMAP_NATIVE_ARRAY:
#if PHP_VERSION_ID >= 70000
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &state.zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &state.zchild);
}
#else
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, state.zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, state.zchild);
}
#endif
break;
case PHONGO_TYPEMAP_CLASS: {
#if PHP_VERSION_ID >= 70000
zval obj;
object_init_ex(&obj, state.odm ? state.odm : state.map.document);
zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, &state.zchild);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &obj);
} else {
ADD_ASSOC_ZVAL(retval, key, &obj);
}
zval_ptr_dtor(&state.zchild);
#else
zval *obj = NULL;
MAKE_STD_ZVAL(obj);
object_init_ex(obj, state.odm ? state.odm : state.map.document);
zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, state.zchild);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, obj);
} else {
ADD_ASSOC_ZVAL(retval, key, obj);
}
zval_ptr_dtor(&state.zchild);
#endif
break;
}
case PHONGO_TYPEMAP_NATIVE_OBJECT:
default:
#if PHP_VERSION_ID >= 70000
convert_to_object(&state.zchild);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &state.zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &state.zchild);
}
#else
convert_to_object(state.zchild);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, state.zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, state.zchild);
}
#endif
}
} else {
/* Iteration stopped prematurely due to corruption or a failed
* visitor. Free state.zchild, which we just initialized, and return
* true to stop iteration for our parent context. */
zval_ptr_dtor(&state.zchild);
return true;
}
}
return false;
} /* }}} */
static bool php_phongo_bson_visit_array(const bson_iter_t *iter ARG_UNUSED, const char *key, const bson_t *v_array, void *data) /* {{{ */
{
zval *retval = PHONGO_BSON_STATE_ZCHILD(data);
bson_iter_t child;
TSRMLS_FETCH();
if (bson_iter_init(&child, v_array)) {
php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
state.map = ((php_phongo_bson_state *)data)->map;
/* Note that we are visiting an array, so element visitors know to use
* add_next_index() (i.e. disregard BSON keys) instead of add_assoc()
* when building the PHP array.
*/
state.is_visiting_array = true;
#if PHP_VERSION_ID >= 70000
array_init(&state.zchild);
#else
MAKE_STD_ZVAL(state.zchild);
array_init(state.zchild);
#endif
if (!bson_iter_visit_all(&child, &php_bson_visitors, &state) && !child.err_off) {
switch(state.map.array_type) {
case PHONGO_TYPEMAP_CLASS: {
#if PHP_VERSION_ID >= 70000
zval obj;
object_init_ex(&obj, state.map.array);
zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, &state.zchild);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &obj);
} else {
ADD_ASSOC_ZVAL(retval, key, &obj);
}
zval_ptr_dtor(&state.zchild);
#else
zval *obj = NULL;
MAKE_STD_ZVAL(obj);
object_init_ex(obj, state.map.array);
zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, state.zchild);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, obj);
} else {
ADD_ASSOC_ZVAL(retval, key, obj);
}
zval_ptr_dtor(&state.zchild);
#endif
break;
}
case PHONGO_TYPEMAP_NATIVE_OBJECT:
#if PHP_VERSION_ID >= 70000
convert_to_object(&state.zchild);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &state.zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &state.zchild);
}
#else
convert_to_object(state.zchild);
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, state.zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, state.zchild);
}
#endif
break;
case PHONGO_TYPEMAP_NATIVE_ARRAY:
default:
#if PHP_VERSION_ID >= 70000
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, &state.zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, &state.zchild);
}
#else
if (((php_phongo_bson_state *)data)->is_visiting_array) {
add_next_index_zval(retval, state.zchild);
} else {
ADD_ASSOC_ZVAL(retval, key, state.zchild);
}
#endif
break;
}
} else {
/* Iteration stopped prematurely due to corruption or a failed
* visitor. Free state.zchild, which we just initialized, and return
* true to stop iteration for our parent context. */
zval_ptr_dtor(&state.zchild);
return true;
}
}
return false;
} /* }}} */
/* Converts a BSON document to a PHP value using the default typemap.
 *
 * data/data_len describe a raw BSON buffer. The decoded result is written to
 * zv: directly on PHP 7, or as a newly allocated zval on PHP 5. Returns the
 * result of php_phongo_bson_to_zval_ex() (false means an exception was
 * thrown). Note: state.zchild is assigned to zv unconditionally; per the
 * contract of php_phongo_bson_to_zval_ex() it is always initialized on
 * PHP 5, and presumably zero/undef-initialized here on PHP 7 via
 * PHONGO_BSON_STATE_INITIALIZER — TODO confirm against its definition. */
#if PHP_VERSION_ID >= 70000
bool php_phongo_bson_to_zval(const unsigned char *data, int data_len, zval *zv) /* {{{ */
#else
bool php_phongo_bson_to_zval(const unsigned char *data, int data_len, zval **zv)
#endif
{
bool retval;
/* Default-initialized state: no custom typemap, so root/document/array all
 * fall back to the default conversion behavior. */
php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER;
retval = php_phongo_bson_to_zval_ex(data, data_len, &state);
#if PHP_VERSION_ID >= 70000
/* Copy state.zchild into *zv (copy=1) and destroy the source (dtor=1),
 * transferring the result to the caller. */
ZVAL_ZVAL(zv, &state.zchild, 1, 1);
#else
/* PHP 5: hand the allocated zval (and its refcount) to the caller. */
*zv = state.zchild;
#endif
return retval;
} /* }}} */
/* Converts a BSON document to a PHP value according to the typemap specified in
 * the state argument.
 *
 * On success, the result will be set on the state argument and true will be
 * returned. On error, an exception will have been thrown and false will be
 * returned.
 *
 * Note: the result zval in the state argument will always be initialized for
 * PHP 5.x so that the caller may always zval_ptr_dtor() it. The zval is left
 * as-is on PHP 7; however, it should have the type undefined if the state
 * was initialized to zero.
 */
bool php_phongo_bson_to_zval_ex(const unsigned char *data, int data_len, php_phongo_bson_state *state) /* {{{ */
{
bson_reader_t *reader = NULL;
bson_iter_t iter;
const bson_t *b;
bool eof = false;
bool retval = false;
TSRMLS_FETCH();
#if PHP_VERSION_ID < 70000
MAKE_STD_ZVAL(state->zchild);
/* Ensure that state->zchild has a type, since the calling code may want to
 * zval_ptr_dtor() it if we throw an exception. */
ZVAL_NULL(state->zchild);
#endif
/* Wrap the raw buffer in a reader and pull the first (and expectedly only)
 * document out of it. */
reader = bson_reader_new_from_data(data, data_len);
if (!(b = bson_reader_read(reader, NULL))) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Could not read document from BSON reader");
goto cleanup;
}
if (!bson_iter_init(&iter, b)) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Could not initialize BSON iterator");
goto cleanup;
}
/* We initialize an array because it will either be returned as-is (native
 * array in type map), passed to bsonUnserialize() (ODM class), or used to
 * initialize a stdClass object (native object in type map). */
#if PHP_VERSION_ID >= 70000
array_init(&state->zchild);
#else
array_init(state->zchild);
#endif
/* Walk every element; the visitor table populates state->zchild. A nonzero
 * return or a recorded error offset means iteration aborted mid-document. */
if (bson_iter_visit_all(&iter, &php_bson_visitors, state) || iter.err_off) {
/* Iteration stopped prematurely due to corruption or a failed visitor.
 * While we free the reader, state->zchild should be left as-is, since
 * the calling code may want to zval_ptr_dtor() it. If an exception has
 * been thrown already (due to an unsupported BSON type, for example),
 * don't overwrite with a generic exception message. */
if (!EG(exception)) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected corrupt BSON data");
}
goto cleanup;
}
/* If php_phongo_bson_visit_binary() finds an ODM class, it should supersede
 * a default type map and named root class. */
if (state->odm && state->map.root_type == PHONGO_TYPEMAP_NONE) {
state->map.root_type = PHONGO_TYPEMAP_CLASS;
}
/* Convert the intermediate array according to the root type map entry. */
switch (state->map.root_type) {
case PHONGO_TYPEMAP_NATIVE_ARRAY:
/* Nothing to do here */
break;
case PHONGO_TYPEMAP_CLASS: {
/* Instantiate the ODM (or configured root) class and hand it the array
 * via bsonUnserialize(); the object replaces the array as the result. */
#if PHP_VERSION_ID >= 70000
zval obj;
object_init_ex(&obj, state->odm ? state->odm : state->map.root);
zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, &state->zchild);
zval_ptr_dtor(&state->zchild);
ZVAL_COPY_VALUE(&state->zchild, &obj);
#else
zval *obj = NULL;
MAKE_STD_ZVAL(obj);
object_init_ex(obj, state->odm ? state->odm : state->map.root);
zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, state->zchild);
zval_ptr_dtor(&state->zchild);
state->zchild = obj;
#endif
break;
}
case PHONGO_TYPEMAP_NATIVE_OBJECT:
default:
/* Default behavior: expose the document as a stdClass object. */
#if PHP_VERSION_ID >= 70000
convert_to_object(&state->zchild);
#else
convert_to_object(state->zchild);
#endif
}
/* A second successful read (or missing EOF) means the buffer held more
 * than one document, which this API does not accept. */
if (bson_reader_read(reader, &eof) || !eof) {
phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Reading document did not exhaust input buffer");
goto cleanup;
}
retval = true;
cleanup:
if (reader) {
bson_reader_destroy(reader);
}
return retval;
} /* }}} */
/* Resolves a class entry for the given name and validates that it is both
 * instantiatable and an implementation of interface_ce. Returns the class
 * entry on success; otherwise throws an exception and returns NULL. */
static zend_class_entry *php_phongo_bson_state_fetch_class(const char *classname, int classname_len, zend_class_entry *interface_ce TSRMLS_DC) /* {{{ */
{
#if PHP_VERSION_ID >= 70000
zend_string *zs_name = zend_string_init(classname, classname_len, 0);
zend_class_entry *ce = zend_fetch_class(zs_name, ZEND_FETCH_CLASS_AUTO|ZEND_FETCH_CLASS_SILENT TSRMLS_CC);
zend_string_release(zs_name);
#else
zend_class_entry *ce = zend_fetch_class(classname, classname_len, ZEND_FETCH_CLASS_AUTO|ZEND_FETCH_CLASS_SILENT TSRMLS_CC);
#endif
/* Guard clauses: throw a descriptive exception for the first requirement
 * the class fails to meet. */
if (!ce) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Class %s does not exist", classname);
return NULL;
}
if (!PHONGO_IS_CLASS_INSTANTIATABLE(ce)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Class %s is not instantiatable", classname);
return NULL;
}
if (!instanceof_function(ce, interface_ce TSRMLS_CC)) {
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Class %s does not implement %s", classname, ZSTR_VAL(interface_ce->name));
return NULL;
}
return ce;
} /* }}} */
/* Parses a single type map entry (i.e. "array", "document", or "root") from
 * the options array. On success, assigns the type and type_ce output
 * arguments and returns true; otherwise throws an exception and returns
 * false. An absent/empty entry is not an error and leaves the outputs
 * untouched. */
static bool php_phongo_bson_state_parse_type(zval *options, const char *name, php_phongo_bson_typemap_types *type, zend_class_entry **type_ce TSRMLS_DC) /* {{{ */
{
char *classname;
int classname_len;
zend_bool classname_free = 0;
bool retval = true;
classname = php_array_fetch_string(options, name, &classname_len, &classname_free);
if (classname_len) {
/* Case-insensitive matches for the two native pseudo-types; anything
 * else must name an Unserializable class. */
if (strcasecmp(classname, "array") == 0) {
*type = PHONGO_TYPEMAP_NATIVE_ARRAY;
*type_ce = NULL;
} else if (strcasecmp(classname, "stdclass") == 0 || strcasecmp(classname, "object") == 0) {
*type = PHONGO_TYPEMAP_NATIVE_OBJECT;
*type_ce = NULL;
} else if ((*type_ce = php_phongo_bson_state_fetch_class(classname, classname_len, php_phongo_unserializable_ce TSRMLS_CC))) {
*type = PHONGO_TYPEMAP_CLASS;
} else {
/* Exception already thrown by the class lookup. */
retval = false;
}
}
if (classname_free) {
str_efree(classname);
}
return retval;
} /* }}} */
/* Applies the typemap array argument to a typemap struct. Returns true on
 * success; otherwise, false is returned and an exception is thrown. A NULL
 * typemap is a no-op success. */
bool php_phongo_bson_typemap_to_state(zval *typemap, php_phongo_bson_typemap *map TSRMLS_DC) /* {{{ */
{
if (!typemap) {
return true;
}
/* Parse each entry in turn, stopping at the first failure (the parser has
 * already thrown the exception in that case). */
if (!php_phongo_bson_state_parse_type(typemap, "array", &map->array_type, &map->array TSRMLS_CC)) {
return false;
}
if (!php_phongo_bson_state_parse_type(typemap, "document", &map->document_type, &map->document TSRMLS_CC)) {
return false;
}
if (!php_phongo_bson_state_parse_type(typemap, "root", &map->root_type, &map->root TSRMLS_CC)) {
return false;
}
return true;
} /* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
diff --git a/mongodb-1.3.4/src/contrib/php_array_api.h b/mongodb-1.4.2/src/contrib/php_array_api.h
similarity index 100%
rename from mongodb-1.3.4/src/contrib/php_array_api.h
rename to mongodb-1.4.2/src/contrib/php_array_api.h
diff --git a/mongodb-1.4.2/src/libbson/VERSION_CURRENT b/mongodb-1.4.2/src/libbson/VERSION_CURRENT
new file mode 100644
index 00000000..77fee73a
--- /dev/null
+++ b/mongodb-1.4.2/src/libbson/VERSION_CURRENT
@@ -0,0 +1 @@
+1.9.3
diff --git a/mongodb-1.4.2/src/libbson/VERSION_RELEASED b/mongodb-1.4.2/src/libbson/VERSION_RELEASED
new file mode 100644
index 00000000..77fee73a
--- /dev/null
+++ b/mongodb-1.4.2/src/libbson/VERSION_RELEASED
@@ -0,0 +1 @@
+1.9.3
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/CheckAtomics.m4 b/mongodb-1.4.2/src/libbson/build/autotools/CheckAtomics.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/CheckAtomics.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/CheckAtomics.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/CheckCompiler.m4 b/mongodb-1.4.2/src/libbson/build/autotools/CheckCompiler.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/CheckCompiler.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/CheckCompiler.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/CheckHeaders.m4 b/mongodb-1.4.2/src/libbson/build/autotools/CheckHeaders.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/CheckHeaders.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/CheckHeaders.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/CheckHost.m4 b/mongodb-1.4.2/src/libbson/build/autotools/CheckHost.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/CheckHost.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/CheckHost.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/CheckProgs.m4 b/mongodb-1.4.2/src/libbson/build/autotools/CheckProgs.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/CheckProgs.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/CheckProgs.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/CheckTarget.m4 b/mongodb-1.4.2/src/libbson/build/autotools/CheckTarget.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/CheckTarget.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/CheckTarget.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/Coverage.m4 b/mongodb-1.4.2/src/libbson/build/autotools/Coverage.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/Coverage.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/Coverage.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/Endian.m4 b/mongodb-1.4.2/src/libbson/build/autotools/Endian.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/Endian.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/Endian.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/FindDependencies.m4 b/mongodb-1.4.2/src/libbson/build/autotools/FindDependencies.m4
similarity index 96%
rename from mongodb-1.3.4/src/libbson/build/autotools/FindDependencies.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/FindDependencies.m4
index 11ae218a..468f6818 100644
--- a/mongodb-1.3.4/src/libbson/build/autotools/FindDependencies.m4
+++ b/mongodb-1.4.2/src/libbson/build/autotools/FindDependencies.m4
@@ -1,112 +1,116 @@
# Check for strnlen()
dnl AC_CHECK_FUNC isn't properly respecting _XOPEN_SOURCE for strnlen for unknown reason
AC_SUBST(BSON_HAVE_STRNLEN, 0)
AC_CACHE_CHECK([for strnlen],
bson_cv_have_strnlen,
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <string.h>
int strnlen () { return 0; }
]])],
[bson_cv_have_strnlen=no],
[bson_cv_have_strnlen=yes])])
if test "$bson_cv_have_strnlen" = yes; then
AC_SUBST(BSON_HAVE_STRNLEN, 1)
fi
# Check for reallocf() (BSD/Darwin)
AC_SUBST(BSON_HAVE_REALLOCF, 0)
AC_CACHE_CHECK([for reallocf],
bson_cv_have_reallocf,
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <stdlib.h>
int reallocf () { return 0; }
]])],
[bson_cv_have_reallocf=no],
[bson_cv_have_reallocf=yes])])
if test "$bson_cv_have_reallocf" = yes; then
AC_SUBST(BSON_HAVE_REALLOCF, 1)
fi
# Check for syscall()
AC_SUBST(BSON_HAVE_SYSCALL_TID, 0)
AC_CACHE_CHECK([for syscall],
bson_cv_have_syscall_tid,
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <unistd.h>
#include <sys/syscall.h>
int syscall () { return 0; }
]])],
[bson_cv_have_syscall_tid=no],
[bson_cv_have_syscall_tid=yes])])
if test "$bson_cv_have_syscall_tid" = yes -a "$os_darwin" != "yes"; then
AC_CACHE_CHECK([for SYS_gettid],
bson_cv_have_sys_gettid_tid,
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <unistd.h>
#include <sys/syscall.h>
int gettid () { return SYS_gettid; }
]])],
[bson_cv_have_sys_gettid_tid=yes],
[bson_cv_have_sys_gettid_tid=no])])
if test "$bson_cv_have_sys_gettid_tid" = yes; then
AC_SUBST(BSON_HAVE_SYSCALL_TID, 1)
fi
fi
# Check for snprintf()
AC_SUBST(BSON_HAVE_SNPRINTF, 0)
AC_CHECK_FUNC(snprintf, [AC_SUBST(BSON_HAVE_SNPRINTF, 1)])
# Check for struct timespec
AC_SUBST(BSON_HAVE_TIMESPEC, 0)
AC_CHECK_TYPE([struct timespec], [AC_SUBST(BSON_HAVE_TIMESPEC, 1)], [], [#include <time.h>])
# Check for clock_gettime and if it needs -lrt
AC_SUBST(BSON_HAVE_CLOCK_GETTIME, 0)
AC_SEARCH_LIBS([clock_gettime], [rt], [AC_SUBST(BSON_HAVE_CLOCK_GETTIME, 1)])
# Check if math functions need -lm
AC_SEARCH_LIBS([floor], [m])
# Check for gmtime_r()
AC_SUBST(BSON_HAVE_GMTIME_R, 0)
AC_CHECK_FUNC(gmtime_r, [AC_SUBST(BSON_HAVE_GMTIME_R, 1)])
+# Check for rand_r()
+AC_SUBST(BSON_HAVE_RAND_R, 0)
+AC_CHECK_FUNC(rand_r, [AC_SUBST(BSON_HAVE_RAND_R, 1)], [], [#include <stdlib.h>])
+
# Check for pthreads. We might need to make this better to handle mingw,
# but I actually think it is okay to just check for it even though we will
# use win32 primatives.
AX_PTHREAD([],
[AC_MSG_ERROR([libbson requires pthreads on non-Windows platforms.])])
# The following is borrowed from the guile configure script.
#
# On past versions of Solaris, believe 8 through 10 at least, you
# had to write "pthread_once_t foo = { PTHREAD_ONCE_INIT };".
# This is contrary to POSIX:
# http://www.opengroup.org/onlinepubs/000095399/functions/pthread_once.html
# Check here if this style is required.
#
# glibc (2.3.6 at least) works both with or without braces, so the
# test checks whether it works without.
#
AC_SUBST(BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES, 0)
AC_CACHE_CHECK([whether PTHREAD_ONCE_INIT needs braces],
bson_cv_need_braces_on_pthread_once_init,
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>
pthread_once_t foo = PTHREAD_ONCE_INIT;]])],
[bson_cv_need_braces_on_pthread_once_init=no],
[bson_cv_need_braces_on_pthread_once_init=yes])])
if test "$bson_cv_need_braces_on_pthread_once_init" = yes; then
AC_SUBST(BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES, 1)
fi
# Solaris needs to link against socket libs.
# This is only used in our streaming bson examples
if test "$os_solaris" = "yes"; then
SOCKET_CFLAGS="$CFLAGS -D__EXTENSIONS__"
SOCKET_CFLAGS="$CFLAGS -D_XOPEN_SOURCE=1"
SOCKET_CFLAGS="$CFLAGS -D_XOPEN_SOURCE_EXTENDED=1"
SOCKET_LDFLAGS="$LDFLAGS -lsocket -lnsl"
AC_SUBST(SOCKET_CFLAGS)
AC_SUBST(SOCKET_LDFLAGS)
fi
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/MaintainerFlags.m4 b/mongodb-1.4.2/src/libbson/build/autotools/MaintainerFlags.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/MaintainerFlags.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/MaintainerFlags.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/Optimizations.m4 b/mongodb-1.4.2/src/libbson/build/autotools/Optimizations.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/Optimizations.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/Optimizations.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/PrintBuildConfiguration.m4 b/mongodb-1.4.2/src/libbson/build/autotools/PrintBuildConfiguration.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/PrintBuildConfiguration.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/PrintBuildConfiguration.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/ReadCommandLineArguments.m4 b/mongodb-1.4.2/src/libbson/build/autotools/ReadCommandLineArguments.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/ReadCommandLineArguments.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/ReadCommandLineArguments.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/SetupAutomake.m4 b/mongodb-1.4.2/src/libbson/build/autotools/SetupAutomake.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/SetupAutomake.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/SetupAutomake.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/SetupLibtool.m4 b/mongodb-1.4.2/src/libbson/build/autotools/SetupLibtool.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/SetupLibtool.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/SetupLibtool.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/Versions.m4 b/mongodb-1.4.2/src/libbson/build/autotools/Versions.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/Versions.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/Versions.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/m4/ac_check_typedef.m4 b/mongodb-1.4.2/src/libbson/build/autotools/m4/ac_check_typedef.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/m4/ac_check_typedef.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/m4/ac_check_typedef.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/ac_compile_check_sizeof.m4 b/mongodb-1.4.2/src/libbson/build/autotools/m4/ac_compile_check_sizeof.m4
similarity index 72%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/m4/ac_compile_check_sizeof.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/m4/ac_compile_check_sizeof.m4
index 7ba9b480..afecad77 100644
--- a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/ac_compile_check_sizeof.m4
+++ b/mongodb-1.4.2/src/libbson/build/autotools/m4/ac_compile_check_sizeof.m4
@@ -1,25 +1,24 @@
AC_DEFUN([AC_COMPILE_CHECK_SIZEOF],
-[changequote(<<, >>)dnl
+[
dnl The name to #define.
-define(<<AC_TYPE_NAME>>, translit(sizeof_$1, [a-z *], [A-Z_P]))dnl
+define(AC_TYPE_NAME, translit(sizeof_$1, [a-z *], [A-Z_P]))dnl
dnl The cache variable name.
-define(<<AC_CV_NAME>>, translit(ac_cv_sizeof_$1, [ *], [_p]))dnl
-changequote([, ])dnl
+define(AC_CV_NAME, translit(ac_cv_sizeof_$1, [ *], [_p]))dnl
AC_MSG_CHECKING(size of $1)
AC_CACHE_VAL(AC_CV_NAME,
[for ac_size in 4 8 1 2 16 $2 ; do # List sizes in rough order of prevalence.
AC_TRY_COMPILE([#include "confdefs.h"
-#include <sys/types.h>
+#include <sys/types.h>
$2
], [switch (0) case 0: case (sizeof ($1) == $ac_size):;], AC_CV_NAME=$ac_size)
if test x$AC_CV_NAME != x ; then break; fi
done
])
if test x$AC_CV_NAME = x ; then
AC_MSG_ERROR([cannot determine a size for $1])
fi
AC_MSG_RESULT($AC_CV_NAME)
AC_DEFINE_UNQUOTED(AC_TYPE_NAME, $AC_CV_NAME, [The number of bytes in type $1])
undefine([AC_TYPE_NAME])dnl
undefine([AC_CV_NAME])dnl
])
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/m4/ac_create_stdint_h.m4 b/mongodb-1.4.2/src/libbson/build/autotools/m4/ac_create_stdint_h.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/m4/ac_create_stdint_h.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/m4/ac_create_stdint_h.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/m4/as-compiler-flag.m4 b/mongodb-1.4.2/src/libbson/build/autotools/m4/as-compiler-flag.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/m4/as-compiler-flag.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/m4/as-compiler-flag.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/m4/ax_check_compile_flag.m4 b/mongodb-1.4.2/src/libbson/build/autotools/m4/ax_check_compile_flag.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/m4/ax_check_compile_flag.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/m4/ax_check_compile_flag.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/m4/ax_check_link_flag.m4 b/mongodb-1.4.2/src/libbson/build/autotools/m4/ax_check_link_flag.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/m4/ax_check_link_flag.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/m4/ax_check_link_flag.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/m4/ax_pthread.m4 b/mongodb-1.4.2/src/libbson/build/autotools/m4/ax_pthread.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/m4/ax_pthread.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/m4/ax_pthread.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/pkg.m4 b/mongodb-1.4.2/src/libbson/build/autotools/m4/pkg.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/m4/pkg.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/m4/pkg.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/m4/silent.m4 b/mongodb-1.4.2/src/libbson/build/autotools/m4/silent.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/m4/silent.m4
rename to mongodb-1.4.2/src/libbson/build/autotools/m4/silent.m4
diff --git a/mongodb-1.3.4/src/libbson/src/bson/b64_ntop.h b/mongodb-1.4.2/src/libbson/src/bson/b64_ntop.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/b64_ntop.h
rename to mongodb-1.4.2/src/libbson/src/bson/b64_ntop.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/b64_pton.h b/mongodb-1.4.2/src/libbson/src/bson/b64_pton.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/b64_pton.h
rename to mongodb-1.4.2/src/libbson/src/bson/b64_pton.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bcon.c b/mongodb-1.4.2/src/libbson/src/bson/bcon.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bcon.c
rename to mongodb-1.4.2/src/libbson/src/bson/bcon.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bcon.h b/mongodb-1.4.2/src/libbson/src/bson/bcon.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bcon.h
rename to mongodb-1.4.2/src/libbson/src/bson/bcon.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-atomic.c b/mongodb-1.4.2/src/libbson/src/bson/bson-atomic.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-atomic.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-atomic.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-atomic.h b/mongodb-1.4.2/src/libbson/src/bson/bson-atomic.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-atomic.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-atomic.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-clock.c b/mongodb-1.4.2/src/libbson/src/bson/bson-clock.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-clock.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-clock.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-clock.h b/mongodb-1.4.2/src/libbson/src/bson/bson-clock.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-clock.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-clock.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-compat.h b/mongodb-1.4.2/src/libbson/src/bson/bson-compat.h
similarity index 96%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-compat.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-compat.h
index ed730998..5ab89a20 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-compat.h
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-compat.h
@@ -1,170 +1,182 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BSON_COMPAT_H
#define BSON_COMPAT_H
#if !defined(BSON_INSIDE) && !defined(BSON_COMPILATION)
#error "Only <bson.h> can be included directly."
#endif
#if defined(__MINGW32__)
#if defined(__USE_MINGW_ANSI_STDIO)
#if __USE_MINGW_ANSI_STDIO < 1
#error "__USE_MINGW_ANSI_STDIO > 0 is required for correct PRI* macros"
#endif
#else
#define __USE_MINGW_ANSI_STDIO 1
#endif
#endif
#include "bson-config.h"
#include "bson-macros.h"
#ifdef BSON_OS_WIN32
#if defined(_WIN32_WINNT) && (_WIN32_WINNT < 0x0600)
#undef _WIN32_WINNT
#endif
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0600
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <winsock2.h>
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#else
#include <windows.h>
#endif
#include <direct.h>
#include <io.h>
#endif
#ifdef BSON_OS_UNIX
#include <unistd.h>
#include <sys/time.h>
#endif
#include "bson-macros.h"
#include <errno.h>
#include <ctype.h>
#include <limits.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
BSON_BEGIN_DECLS
#ifdef _MSC_VER
#include <time.h>
#include "bson-stdint-win32.h"
#ifndef __cplusplus
/* benign redefinition of type */
#pragma warning(disable : 4142)
#ifndef _SSIZE_T_DEFINED
#define _SSIZE_T_DEFINED
typedef SSIZE_T ssize_t;
#endif
#ifndef _SIZE_T_DEFINED
#define _SIZE_T_DEFINED
typedef SIZE_T size_t;
#endif
#pragma warning(default : 4142)
#else
/*
* MSVC++ does not include ssize_t, just size_t.
* So we need to synthesize that as well.
*/
#pragma warning(disable : 4142)
#ifndef _SSIZE_T_DEFINED
#define _SSIZE_T_DEFINED
typedef SSIZE_T ssize_t;
#endif
#pragma warning(default : 4142)
#endif
+#ifndef PRIi32
#define PRIi32 "d"
+#endif
+#ifndef PRId32
#define PRId32 "d"
+#endif
+#ifndef PRIu32
#define PRIu32 "u"
+#endif
+#ifndef PRIi64
#define PRIi64 "I64i"
+#endif
+#ifndef PRId64
#define PRId64 "I64i"
+#endif
+#ifndef PRIu64
#define PRIu64 "I64u"
+#endif
#else
#include "bson-stdint.h"
#include <inttypes.h>
#endif
#if defined(__MINGW32__) && !defined(INIT_ONCE_STATIC_INIT)
#define INIT_ONCE_STATIC_INIT RTL_RUN_ONCE_INIT
typedef RTL_RUN_ONCE INIT_ONCE;
#endif
#ifdef BSON_HAVE_STDBOOL_H
#include <stdbool.h>
#elif !defined(__bool_true_false_are_defined)
#ifndef __cplusplus
typedef signed char bool;
#define false 0
#define true 1
#endif
#define __bool_true_false_are_defined 1
#endif
#if defined(__GNUC__)
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
#define bson_sync_synchronize() __sync_synchronize ()
#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || \
defined(__i686__) || defined(__x86_64__)
#define bson_sync_synchronize() asm volatile("mfence" ::: "memory")
#else
#define bson_sync_synchronize() asm volatile("sync" ::: "memory")
#endif
#elif defined(_MSC_VER)
#define bson_sync_synchronize() MemoryBarrier ()
#endif
#if !defined(va_copy) && defined(__va_copy)
#define va_copy(dst, src) __va_copy (dst, src)
#endif
#if !defined(va_copy)
#define va_copy(dst, src) ((dst) = (src))
#endif
BSON_END_DECLS
#endif /* BSON_COMPAT_H */
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-config.h b/mongodb-1.4.2/src/libbson/src/bson/bson-config.h
similarity index 96%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-config.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-config.h
index 2f5fae0b..a5c1f9ed 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-config.h
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-config.h
@@ -1,127 +1,132 @@
#ifndef BSON_CONFIG_H
#define BSON_CONFIG_H
/*
* Define to 1234 for Little Endian, 4321 for Big Endian.
*/
#define BSON_BYTE_ORDER 1234
/*
* Define to 1 if you have stdbool.h
*/
#define BSON_HAVE_STDBOOL_H 1
#if BSON_HAVE_STDBOOL_H != 1
# undef BSON_HAVE_STDBOOL_H
#endif
/*
* Define to 1 for POSIX-like systems, 2 for Windows.
*/
#define BSON_OS 1
/*
* Define to 1 if we have access to GCC 32-bit atomic builtins.
* While this requires GCC 4.1+ in most cases, it is also architecture
* dependent. For example, some PPC or ARM systems may not have it even
* if it is a recent GCC version.
*/
#define BSON_HAVE_ATOMIC_32_ADD_AND_FETCH 1
#if BSON_HAVE_ATOMIC_32_ADD_AND_FETCH != 1
# undef BSON_HAVE_ATOMIC_32_ADD_AND_FETCH
#endif
/*
* Similarly, define to 1 if we have access to GCC 64-bit atomic builtins.
*/
#define BSON_HAVE_ATOMIC_64_ADD_AND_FETCH 1
#if BSON_HAVE_ATOMIC_64_ADD_AND_FETCH != 1
# undef BSON_HAVE_ATOMIC_64_ADD_AND_FETCH
#endif
/*
* Define to 1 if your system requires {} around PTHREAD_ONCE_INIT.
* This is typically just Solaris 8-10.
*/
#define BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES 0
#if BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES != 1
# undef BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES
#endif
/*
* Define to 1 if you have clock_gettime() available.
*/
#define BSON_HAVE_CLOCK_GETTIME 1
#if BSON_HAVE_CLOCK_GETTIME != 1
# undef BSON_HAVE_CLOCK_GETTIME
#endif
/*
* Define to 1 if you have strnlen available on your platform.
*/
#define BSON_HAVE_STRNLEN 1
#if BSON_HAVE_STRNLEN != 1
# undef BSON_HAVE_STRNLEN
#endif
/*
* Define to 1 if you have snprintf available on your platform.
*/
#define BSON_HAVE_SNPRINTF 1
#if BSON_HAVE_SNPRINTF != 1
# undef BSON_HAVE_SNPRINTF
#endif
/*
* Define to 1 if you have gmtime_r available on your platform.
*/
#define BSON_HAVE_GMTIME_R 1
#if BSON_HAVE_GMTIME_R != 1
# undef BSON_HAVE_GMTIME_R
#endif
/*
* Define to 1 if you have reallocf available on your platform.
*/
#define BSON_HAVE_REALLOCF 0
#if BSON_HAVE_REALLOCF != 1
# undef BSON_HAVE_REALLOCF
#endif
/*
* Define to 1 if you have struct timespec available on your platform.
*/
#define BSON_HAVE_TIMESPEC 1
#if BSON_HAVE_TIMESPEC != 1
# undef BSON_HAVE_TIMESPEC
#endif
/*
* Define to 1 if you want extra aligned types in libbson
*/
#define BSON_EXTRA_ALIGN 0
#if BSON_EXTRA_ALIGN != 1
# undef BSON_EXTRA_ALIGN
#endif
/*
* Define to 1 if you have SYS_gettid syscall
*/
#define BSON_HAVE_SYSCALL_TID 1
#if BSON_HAVE_SYSCALL_TID != 1
# undef BSON_HAVE_SYSCALL_TID
#endif
+#define BSON_HAVE_RAND_R 1
+#if BSON_HAVE_RAND_R != 1
+# undef BSON_HAVE_RAND_R
+#endif
+
#endif /* BSON_CONFIG_H */
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-config.h.in b/mongodb-1.4.2/src/libbson/src/bson/bson-config.h.in
similarity index 96%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-config.h.in
rename to mongodb-1.4.2/src/libbson/src/bson/bson-config.h.in
index c6728e0b..dd90b4bc 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-config.h.in
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-config.h.in
@@ -1,127 +1,132 @@
#ifndef BSON_CONFIG_H
#define BSON_CONFIG_H
/*
* Define to 1234 for Little Endian, 4321 for Big Endian.
*/
#define BSON_BYTE_ORDER @BSON_BYTE_ORDER@
/*
* Define to 1 if you have stdbool.h
*/
#define BSON_HAVE_STDBOOL_H @BSON_HAVE_STDBOOL_H@
#if BSON_HAVE_STDBOOL_H != 1
# undef BSON_HAVE_STDBOOL_H
#endif
/*
* Define to 1 for POSIX-like systems, 2 for Windows.
*/
#define BSON_OS @BSON_OS@
/*
* Define to 1 if we have access to GCC 32-bit atomic builtins.
* While this requires GCC 4.1+ in most cases, it is also architecture
* dependent. For example, some PPC or ARM systems may not have it even
* if it is a recent GCC version.
*/
#define BSON_HAVE_ATOMIC_32_ADD_AND_FETCH @BSON_HAVE_ATOMIC_32_ADD_AND_FETCH@
#if BSON_HAVE_ATOMIC_32_ADD_AND_FETCH != 1
# undef BSON_HAVE_ATOMIC_32_ADD_AND_FETCH
#endif
/*
* Similarly, define to 1 if we have access to GCC 64-bit atomic builtins.
*/
#define BSON_HAVE_ATOMIC_64_ADD_AND_FETCH @BSON_HAVE_ATOMIC_64_ADD_AND_FETCH@
#if BSON_HAVE_ATOMIC_64_ADD_AND_FETCH != 1
# undef BSON_HAVE_ATOMIC_64_ADD_AND_FETCH
#endif
/*
* Define to 1 if your system requires {} around PTHREAD_ONCE_INIT.
* This is typically just Solaris 8-10.
*/
#define BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES @BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES@
#if BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES != 1
# undef BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES
#endif
/*
* Define to 1 if you have clock_gettime() available.
*/
#define BSON_HAVE_CLOCK_GETTIME @BSON_HAVE_CLOCK_GETTIME@
#if BSON_HAVE_CLOCK_GETTIME != 1
# undef BSON_HAVE_CLOCK_GETTIME
#endif
/*
* Define to 1 if you have strnlen available on your platform.
*/
#define BSON_HAVE_STRNLEN @BSON_HAVE_STRNLEN@
#if BSON_HAVE_STRNLEN != 1
# undef BSON_HAVE_STRNLEN
#endif
/*
* Define to 1 if you have snprintf available on your platform.
*/
#define BSON_HAVE_SNPRINTF @BSON_HAVE_SNPRINTF@
#if BSON_HAVE_SNPRINTF != 1
# undef BSON_HAVE_SNPRINTF
#endif
/*
* Define to 1 if you have gmtime_r available on your platform.
*/
#define BSON_HAVE_GMTIME_R @BSON_HAVE_GMTIME_R@
#if BSON_HAVE_GMTIME_R != 1
# undef BSON_HAVE_GMTIME_R
#endif
/*
* Define to 1 if you have reallocf available on your platform.
*/
#define BSON_HAVE_REALLOCF @BSON_HAVE_REALLOCF@
#if BSON_HAVE_REALLOCF != 1
# undef BSON_HAVE_REALLOCF
#endif
/*
* Define to 1 if you have struct timespec available on your platform.
*/
#define BSON_HAVE_TIMESPEC @BSON_HAVE_TIMESPEC@
#if BSON_HAVE_TIMESPEC != 1
# undef BSON_HAVE_TIMESPEC
#endif
/*
* Define to 1 if you want extra aligned types in libbson
*/
#define BSON_EXTRA_ALIGN @BSON_EXTRA_ALIGN@
#if BSON_EXTRA_ALIGN != 1
# undef BSON_EXTRA_ALIGN
#endif
/*
* Define to 1 if you have SYS_gettid syscall
*/
#define BSON_HAVE_SYSCALL_TID @BSON_HAVE_SYSCALL_TID@
#if BSON_HAVE_SYSCALL_TID != 1
# undef BSON_HAVE_SYSCALL_TID
#endif
+#define BSON_HAVE_RAND_R @BSON_HAVE_RAND_R@
+#if BSON_HAVE_RAND_R != 1
+# undef BSON_HAVE_RAND_R
+#endif
+
#endif /* BSON_CONFIG_H */
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-context-private.h b/mongodb-1.4.2/src/libbson/src/bson/bson-context-private.h
similarity index 94%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-context-private.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-context-private.h
index 26918984..494137ae 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-context-private.h
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-context-private.h
@@ -1,47 +1,48 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BSON_CONTEXT_PRIVATE_H
#define BSON_CONTEXT_PRIVATE_H
#include "bson-context.h"
#include "bson-thread-private.h"
BSON_BEGIN_DECLS
struct _bson_context_t {
- bson_context_flags_t flags : 7;
+ /* flags are defined in bson_context_flags_t */
+ int flags : 7;
bool pidbe_once : 1;
uint8_t pidbe[2];
uint8_t md5[3];
int32_t seq32;
int64_t seq64;
void (*oid_get_host) (bson_context_t *context, bson_oid_t *oid);
void (*oid_get_pid) (bson_context_t *context, bson_oid_t *oid);
void (*oid_get_seq32) (bson_context_t *context, bson_oid_t *oid);
void (*oid_get_seq64) (bson_context_t *context, bson_oid_t *oid);
};
BSON_END_DECLS
#endif /* BSON_CONTEXT_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-context.c b/mongodb-1.4.2/src/libbson/src/bson/bson-context.c
similarity index 98%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-context.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-context.c
index 00309bf7..74845519 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-context.c
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-context.c
@@ -1,508 +1,509 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bson-compat.h"
#include <limits.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "bson-atomic.h"
#include "bson-clock.h"
#include "bson-context.h"
#include "bson-context-private.h"
#include "bson-md5.h"
#include "bson-memory.h"
#include "bson-thread-private.h"
#ifdef BSON_HAVE_SYSCALL_TID
#include <sys/syscall.h>
#endif
#ifndef HOST_NAME_MAX
#define HOST_NAME_MAX 256
#endif
/*
* Globals.
*/
static bson_context_t gContextDefault;
#ifdef BSON_HAVE_SYSCALL_TID
-static uint16_t
-gettid (void)
+static long
+bson_gettid (void)
{
return syscall (SYS_gettid);
}
#endif
/*
*--------------------------------------------------------------------------
*
* _bson_context_get_oid_host --
*
* Retrieves the first three bytes of MD5(hostname) and assigns them
* to the host portion of oid.
*
* Returns:
* None.
*
* Side effects:
* @oid is modified.
*
*--------------------------------------------------------------------------
*/
static void
_bson_context_get_oid_host (bson_context_t *context, /* IN */
bson_oid_t *oid) /* OUT */
{
uint8_t *bytes = (uint8_t *) oid;
uint8_t digest[16];
bson_md5_t md5;
char hostname[HOST_NAME_MAX];
BSON_ASSERT (context);
BSON_ASSERT (oid);
gethostname (hostname, sizeof hostname);
hostname[HOST_NAME_MAX - 1] = '\0';
bson_md5_init (&md5);
bson_md5_append (
&md5, (const uint8_t *) hostname, (uint32_t) strlen (hostname));
bson_md5_finish (&md5, &digest[0]);
bytes[4] = digest[0];
bytes[5] = digest[1];
bytes[6] = digest[2];
}
/*
*--------------------------------------------------------------------------
*
* _bson_context_get_oid_host_cached --
*
* Fetch the cached copy of the MD5(hostname).
*
* Returns:
* None.
*
* Side effects:
* @oid is modified.
*
*--------------------------------------------------------------------------
*/
static void
_bson_context_get_oid_host_cached (bson_context_t *context, /* IN */
bson_oid_t *oid) /* OUT */
{
BSON_ASSERT (context);
BSON_ASSERT (oid);
oid->bytes[4] = context->md5[0];
oid->bytes[5] = context->md5[1];
oid->bytes[6] = context->md5[2];
}
static BSON_INLINE uint16_t
_bson_getpid (void)
{
uint16_t pid;
#ifdef BSON_OS_WIN32
DWORD real_pid;
real_pid = GetCurrentProcessId ();
pid = (real_pid & 0xFFFF) ^ ((real_pid >> 16) & 0xFFFF);
#else
pid = getpid ();
#endif
return pid;
}
/*
*--------------------------------------------------------------------------
*
* _bson_context_get_oid_pid --
*
* Initialize the pid field of @oid.
*
* The pid field is 2 bytes, big-endian for memcmp().
*
* Returns:
* None.
*
* Side effects:
* @oid is modified.
*
*--------------------------------------------------------------------------
*/
static void
_bson_context_get_oid_pid (bson_context_t *context, /* IN */
bson_oid_t *oid) /* OUT */
{
uint16_t pid = _bson_getpid ();
uint8_t *bytes = (uint8_t *) &pid;
BSON_ASSERT (context);
BSON_ASSERT (oid);
pid = BSON_UINT16_TO_BE (pid);
oid->bytes[7] = bytes[0];
oid->bytes[8] = bytes[1];
}
/*
*--------------------------------------------------------------------------
*
* _bson_context_get_oid_pid_cached --
*
* Fetch the cached copy of the current pid.
* This helps avoid multiple calls to getpid() which is slower
* on some systems.
*
* Returns:
* None.
*
* Side effects:
* @oid is modified.
*
*--------------------------------------------------------------------------
*/
static void
_bson_context_get_oid_pid_cached (bson_context_t *context, /* IN */
bson_oid_t *oid) /* OUT */
{
oid->bytes[7] = context->pidbe[0];
oid->bytes[8] = context->pidbe[1];
}
/*
*--------------------------------------------------------------------------
*
* _bson_context_get_oid_seq32 --
*
* 32-bit sequence generator, non-thread-safe version.
*
* Returns:
* None.
*
* Side effects:
* @oid is modified.
*
*--------------------------------------------------------------------------
*/
static void
_bson_context_get_oid_seq32 (bson_context_t *context, /* IN */
bson_oid_t *oid) /* OUT */
{
uint32_t seq = context->seq32++;
seq = BSON_UINT32_TO_BE (seq);
memcpy (&oid->bytes[9], ((uint8_t *) &seq) + 1, 3);
}
/*
*--------------------------------------------------------------------------
*
* _bson_context_get_oid_seq32_threadsafe --
*
* Thread-safe version of 32-bit sequence generator.
*
* Returns:
* None.
*
* Side effects:
* @oid is modified.
*
*--------------------------------------------------------------------------
*/
static void
_bson_context_get_oid_seq32_threadsafe (bson_context_t *context, /* IN */
bson_oid_t *oid) /* OUT */
{
int32_t seq = bson_atomic_int_add (&context->seq32, 1);
seq = BSON_UINT32_TO_BE (seq);
memcpy (&oid->bytes[9], ((uint8_t *) &seq) + 1, 3);
}
/*
*--------------------------------------------------------------------------
*
* _bson_context_get_oid_seq64 --
*
* 64-bit oid sequence generator, non-thread-safe version.
*
* Returns:
* None.
*
* Side effects:
* @oid is modified.
*
*--------------------------------------------------------------------------
*/
static void
_bson_context_get_oid_seq64 (bson_context_t *context, /* IN */
bson_oid_t *oid) /* OUT */
{
uint64_t seq;
BSON_ASSERT (context);
BSON_ASSERT (oid);
seq = BSON_UINT64_TO_BE (context->seq64++);
memcpy (&oid->bytes[4], &seq, sizeof (seq));
}
/*
*--------------------------------------------------------------------------
*
* _bson_context_get_oid_seq64_threadsafe --
*
* Thread-safe 64-bit sequence generator.
*
* Returns:
* None.
*
* Side effects:
* @oid is modified.
*
*--------------------------------------------------------------------------
*/
static void
_bson_context_get_oid_seq64_threadsafe (bson_context_t *context, /* IN */
bson_oid_t *oid) /* OUT */
{
int64_t seq = bson_atomic_int64_add (&context->seq64, 1);
seq = BSON_UINT64_TO_BE (seq);
memcpy (&oid->bytes[4], &seq, sizeof (seq));
}
static void
_bson_context_init (bson_context_t *context, /* IN */
bson_context_flags_t flags) /* IN */
{
struct timeval tv;
uint16_t pid;
unsigned int seed[3];
unsigned int real_seed;
bson_oid_t oid;
- context->flags = flags;
+ context->flags = (int) flags;
context->oid_get_host = _bson_context_get_oid_host_cached;
context->oid_get_pid = _bson_context_get_oid_pid_cached;
context->oid_get_seq32 = _bson_context_get_oid_seq32;
context->oid_get_seq64 = _bson_context_get_oid_seq64;
/*
* Generate a seed for our the random starting position of our increment
* bytes. We mask off the last nibble so that the last digit of the OID will
* start at zero. Just to be nice.
*
* The seed itself is made up of the current time in seconds, milliseconds,
* and pid xored together. I welcome better solutions if at all necessary.
*/
bson_gettimeofday (&tv);
seed[0] = (unsigned int) tv.tv_sec;
seed[1] = (unsigned int) tv.tv_usec;
seed[2] = _bson_getpid ();
real_seed = seed[0] ^ seed[1] ^ seed[2];
-#ifdef BSON_OS_WIN32
+#ifndef BSON_HAVE_RAND_R
/* ms's runtime is multithreaded by default, so no rand_r */
+ /* no rand_r on android either */
srand (real_seed);
context->seq32 = rand () & 0x007FFFF0;
#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || \
defined(__OpenBSD__)
arc4random_buf (&context->seq32, sizeof (context->seq32));
context->seq32 &= 0x007FFFF0;
#else
context->seq32 = rand_r (&real_seed) & 0x007FFFF0;
#endif
if ((flags & BSON_CONTEXT_DISABLE_HOST_CACHE)) {
context->oid_get_host = _bson_context_get_oid_host;
} else {
_bson_context_get_oid_host (context, &oid);
context->md5[0] = oid.bytes[4];
context->md5[1] = oid.bytes[5];
context->md5[2] = oid.bytes[6];
}
if ((flags & BSON_CONTEXT_THREAD_SAFE)) {
context->oid_get_seq32 = _bson_context_get_oid_seq32_threadsafe;
context->oid_get_seq64 = _bson_context_get_oid_seq64_threadsafe;
}
if ((flags & BSON_CONTEXT_DISABLE_PID_CACHE)) {
context->oid_get_pid = _bson_context_get_oid_pid;
} else {
#ifdef BSON_HAVE_SYSCALL_TID
if ((flags & BSON_CONTEXT_USE_TASK_ID)) {
- int32_t tid;
+ uint16_t tid;
/* This call is always successful */
- tid = gettid ();
+ tid = (uint16_t) bson_gettid ();
pid = BSON_UINT16_TO_BE (tid);
} else
#endif
{
pid = BSON_UINT16_TO_BE (_bson_getpid ());
}
memcpy (&context->pidbe[0], &pid, 2);
}
}
/*
*--------------------------------------------------------------------------
*
* bson_context_new --
*
* Initializes a new context with the flags specified.
*
* In most cases, you want to call this with @flags set to
* BSON_CONTEXT_NONE.
*
* If you are running on Linux, %BSON_CONTEXT_USE_TASK_ID can result
* in a healthy speedup for multi-threaded scenarios.
*
* If you absolutely must have a single context for your application
* and use more than one thread, then %BSON_CONTEXT_THREAD_SAFE should
* be bitwise-or'd with your flags. This requires synchronization
* between threads.
*
* If you expect your hostname to change often, you may consider
* specifying %BSON_CONTEXT_DISABLE_HOST_CACHE so that gethostname()
* is called for every OID generated. This is much slower.
*
* If you expect your pid to change without notice, such as from an
* unexpected call to fork(), then specify
* %BSON_CONTEXT_DISABLE_PID_CACHE.
*
* Returns:
* A newly allocated bson_context_t that should be freed with
* bson_context_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
bson_context_t *
bson_context_new (bson_context_flags_t flags)
{
bson_context_t *context;
context = bson_malloc0 (sizeof *context);
_bson_context_init (context, flags);
return context;
}
/*
*--------------------------------------------------------------------------
*
* bson_context_destroy --
*
* Cleans up a bson_context_t and releases any associated resources.
* This should be called when you are done using @context.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
bson_context_destroy (bson_context_t *context) /* IN */
{
if (context != &gContextDefault) {
memset (context, 0, sizeof *context);
bson_free (context);
}
}
static BSON_ONCE_FUN (_bson_context_init_default)
{
_bson_context_init (
&gContextDefault,
(BSON_CONTEXT_THREAD_SAFE | BSON_CONTEXT_DISABLE_PID_CACHE));
BSON_ONCE_RETURN;
}
/*
*--------------------------------------------------------------------------
*
* bson_context_get_default --
*
* Fetches the default, thread-safe implementation of #bson_context_t.
* If you need faster generation, it is recommended you create your
* own #bson_context_t with bson_context_new().
*
* Returns:
* A shared instance to the default #bson_context_t. This should not
* be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
bson_context_t *
bson_context_get_default (void)
{
static bson_once_t once = BSON_ONCE_INIT;
bson_once (&once, _bson_context_init_default);
return &gContextDefault;
}
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-context.h b/mongodb-1.4.2/src/libbson/src/bson/bson-context.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-context.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-context.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-decimal128.c b/mongodb-1.4.2/src/libbson/src/bson/bson-decimal128.c
similarity index 98%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-decimal128.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-decimal128.c
index 5f73df69..89b7d5ce 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-decimal128.c
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-decimal128.c
@@ -1,734 +1,738 @@
/*
* Copyright 2015 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "bson-decimal128.h"
#include "bson-types.h"
#include "bson-macros.h"
#include "bson-string.h"
#define BSON_DECIMAL128_EXPONENT_MAX 6111
#define BSON_DECIMAL128_EXPONENT_MIN -6176
#define BSON_DECIMAL128_EXPONENT_BIAS 6176
#define BSON_DECIMAL128_MAX_DIGITS 34
#define BSON_DECIMAL128_SET_NAN(dec) \
do { \
(dec).high = 0x7c00000000000000ull; \
(dec).low = 0; \
} while (0);
#define BSON_DECIMAL128_SET_INF(dec, isneg) \
do { \
(dec).high = 0x7800000000000000ull + 0x8000000000000000ull * (isneg); \
(dec).low = 0; \
} while (0);
/**
* _bson_uint128_t:
*
* This struct represents a 128 bit integer.
*/
typedef struct {
uint32_t parts[4]; /* 32-bit words stored high to low. */
} _bson_uint128_t;
/**
*------------------------------------------------------------------------------
*
* _bson_uint128_divide1B --
*
* This function divides a #_bson_uint128_t by 1000000000 (1 billion) and
* computes the quotient and remainder.
*
* The remainder will contain 9 decimal digits for conversion to string.
*
* @value The #_bson_uint128_t operand.
* @quotient A pointer to store the #_bson_uint128_t quotient.
* @rem A pointer to store the #uint64_t remainder.
*
* Returns:
* The quotient at @quotient and the remainder at @rem.
*
* Side effects:
* None.
*
*------------------------------------------------------------------------------
*/
static void
_bson_uint128_divide1B (_bson_uint128_t value, /* IN */
_bson_uint128_t *quotient, /* OUT */
uint32_t *rem) /* OUT */
{
const uint32_t DIVISOR = 1000 * 1000 * 1000;
uint64_t _rem = 0;
int i = 0;
if (!value.parts[0] && !value.parts[1] && !value.parts[2] &&
!value.parts[3]) {
*quotient = value;
*rem = 0;
return;
}
for (i = 0; i <= 3; i++) {
_rem <<= 32; /* Adjust remainder to match value of next dividend */
_rem += value.parts[i]; /* Add the divided to _rem */
value.parts[i] = (uint32_t) (_rem / DIVISOR);
_rem %= DIVISOR; /* Store the remainder */
}
*quotient = value;
*rem = (uint32_t) _rem;
}
/**
*------------------------------------------------------------------------------
*
* bson_decimal128_to_string --
*
* This function converts a BID formatted decimal128 value to string,
* accepting a &bson_decimal128_t as @dec. The string is stored at @str.
*
* @dec : The BID formatted decimal to convert.
* @str : The output decimal128 string. At least %BSON_DECIMAL128_STRING
*characters.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*------------------------------------------------------------------------------
*/
void
bson_decimal128_to_string (const bson_decimal128_t *dec, /* IN */
char *str) /* OUT */
{
uint32_t COMBINATION_MASK = 0x1f; /* Extract least significant 5 bits */
uint32_t EXPONENT_MASK = 0x3fff; /* Extract least significant 14 bits */
uint32_t COMBINATION_INFINITY = 30; /* Value of combination field for Inf */
uint32_t COMBINATION_NAN = 31; /* Value of combination field for NaN */
uint32_t EXPONENT_BIAS = 6176; /* decimal128 exponent bias */
char *str_out = str; /* output pointer in string */
char significand_str[35]; /* decoded significand digits */
/* Note: bits in this routine are referred to starting at 0, */
/* from the sign bit, towards the coefficient. */
uint32_t high; /* bits 0 - 31 */
uint32_t midh; /* bits 32 - 63 */
uint32_t midl; /* bits 64 - 95 */
uint32_t low; /* bits 96 - 127 */
uint32_t combination; /* bits 1 - 5 */
uint32_t biased_exponent; /* decoded biased exponent (14 bits) */
uint32_t significand_digits = 0; /* the number of significand digits */
uint32_t significand[36] = {0}; /* the base-10 digits in the significand */
uint32_t *significand_read = significand; /* read pointer into significand */
int32_t exponent; /* unbiased exponent */
int32_t scientific_exponent; /* the exponent if scientific notation is
* used */
bool is_zero = false; /* true if the number is zero */
uint8_t significand_msb; /* the most signifcant significand bits (50-46) */
_bson_uint128_t
significand128; /* temporary storage for significand decoding */
size_t i; /* indexing variables */
int j, k;
memset (significand_str, 0, sizeof (significand_str));
if ((int64_t) dec->high < 0) { /* negative */
*(str_out++) = '-';
}
low = (uint32_t) dec->low, midl = (uint32_t) (dec->low >> 32),
midh = (uint32_t) dec->high, high = (uint32_t) (dec->high >> 32);
/* Decode combination field and exponent */
combination = (high >> 26) & COMBINATION_MASK;
if (BSON_UNLIKELY ((combination >> 3) == 3)) {
/* Check for 'special' values */
if (combination == COMBINATION_INFINITY) { /* Infinity */
strcpy (str_out, BSON_DECIMAL128_INF);
return;
} else if (combination == COMBINATION_NAN) { /* NaN */
/* str, not str_out, to erase the sign */
strcpy (str, BSON_DECIMAL128_NAN);
/* we don't care about the NaN payload. */
return;
} else {
biased_exponent = (high >> 15) & EXPONENT_MASK;
significand_msb = 0x8 + ((high >> 14) & 0x1);
}
} else {
significand_msb = (high >> 14) & 0x7;
biased_exponent = (high >> 17) & EXPONENT_MASK;
}
exponent = biased_exponent - EXPONENT_BIAS;
/* Create string of significand digits */
/* Convert the 114-bit binary number represented by */
/* (high, midh, midl, low) to at most 34 decimal */
/* digits through modulo and division. */
significand128.parts[0] = (high & 0x3fff) + ((significand_msb & 0xf) << 14);
significand128.parts[1] = midh;
significand128.parts[2] = midl;
significand128.parts[3] = low;
if (significand128.parts[0] == 0 && significand128.parts[1] == 0 &&
significand128.parts[2] == 0 && significand128.parts[3] == 0) {
is_zero = true;
} else if (significand128.parts[0] >= (1 << 17)) {
/* The significand is non-canonical or zero.
* In order to preserve compatability with the densely packed decimal
* format, the maximum value for the significand of decimal128 is
* 1e34 - 1. If the value is greater than 1e34 - 1, the IEEE 754
* standard dictates that the significand is interpreted as zero.
*/
is_zero = true;
} else {
for (k = 3; k >= 0; k--) {
uint32_t least_digits = 0;
_bson_uint128_divide1B (
significand128, &significand128, &least_digits);
/* We now have the 9 least significant digits (in base 2). */
/* Convert and output to string. */
if (!least_digits) {
continue;
}
for (j = 8; j >= 0; j--) {
significand[k * 9 + j] = least_digits % 10;
least_digits /= 10;
}
}
}
/* Output format options: */
/* Scientific - [-]d.dddE(+/-)dd or [-]dE(+/-)dd */
/* Regular - ddd.ddd */
if (is_zero) {
significand_digits = 1;
*significand_read = 0;
} else {
significand_digits = 36;
while (!(*significand_read)) {
significand_digits--;
significand_read++;
}
}
scientific_exponent = significand_digits - 1 + exponent;
/* The scientific exponent checks are dictated by the string conversion
* specification and are somewhat arbitrary cutoffs.
*
* We must check exponent > 0, because if this is the case, the number
* has trailing zeros. However, we *cannot* output these trailing zeros,
* because doing so would change the precision of the value, and would
* change stored data if the string converted number is round tripped.
*/
if (scientific_exponent < -6 || exponent > 0) {
/* Scientific format */
*(str_out++) = *(significand_read++) + '0';
significand_digits--;
if (significand_digits) {
*(str_out++) = '.';
}
- for (i = 0; i < significand_digits; i++) {
+ for (i = 0; i < significand_digits && (str_out - str) < 36; i++) {
*(str_out++) = *(significand_read++) + '0';
}
/* Exponent */
*(str_out++) = 'E';
bson_snprintf (str_out, 6, "%+d", scientific_exponent);
} else {
/* Regular format with no decimal place */
if (exponent >= 0) {
- for (i = 0; i < significand_digits; i++) {
+ for (i = 0; i < significand_digits && (str_out - str) < 36; i++) {
*(str_out++) = *(significand_read++) + '0';
}
*str_out = '\0';
} else {
int32_t radix_position = significand_digits + exponent;
if (radix_position > 0) { /* non-zero digits before radix */
- for (i = 0; i < radix_position; i++) {
+ for (i = 0;
+ i < radix_position && (str_out - str) < BSON_DECIMAL128_STRING;
+ i++) {
*(str_out++) = *(significand_read++) + '0';
}
} else { /* leading zero before radix point */
*(str_out++) = '0';
}
*(str_out++) = '.';
while (radix_position++ < 0) { /* add leading zeros after radix */
*(str_out++) = '0';
}
- for (i = 0; i < significand_digits - BSON_MAX (radix_position - 1, 0);
+ for (i = 0;
+ (i < significand_digits - BSON_MAX (radix_position - 1, 0)) &&
+ (str_out - str) < BSON_DECIMAL128_STRING;
i++) {
*(str_out++) = *(significand_read++) + '0';
}
*str_out = '\0';
}
}
}
typedef struct {
uint64_t high, low;
} _bson_uint128_6464_t;
/**
*-------------------------------------------------------------------------
*
* mul64x64 --
*
* This function multiplies two &uint64_t into a &_bson_uint128_6464_t.
*
* Returns:
* The product of @left and @right.
*
* Side Effects:
* None.
*
*-------------------------------------------------------------------------
*/
static void
_mul_64x64 (uint64_t left, /* IN */
uint64_t right, /* IN */
_bson_uint128_6464_t *product) /* OUT */
{
uint64_t left_high, left_low, right_high, right_low, product_high,
product_mid, product_mid2, product_low;
_bson_uint128_6464_t rt = {0};
if (!left && !right) {
*product = rt;
return;
}
left_high = left >> 32;
left_low = (uint32_t) left;
right_high = right >> 32;
right_low = (uint32_t) right;
product_high = left_high * right_high;
product_mid = left_high * right_low;
product_mid2 = left_low * right_high;
product_low = left_low * right_low;
product_high += product_mid >> 32;
product_mid = (uint32_t) product_mid + product_mid2 + (product_low >> 32);
product_high = product_high + (product_mid >> 32);
product_low = (product_mid << 32) + (uint32_t) product_low;
rt.high = product_high;
rt.low = product_low;
*product = rt;
}
/**
*------------------------------------------------------------------------------
*
* _dec128_tolower --
*
* This function converts the ASCII character @c to lowercase. It is locale
* insensitive (unlike the stdlib tolower).
*
* Returns:
* The lowercased character.
*/
char
_dec128_tolower (char c)
{
if (isupper (c)) {
c += 32;
}
return c;
}
/**
*------------------------------------------------------------------------------
*
* _dec128_istreq --
*
* This function compares the null-terminated *ASCII* strings @a and @b
* for case-insensitive equality.
*
* Returns:
* true if the strings are equal, false otherwise.
*/
bool
_dec128_istreq (const char *a, /* IN */
const char *b /* IN */)
{
while (*a != '\0' || *b != '\0') {
/* strings are different lengths. */
if (*a == '\0' || *b == '\0') {
return false;
}
if (_dec128_tolower (*a) != _dec128_tolower (*b)) {
return false;
}
a++;
b++;
}
return true;
}
/**
*------------------------------------------------------------------------------
*
* bson_decimal128_from_string --
*
* This function converts @string in the format [+-]ddd[.]ddd[E][+-]dddd to
* decimal128. Out of range values are converted to +/-Infinity. Invalid
* strings are converted to NaN.
*
* If more digits are provided than the available precision allows,
* round to the nearest expressable decimal128 with ties going to even will
* occur.
*
* Note: @string must be ASCII only!
*
* Returns:
* true on success, or false on failure. @dec will be NaN if @str was invalid
* The &bson_decimal128_t converted from @string at @dec.
*
* Side effects:
* None.
*
*------------------------------------------------------------------------------
*/
bool
bson_decimal128_from_string (const char *string, /* IN */
bson_decimal128_t *dec) /* OUT */
{
_bson_uint128_6464_t significand = {0};
const char *str_read = string; /* Read pointer for consuming str. */
/* Parsing state tracking */
bool is_negative = false;
bool saw_radix = false;
bool includes_sign = false; /* True if the input string contains a sign. */
bool found_nonzero = false;
size_t significant_digits = 0; /* Total number of significant digits
* (no leading or trailing zero) */
size_t ndigits_read = 0; /* Total number of significand digits read */
size_t ndigits = 0; /* Total number of digits (no leading zeros) */
size_t radix_position = 0; /* The number of the digits after radix */
size_t first_nonzero = 0; /* The index of the first non-zero in *str* */
uint16_t digits[BSON_DECIMAL128_MAX_DIGITS] = {0};
uint16_t ndigits_stored = 0; /* The number of digits in digits */
uint16_t *digits_insert = digits; /* Insertion pointer for digits */
size_t first_digit = 0; /* The index of the first non-zero digit */
size_t last_digit = 0; /* The index of the last digit */
int32_t exponent = 0;
uint64_t significand_high = 0; /* The high 17 digits of the significand */
uint64_t significand_low = 0; /* The low 17 digits of the significand */
uint16_t biased_exponent = 0; /* The biased exponent */
BSON_ASSERT (dec);
dec->high = 0;
dec->low = 0;
if (*str_read == '+' || *str_read == '-') {
is_negative = *(str_read++) == '-';
includes_sign = true;
}
/* Check for Infinity or NaN */
if (!isdigit (*str_read) && *str_read != '.') {
if (_dec128_istreq (str_read, "inf") ||
_dec128_istreq (str_read, "infinity")) {
BSON_DECIMAL128_SET_INF (*dec, is_negative);
return true;
} else if (_dec128_istreq (str_read, "nan")) {
BSON_DECIMAL128_SET_NAN (*dec);
return true;
}
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
/* Read digits */
while (isdigit (*str_read) || *str_read == '.') {
if (*str_read == '.') {
if (saw_radix) {
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
saw_radix = true;
str_read++;
continue;
}
if (ndigits_stored < 34) {
if (*str_read != '0' || found_nonzero) {
if (!found_nonzero) {
first_nonzero = ndigits_read;
}
found_nonzero = true;
*(digits_insert++) = *(str_read) - '0'; /* Only store 34 digits */
ndigits_stored++;
}
}
if (found_nonzero) {
ndigits++;
}
if (saw_radix) {
radix_position++;
}
ndigits_read++;
str_read++;
}
if (saw_radix && !ndigits_read) {
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
/* Read exponent if exists */
if (*str_read == 'e' || *str_read == 'E') {
int nread = 0;
#ifdef _MSC_VER
#define SSCANF sscanf_s
#else
#define SSCANF sscanf
#endif
int read_exponent = SSCANF (++str_read, "%d%n", &exponent, &nread);
str_read += nread;
if (!read_exponent || nread == 0) {
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
#undef SSCANF
}
if (*str_read) {
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
/* Done reading input. */
/* Find first non-zero digit in digits */
first_digit = 0;
if (!ndigits_stored) { /* value is zero */
first_digit = 0;
last_digit = 0;
digits[0] = 0;
ndigits = 1;
ndigits_stored = 1;
significant_digits = 0;
} else {
last_digit = ndigits_stored - 1;
significant_digits = ndigits;
/* Mark trailing zeros as non-significant */
while (string[first_nonzero + significant_digits - 1 + includes_sign +
saw_radix] == '0') {
significant_digits--;
}
}
/* Normalization of exponent */
/* Correct exponent based on radix position, and shift significand as needed
*/
/* to represent user input */
/* Overflow prevention */
if (exponent <= radix_position && radix_position - exponent > (1 << 14)) {
exponent = BSON_DECIMAL128_EXPONENT_MIN;
} else {
exponent -= radix_position;
}
/* Attempt to normalize the exponent */
while (exponent > BSON_DECIMAL128_EXPONENT_MAX) {
/* Shift exponent to significand and decrease */
last_digit++;
if (last_digit - first_digit > BSON_DECIMAL128_MAX_DIGITS) {
/* The exponent is too great to shift into the significand. */
if (significant_digits == 0) {
/* Value is zero, we are allowed to clamp the exponent. */
exponent = BSON_DECIMAL128_EXPONENT_MAX;
break;
}
/* Overflow is not permitted, error. */
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
exponent--;
}
while (exponent < BSON_DECIMAL128_EXPONENT_MIN || ndigits_stored < ndigits) {
/* Shift last digit */
if (last_digit == 0) {
/* underflow is not allowed, but zero clamping is */
if (significant_digits == 0) {
exponent = BSON_DECIMAL128_EXPONENT_MIN;
break;
}
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
if (ndigits_stored < ndigits) {
if (string[ndigits - 1 + includes_sign + saw_radix] - '0' != 0 &&
significant_digits != 0) {
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
ndigits--; /* adjust to match digits not stored */
} else {
if (digits[last_digit] != 0) {
/* Inexact rounding is not allowed. */
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
last_digit--; /* adjust to round */
}
if (exponent < BSON_DECIMAL128_EXPONENT_MAX) {
exponent++;
} else {
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
}
/* Round */
/* We've normalized the exponent, but might still need to round. */
if (last_digit - first_digit + 1 < significant_digits) {
uint8_t round_digit;
/* There are non-zero digits after last_digit that need rounding. */
/* We round to nearest, ties to even */
round_digit =
string[first_nonzero + last_digit + includes_sign + saw_radix + 1] -
'0';
if (round_digit != 0) {
/* Inexact (non-zero) rounding is not allowed */
BSON_DECIMAL128_SET_NAN (*dec);
return false;
}
}
/* Encode significand */
significand_high = 0, /* The high 17 digits of the significand */
significand_low = 0; /* The low 17 digits of the significand */
if (significant_digits == 0) { /* read a zero */
significand_high = 0;
significand_low = 0;
} else if (last_digit - first_digit < 17) {
size_t d_idx = first_digit;
significand_low = digits[d_idx++];
for (; d_idx <= last_digit; d_idx++) {
significand_low *= 10;
significand_low += digits[d_idx];
significand_high = 0;
}
} else {
size_t d_idx = first_digit;
significand_high = digits[d_idx++];
for (; d_idx <= last_digit - 17; d_idx++) {
significand_high *= 10;
significand_high += digits[d_idx];
}
significand_low = digits[d_idx++];
for (; d_idx <= last_digit; d_idx++) {
significand_low *= 10;
significand_low += digits[d_idx];
}
}
_mul_64x64 (significand_high, 100000000000000000ull, &significand);
significand.low += significand_low;
if (significand.low < significand_low) {
significand.high += 1;
}
biased_exponent = (exponent + (int16_t) BSON_DECIMAL128_EXPONENT_BIAS);
/* Encode combination, exponent, and significand. */
if ((significand.high >> 49) & 1) {
/* Encode '11' into bits 1 to 3 */
dec->high |= (0x3ull << 61);
dec->high |= (biased_exponent & 0x3fffull) << 47;
dec->high |= significand.high & 0x7fffffffffffull;
} else {
dec->high |= (biased_exponent & 0x3fffull) << 49;
dec->high |= significand.high & 0x1ffffffffffffull;
}
dec->low = significand.low;
/* Encode sign */
if (is_negative) {
dec->high |= 0x8000000000000000ull;
}
return true;
}
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-decimal128.h b/mongodb-1.4.2/src/libbson/src/bson/bson-decimal128.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-decimal128.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-decimal128.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-endian.h b/mongodb-1.4.2/src/libbson/src/bson/bson-endian.h
similarity index 98%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-endian.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-endian.h
index 6bfe4053..04b2dee9 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-endian.h
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-endian.h
@@ -1,230 +1,230 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BSON_ENDIAN_H
#define BSON_ENDIAN_H
#if !defined(BSON_INSIDE) && !defined(BSON_COMPILATION)
#error "Only <bson.h> can be included directly."
#endif
#if defined(__sun)
#include <sys/byteorder.h>
#endif
#include "bson-config.h"
#include "bson-macros.h"
#include "bson-compat.h"
BSON_BEGIN_DECLS
#define BSON_BIG_ENDIAN 4321
#define BSON_LITTLE_ENDIAN 1234
#if defined(__sun)
#define BSON_UINT16_SWAP_LE_BE(v) BSWAP_16 ((uint16_t) v)
#define BSON_UINT32_SWAP_LE_BE(v) BSWAP_32 ((uint32_t) v)
#define BSON_UINT64_SWAP_LE_BE(v) BSWAP_64 ((uint64_t) v)
#elif defined(__clang__) && defined(__clang_major__) && \
defined(__clang_minor__) && (__clang_major__ >= 3) && \
(__clang_minor__ >= 1)
#if __has_builtin(__builtin_bswap16)
#define BSON_UINT16_SWAP_LE_BE(v) __builtin_bswap16 (v)
#endif
#if __has_builtin(__builtin_bswap32)
#define BSON_UINT32_SWAP_LE_BE(v) __builtin_bswap32 (v)
#endif
#if __has_builtin(__builtin_bswap64)
#define BSON_UINT64_SWAP_LE_BE(v) __builtin_bswap64 (v)
#endif
#elif defined(__GNUC__) && (__GNUC__ >= 4)
#if __GNUC__ > 4 || (defined(__GNUC_MINOR__) && __GNUC_MINOR__ >= 3)
#define BSON_UINT32_SWAP_LE_BE(v) __builtin_bswap32 ((uint32_t) v)
#define BSON_UINT64_SWAP_LE_BE(v) __builtin_bswap64 ((uint64_t) v)
#endif
#if __GNUC__ > 4 || (defined(__GNUC_MINOR__) && __GNUC_MINOR__ >= 8)
#define BSON_UINT16_SWAP_LE_BE(v) __builtin_bswap16 ((uint32_t) v)
#endif
#endif
#ifndef BSON_UINT16_SWAP_LE_BE
#define BSON_UINT16_SWAP_LE_BE(v) __bson_uint16_swap_slow ((uint16_t) v)
#endif
#ifndef BSON_UINT32_SWAP_LE_BE
#define BSON_UINT32_SWAP_LE_BE(v) __bson_uint32_swap_slow ((uint32_t) v)
#endif
#ifndef BSON_UINT64_SWAP_LE_BE
#define BSON_UINT64_SWAP_LE_BE(v) __bson_uint64_swap_slow ((uint64_t) v)
#endif
#if BSON_BYTE_ORDER == BSON_LITTLE_ENDIAN
#define BSON_UINT16_FROM_LE(v) ((uint16_t) v)
#define BSON_UINT16_TO_LE(v) ((uint16_t) v)
#define BSON_UINT16_FROM_BE(v) BSON_UINT16_SWAP_LE_BE (v)
#define BSON_UINT16_TO_BE(v) BSON_UINT16_SWAP_LE_BE (v)
#define BSON_UINT32_FROM_LE(v) ((uint32_t) v)
#define BSON_UINT32_TO_LE(v) ((uint32_t) v)
#define BSON_UINT32_FROM_BE(v) BSON_UINT32_SWAP_LE_BE (v)
#define BSON_UINT32_TO_BE(v) BSON_UINT32_SWAP_LE_BE (v)
#define BSON_UINT64_FROM_LE(v) ((uint64_t) v)
#define BSON_UINT64_TO_LE(v) ((uint64_t) v)
#define BSON_UINT64_FROM_BE(v) BSON_UINT64_SWAP_LE_BE (v)
#define BSON_UINT64_TO_BE(v) BSON_UINT64_SWAP_LE_BE (v)
#define BSON_DOUBLE_FROM_LE(v) ((double) v)
#define BSON_DOUBLE_TO_LE(v) ((double) v)
#elif BSON_BYTE_ORDER == BSON_BIG_ENDIAN
#define BSON_UINT16_FROM_LE(v) BSON_UINT16_SWAP_LE_BE (v)
#define BSON_UINT16_TO_LE(v) BSON_UINT16_SWAP_LE_BE (v)
#define BSON_UINT16_FROM_BE(v) ((uint16_t) v)
#define BSON_UINT16_TO_BE(v) ((uint16_t) v)
#define BSON_UINT32_FROM_LE(v) BSON_UINT32_SWAP_LE_BE (v)
#define BSON_UINT32_TO_LE(v) BSON_UINT32_SWAP_LE_BE (v)
#define BSON_UINT32_FROM_BE(v) ((uint32_t) v)
#define BSON_UINT32_TO_BE(v) ((uint32_t) v)
#define BSON_UINT64_FROM_LE(v) BSON_UINT64_SWAP_LE_BE (v)
#define BSON_UINT64_TO_LE(v) BSON_UINT64_SWAP_LE_BE (v)
#define BSON_UINT64_FROM_BE(v) ((uint64_t) v)
#define BSON_UINT64_TO_BE(v) ((uint64_t) v)
#define BSON_DOUBLE_FROM_LE(v) (__bson_double_swap_slow (v))
#define BSON_DOUBLE_TO_LE(v) (__bson_double_swap_slow (v))
#else
#error "The endianness of target architecture is unknown."
#endif
/*
*--------------------------------------------------------------------------
*
* __bson_uint16_swap_slow --
*
* Fallback endianness conversion for 16-bit integers.
*
* Returns:
* The endian swapped version.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Reverse the byte order of a 16-bit value (portable fallback path). */
static BSON_INLINE uint16_t
__bson_uint16_swap_slow (uint16_t v) /* IN */
{
   /* v is promoted to int for the shifts; the cast discards any high bits. */
   return (uint16_t) ((v << 8) | (v >> 8));
}
/*
*--------------------------------------------------------------------------
*
* __bson_uint32_swap_slow --
*
* Fallback endianness conversion for 32-bit integers.
*
* Returns:
* The endian swapped version.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Reverse the byte order of a 32-bit value (portable fallback path). */
static BSON_INLINE uint32_t
__bson_uint32_swap_slow (uint32_t v) /* IN */
{
   uint32_t swapped = 0;

   swapped |= (v >> 24) & 0x000000FFU; /* byte 3 -> byte 0 */
   swapped |= (v >> 8) & 0x0000FF00U;  /* byte 2 -> byte 1 */
   swapped |= (v << 8) & 0x00FF0000U;  /* byte 1 -> byte 2 */
   swapped |= (v << 24) & 0xFF000000U; /* byte 0 -> byte 3 */

   return swapped;
}
/*
*--------------------------------------------------------------------------
*
* __bson_uint64_swap_slow --
*
* Fallback endianness conversion for 64-bit integers.
*
* Returns:
* The endian swapped version.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Reverse the byte order of a 64-bit value (portable fallback path). */
static BSON_INLINE uint64_t
__bson_uint64_swap_slow (uint64_t v) /* IN */
{
   uint64_t swapped = 0;
   int i;

   /* Consume bytes of v from least to most significant; each shift of the
    * accumulator pushes earlier bytes toward the high end, producing the
    * fully reversed ordering after eight iterations. */
   for (i = 0; i < 8; i++) {
      swapped = (swapped << 8) | ((v >> (8 * i)) & 0xFFULL);
   }

   return swapped;
}
/*
*--------------------------------------------------------------------------
*
* __bson_double_swap_slow --
*
* Fallback endianness conversion for double floating point.
*
* Returns:
* The endian swapped version.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
-BSON_STATIC_ASSERT (sizeof (double) == sizeof (uint64_t));
+BSON_STATIC_ASSERT2 (sizeof_uint64_t, sizeof (double) == sizeof (uint64_t));
/* Reverse the byte order of a double by round-tripping its bits through a
 * uint64_t. memcpy is used for the type pun to stay within the rules of
 * strict aliasing. */
static BSON_INLINE double
__bson_double_swap_slow (double v) /* IN */
{
   uint64_t bits;

   memcpy (&bits, &v, sizeof bits);
   bits = BSON_UINT64_SWAP_LE_BE (bits);
   memcpy (&v, &bits, sizeof v);

   return v;
}
BSON_END_DECLS
#endif /* BSON_ENDIAN_H */
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-error.c b/mongodb-1.4.2/src/libbson/src/bson/bson-error.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-error.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-error.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-error.h b/mongodb-1.4.2/src/libbson/src/bson/bson-error.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-error.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-error.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-iso8601-private.h b/mongodb-1.4.2/src/libbson/src/bson/bson-iso8601-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-iso8601-private.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-iso8601-private.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-iso8601.c b/mongodb-1.4.2/src/libbson/src/bson/bson-iso8601.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-iso8601.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-iso8601.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-iter.c b/mongodb-1.4.2/src/libbson/src/bson/bson-iter.c
similarity index 99%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-iter.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-iter.c
index 5b531a1c..adae52d4 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-iter.c
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-iter.c
@@ -1,2428 +1,2429 @@
/*
* Copyright 2013-2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bson-iter.h"
#include "bson-config.h"
#include "bson-decimal128.h"
#define ITER_TYPE(i) ((bson_type_t) * ((i)->raw + (i)->type))
/*
*--------------------------------------------------------------------------
*
* bson_iter_init --
*
* Initializes @iter to be used to iterate @bson.
*
* Returns:
* true if bson_iter_t was initialized. otherwise false.
*
* Side effects:
* @iter is initialized.
*
*--------------------------------------------------------------------------
*/
/* Initialize @iter to walk @bson from the beginning. Returns false (and
 * zeroes @iter) when the document is shorter than the 5-byte minimum
 * (int32 length prefix plus trailing NUL). */
bool
bson_iter_init (bson_iter_t *iter, /* OUT */
                const bson_t *bson) /* IN */
{
   BSON_ASSERT (iter);
   BSON_ASSERT (bson);

   memset (iter, 0, sizeof *iter);

   if (BSON_UNLIKELY (bson->len < 5)) {
      return false;
   }

   iter->raw = bson_get_data (bson);
   iter->len = bson->len;
   /* First element begins immediately after the 4-byte length prefix. */
   iter->next_off = 4;

   return true;
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_init_from_data --
*
* Initializes @iter to be used to iterate @data of length @length
*
* Returns:
* true if bson_iter_t was initialized. otherwise false.
*
* Side effects:
* @iter is initialized.
*
*--------------------------------------------------------------------------
*/
/* Initialize @iter over a raw BSON buffer @data of @length bytes. Returns
 * false (and zeroes @iter) when the buffer is too small, too large, its
 * embedded length prefix disagrees with @length, or the trailing NUL byte
 * is missing. */
bool
bson_iter_init_from_data (bson_iter_t *iter, /* OUT */
                          const uint8_t *data, /* IN */
                          size_t length) /* IN */
{
   uint32_t header;

   BSON_ASSERT (iter);
   BSON_ASSERT (data);

   if (BSON_UNLIKELY (length < 5 || length > INT_MAX)) {
      goto invalid;
   }

   /* The first four bytes must encode the total document length. */
   memcpy (&header, data, sizeof header);
   if (BSON_UNLIKELY ((size_t) BSON_UINT32_FROM_LE (header) != length)) {
      goto invalid;
   }

   /* A well-formed document ends with a NUL byte. */
   if (BSON_UNLIKELY (data[length - 1] != 0)) {
      goto invalid;
   }

   memset (iter, 0, sizeof *iter);
   iter->raw = (uint8_t *) data;
   iter->len = length;
   iter->next_off = 4;
   return true;

invalid:
   memset (iter, 0, sizeof *iter);
   return false;
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_recurse --
*
* Creates a new sub-iter looking at the document or array that @iter
* is currently pointing at.
*
* Returns:
* true if successful and @child was initialized.
*
* Side effects:
* @child is initialized.
*
*--------------------------------------------------------------------------
*/
/* Initialize @child to iterate the embedded document or array that @iter
 * currently points at. Returns false when the current field is neither. */
bool
bson_iter_recurse (const bson_iter_t *iter, /* IN */
                   bson_iter_t *child) /* OUT */
{
   const uint8_t *buf = NULL;
   uint32_t buflen = 0;

   BSON_ASSERT (iter);
   BSON_ASSERT (child);

   switch ((int) ITER_TYPE (iter)) {
   case BSON_TYPE_DOCUMENT:
      bson_iter_document (iter, &buflen, &buf);
      break;
   case BSON_TYPE_ARRAY:
      bson_iter_array (iter, &buflen, &buf);
      break;
   default:
      return false;
   }

   memset (child, 0, sizeof *child);
   child->raw = buf;
   child->len = buflen;
   /* Skip the embedded document's own 4-byte length prefix. */
   child->next_off = 4;
   return true;
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_init_find --
*
* Initializes a #bson_iter_t and moves the iter to the first field
* matching @key.
*
* Returns:
* true if the field named @key was found; otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Initialize @iter on @bson and advance it to the first field whose key
 * exactly matches @key (case-sensitive). Returns false when the document
 * is invalid or no such field exists. */
bool
bson_iter_init_find (bson_iter_t *iter, /* INOUT */
                     const bson_t *bson, /* IN */
                     const char *key) /* IN */
{
   BSON_ASSERT (iter);
   BSON_ASSERT (bson);
   BSON_ASSERT (key);

   if (!bson_iter_init (iter, bson)) {
      return false;
   }

   return bson_iter_find (iter, key);
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_init_find_case --
*
* A case-insensitive version of bson_iter_init_find().
*
* Returns:
* true if the field was found and @iter is observing that field.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Case-insensitive variant of bson_iter_init_find(). Returns false when
 * the document is invalid or no matching field exists. */
bool
bson_iter_init_find_case (bson_iter_t *iter, /* INOUT */
                          const bson_t *bson, /* IN */
                          const char *key) /* IN */
{
   BSON_ASSERT (iter);
   BSON_ASSERT (bson);
   BSON_ASSERT (key);

   if (!bson_iter_init (iter, bson)) {
      return false;
   }

   return bson_iter_find_case (iter, key);
}
/*
*--------------------------------------------------------------------------
*
* _bson_iter_find_with_len --
*
* Internal helper for finding an exact key.
*
* Returns:
* true if the field @key was found.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Advance @iter until a field whose key is exactly the first @keylen bytes
 * of @key (and no longer) is found. A negative @keylen means "use
 * strlen (key)"; a zero @keylen never matches. */
static bool
_bson_iter_find_with_len (bson_iter_t *iter, /* INOUT */
                          const char *key, /* IN */
                          int keylen) /* IN */
{
   if (keylen == 0) {
      return false;
   }

   if (keylen < 0) {
      keylen = (int) strlen (key);
   }

   while (bson_iter_next (iter)) {
      const char *field = bson_iter_key (iter);

      /* Prefix must match AND the field key must end right there. */
      if (strncmp (key, field, keylen) == 0 && field[keylen] == '\0') {
         return true;
      }
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_find --
*
* Searches through @iter starting from the current position for a key
* matching @key. This is a case-sensitive search meaning "KEY" and
* "key" would NOT match.
*
* Returns:
* true if @key is found.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Case-sensitive search: advance @iter from its current position until a
 * field named @key is found. Returns false when the iterator is
 * exhausted first. */
bool
bson_iter_find (bson_iter_t *iter, /* INOUT */
                const char *key) /* IN */
{
   BSON_ASSERT (iter);
   BSON_ASSERT (key);

   /* -1 asks the helper to compute the key length itself. */
   return _bson_iter_find_with_len (iter, key, -1);
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_find_case --
*
* Searches through @iter starting from the current position for a key
* matching @key. This is a case-insensitive search meaning "KEY" and
* "key" would match.
*
* Returns:
* true if @key is found.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Case-insensitive search: advance @iter from its current position until a
 * field whose key matches @key ignoring case is found. */
bool
bson_iter_find_case (bson_iter_t *iter, /* INOUT */
                     const char *key) /* IN */
{
   BSON_ASSERT (iter);
   BSON_ASSERT (key);

   while (bson_iter_next (iter)) {
      if (bson_strcasecmp (key, bson_iter_key (iter)) == 0) {
         return true;
      }
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_find_descendant --
*
* Locates a descendant using the "parent.child.key" notation. This
* operates similar to bson_iter_find() except that it can recurse
* into children documents using the dot notation.
*
* Returns:
* true if the descendant was found and @descendant was initialized.
*
* Side effects:
* @descendant may be initialized.
*
*--------------------------------------------------------------------------
*/
/* Locate a nested field using dotted "parent.child.key" notation,
 * recursing into embedded documents/arrays one path segment at a time.
 * On success, @descendant is positioned on the final field. */
bool
bson_iter_find_descendant (bson_iter_t *iter, /* INOUT */
                           const char *dotkey, /* IN */
                           bson_iter_t *descendant) /* OUT */
{
   const char *dot;
   size_t head_len;
   bson_iter_t child;

   BSON_ASSERT (iter);
   BSON_ASSERT (dotkey);
   BSON_ASSERT (descendant);

   /* The current path segment is everything before the first '.'. */
   dot = strchr (dotkey, '.');
   head_len = dot ? (size_t) (dot - dotkey) : strlen (dotkey);

   if (!_bson_iter_find_with_len (iter, dotkey, (int) head_len)) {
      return false;
   }

   if (!dot) {
      /* Last segment: the match itself is the requested descendant. */
      *descendant = *iter;
      return true;
   }

   if ((BSON_ITER_HOLDS_DOCUMENT (iter) || BSON_ITER_HOLDS_ARRAY (iter)) &&
       bson_iter_recurse (iter, &child)) {
      return bson_iter_find_descendant (&child, dot + 1, descendant);
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_key --
*
* Retrieves the key of the current field. The resulting key is valid
* while @iter is valid.
*
* Returns:
* A string that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Return the key of the field @iter currently points at. The string lives
 * inside the iterated buffer: do not modify or free it, and do not use it
 * after the underlying bson_t is destroyed. */
const char *
bson_iter_key (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   return bson_iter_key_unsafe (iter);
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_type --
*
* Retrieves the type of the current field. It may be useful to check
* the type using the BSON_ITER_HOLDS_*() macros.
*
* Returns:
* A bson_type_t.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Return the BSON type of the current field. Callers typically test the
 * result with the BSON_ITER_HOLDS_*() macros. @iter must be positioned on
 * a valid element (non-NULL raw buffer, nonzero length). */
bson_type_t
bson_iter_type (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);
   BSON_ASSERT (iter->raw);
   BSON_ASSERT (iter->len);

   return bson_iter_type_unsafe (iter);
}
/*
*--------------------------------------------------------------------------
*
* _bson_iter_next_internal --
*
* Internal function to advance @iter to the next field and retrieve
* the key and BSON type before error-checking.
*
* Return:
* true if an element was decoded, else false.
*
* Side effects:
* @key and @bson_type are set.
*
* If the return value is false:
* - @iter is invalidated: @iter->raw is NULLed
* - @unsupported is set to true if the bson type is unsupported
* - otherwise if the BSON is corrupt, @iter->err_off is nonzero
* - otherwise @bson_type is set to BSON_TYPE_EOD
*
*--------------------------------------------------------------------------
*/
/* Advance @iter to the next element, validating the element's framing
 * against the buffer bounds as it goes. On success, @key and @bson_type
 * describe the new element. On failure @iter->raw is NULLed; @unsupported
 * reports an unknown type byte, and iter->err_off (when nonzero) marks
 * where corrupt data was detected. */
static bool
_bson_iter_next_internal (bson_iter_t *iter, /* INOUT */
const char **key, /* OUT */
uint32_t *bson_type, /* OUT */
bool *unsupported) /* OUT */
{
const uint8_t *data;
uint32_t o;
unsigned int len;
BSON_ASSERT (iter);
*unsupported = false;
/* A previously-invalidated iterator stays at end-of-document. */
if (!iter->raw) {
*key = NULL;
*bson_type = BSON_TYPE_EOD;
return false;
}
data = iter->raw;
len = iter->len;
/* The element starts with a 1-byte type tag, then the NUL-terminated key. */
iter->off = iter->next_off;
iter->type = iter->off;
iter->key = iter->off + 1;
iter->d1 = 0;
iter->d2 = 0;
iter->d3 = 0;
iter->d4 = 0;
/* iterate from start to end of NULL-terminated key string */
for (o = iter->off + 1; o < len; o++) {
if (!data[o]) {
/* d1 = offset of the first byte after the key's NUL terminator. */
iter->d1 = ++o;
goto fill_data_fields;
}
}
goto mark_invalid;
fill_data_fields:
*key = bson_iter_key_unsafe (iter);
*bson_type = ITER_TYPE (iter);
/* Per-type framing validation; each case computes next_off and the d2-d4
 * offsets into the value so accessors can read it without re-parsing. */
switch (*bson_type) {
case BSON_TYPE_DATE_TIME:
case BSON_TYPE_DOUBLE:
case BSON_TYPE_INT64:
case BSON_TYPE_TIMESTAMP:
/* Fixed 8-byte payloads. */
iter->next_off = o + 8;
break;
case BSON_TYPE_CODE:
case BSON_TYPE_SYMBOL:
case BSON_TYPE_UTF8: {
/* int32 length (includes the NUL), followed by the string bytes. */
uint32_t l;
if ((o + 4) >= len) {
iter->err_off = o;
goto mark_invalid;
}
iter->d2 = o + 4;
memcpy (&l, iter->raw + iter->d1, sizeof (l));
l = BSON_UINT32_FROM_LE (l);
if (l > (len - (o + 4))) {
iter->err_off = o;
goto mark_invalid;
}
iter->next_off = o + 4 + l;
/*
* Make sure the string length includes the NUL byte.
*/
if (BSON_UNLIKELY ((l == 0) || (iter->next_off >= len))) {
iter->err_off = o;
goto mark_invalid;
}
/*
* Make sure the last byte is a NUL byte.
*/
if (BSON_UNLIKELY ((iter->raw + iter->d2)[l - 1] != '\0')) {
iter->err_off = o + 4 + l - 1;
goto mark_invalid;
}
} break;
case BSON_TYPE_BINARY: {
/* int32 length, 1-byte subtype, then the payload. */
bson_subtype_t subtype;
uint32_t l;
if (o >= (len - 4)) {
iter->err_off = o;
goto mark_invalid;
}
iter->d2 = o + 4;
iter->d3 = o + 5;
memcpy (&l, iter->raw + iter->d1, sizeof (l));
l = BSON_UINT32_FROM_LE (l);
if (l >= (len - o)) {
iter->err_off = o;
goto mark_invalid;
}
subtype = *(iter->raw + iter->d2);
if (subtype == BSON_SUBTYPE_BINARY_DEPRECATED) {
int32_t binary_len;
if (l < 4) {
iter->err_off = o;
goto mark_invalid;
}
/* subtype 2 has a redundant length header in the data */
memcpy (&binary_len, (iter->raw + iter->d3), sizeof (binary_len));
binary_len = BSON_UINT32_FROM_LE (binary_len);
if (binary_len + 4 != l) {
iter->err_off = iter->d3;
goto mark_invalid;
}
}
iter->next_off = o + 5 + l;
} break;
case BSON_TYPE_ARRAY:
case BSON_TYPE_DOCUMENT: {
/* Embedded document: its int32 length covers the whole subdocument. */
uint32_t l;
if (o >= (len - 4)) {
iter->err_off = o;
goto mark_invalid;
}
memcpy (&l, iter->raw + iter->d1, sizeof (l));
l = BSON_UINT32_FROM_LE (l);
if ((l > len) || (l > (len - o))) {
iter->err_off = o;
goto mark_invalid;
}
iter->next_off = o + l;
} break;
case BSON_TYPE_OID:
iter->next_off = o + 12;
break;
case BSON_TYPE_BOOL: {
/* One byte; only 0x00 and 0x01 are legal encodings. */
char val;
if (iter->d1 >= len) {
iter->err_off = o;
goto mark_invalid;
}
memcpy (&val, iter->raw + iter->d1, 1);
if (val != 0x00 && val != 0x01) {
iter->err_off = o;
goto mark_invalid;
}
iter->next_off = o + 1;
} break;
case BSON_TYPE_REGEX: {
/* Two consecutive NUL-terminated cstrings: pattern, then options. */
bool eor = false;
bool eoo = false;
for (; o < len; o++) {
if (!data[o]) {
iter->d2 = ++o;
eor = true;
break;
}
}
if (!eor) {
iter->err_off = iter->next_off;
goto mark_invalid;
}
for (; o < len; o++) {
if (!data[o]) {
eoo = true;
break;
}
}
if (!eoo) {
iter->err_off = iter->next_off;
goto mark_invalid;
}
iter->next_off = o + 1;
} break;
case BSON_TYPE_DBPOINTER: {
/* int32-prefixed namespace string followed by a 12-byte OID. */
uint32_t l;
if (o >= (len - 4)) {
iter->err_off = o;
goto mark_invalid;
}
iter->d2 = o + 4;
memcpy (&l, iter->raw + iter->d1, sizeof (l));
l = BSON_UINT32_FROM_LE (l);
- if ((l > len) || (l > (len - o))) {
+ /* Check valid string length. l counts '\0' but not 4 bytes for itself. */
+ if (l == 0 || l > (len - o - 4)) {
iter->err_off = o;
goto mark_invalid;
}
if (*(iter->raw + o + l + 3)) {
/* not null terminated */
iter->err_off = o + l + 3;
goto mark_invalid;
}
iter->d3 = o + 4 + l;
iter->next_off = o + 4 + l + 12;
} break;
case BSON_TYPE_CODEWSCOPE: {
/* int32 total length, int32-prefixed code string, then a scope doc. */
uint32_t l;
uint32_t doclen;
if ((len < 19) || (o >= (len - 14))) {
iter->err_off = o;
goto mark_invalid;
}
iter->d2 = o + 4;
iter->d3 = o + 8;
memcpy (&l, iter->raw + iter->d1, sizeof (l));
l = BSON_UINT32_FROM_LE (l);
if ((l < 14) || (l >= (len - o))) {
iter->err_off = o;
goto mark_invalid;
}
iter->next_off = o + l;
if (iter->next_off >= len) {
iter->err_off = o;
goto mark_invalid;
}
memcpy (&l, iter->raw + iter->d2, sizeof (l));
l = BSON_UINT32_FROM_LE (l);
if (l == 0 || l >= (len - o - 4 - 4)) {
iter->err_off = o;
goto mark_invalid;
}
if ((o + 4 + 4 + l + 4) >= iter->next_off) {
iter->err_off = o + 4;
goto mark_invalid;
}
iter->d4 = o + 4 + 4 + l;
/* The scope document's own length must land exactly on next_off. */
memcpy (&doclen, iter->raw + iter->d4, sizeof (doclen));
doclen = BSON_UINT32_FROM_LE (doclen);
if ((o + 4 + 4 + l + doclen) != iter->next_off) {
iter->err_off = o + 4 + 4 + l;
goto mark_invalid;
}
} break;
case BSON_TYPE_INT32:
iter->next_off = o + 4;
break;
case BSON_TYPE_DECIMAL128:
iter->next_off = o + 16;
break;
case BSON_TYPE_MAXKEY:
case BSON_TYPE_MINKEY:
case BSON_TYPE_NULL:
case BSON_TYPE_UNDEFINED:
/* Zero-length payloads. */
iter->d1 = -1;
iter->next_off = o;
break;
default:
*unsupported = true;
/* FALL THROUGH */
case BSON_TYPE_EOD:
iter->err_off = o;
goto mark_invalid;
}
/*
* Check to see if any of the field locations would overflow the
* current BSON buffer. If so, set the error location to the offset
* of where the field starts.
*/
if (iter->next_off >= len) {
iter->err_off = o;
goto mark_invalid;
}
iter->err_off = 0;
return true;
mark_invalid:
/* Invalidate the iterator so subsequent calls return EOD immediately. */
iter->raw = NULL;
iter->len = 0;
iter->next_off = 0;
return false;
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_next --
*
* Advances @iter to the next field of the underlying BSON document.
* If all fields have been exhausted, then %false is returned.
*
* It is a programming error to use @iter after this function has
* returned false.
*
* Returns:
* true if the iter was advanced to the next record.
* otherwise false and @iter should be considered invalid.
*
* Side effects:
* @iter may be invalidated.
*
*--------------------------------------------------------------------------
*/
/* Advance @iter to the next field. Returns false once the document is
 * exhausted (or corrupt); @iter must not be used after that. */
bool
bson_iter_next (bson_iter_t *iter) /* INOUT */
{
   const char *ignored_key;
   uint32_t ignored_type;
   bool ignored_unsupported;

   return _bson_iter_next_internal (
      iter, &ignored_key, &ignored_type, &ignored_unsupported);
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_binary --
*
* Retrieves the BSON_TYPE_BINARY field. The subtype is stored in
* @subtype. The length of @binary in bytes is stored in @binary_len.
*
* @binary should not be modified or freed and is only valid while
* @iter's bson_t is valid and unmodified.
*
* Parameters:
* @iter: A bson_iter_t
* @subtype: A location for the binary subtype.
* @binary_len: A location for the length of @binary.
* @binary: A location for a pointer to the binary data.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Fetch the current BSON_TYPE_BINARY field: subtype into @subtype, byte
 * count into @binary_len, payload pointer into @binary. The payload
 * pointer aliases the iterated buffer; do not modify or free it. For a
 * non-binary field, outputs are set to NULL/0/BSON_SUBTYPE_BINARY. */
void
bson_iter_binary (const bson_iter_t *iter, /* IN */
                  bson_subtype_t *subtype, /* OUT */
                  uint32_t *binary_len, /* OUT */
                  const uint8_t **binary) /* OUT */
{
   bson_subtype_t local_subtype;

   BSON_ASSERT (iter);
   /* Requesting the payload without a place for its length is a bug. */
   BSON_ASSERT (!binary || binary_len);

   if (ITER_TYPE (iter) != BSON_TYPE_BINARY) {
      if (binary) {
         *binary = NULL;
      }
      if (binary_len) {
         *binary_len = 0;
      }
      if (subtype) {
         *subtype = BSON_SUBTYPE_BINARY;
      }
      return;
   }

   if (!subtype) {
      subtype = &local_subtype;
   }
   *subtype = (bson_subtype_t) * (iter->raw + iter->d2);

   if (binary) {
      memcpy (binary_len, iter->raw + iter->d1, sizeof *binary_len);
      *binary_len = BSON_UINT32_FROM_LE (*binary_len);
      *binary = iter->raw + iter->d3;
      if (*subtype == BSON_SUBTYPE_BINARY_DEPRECATED) {
         /* Subtype 2 prefixes the data with a redundant int32 length. */
         *binary_len -= sizeof (int32_t);
         *binary += sizeof (int32_t);
      }
   }
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_bool --
*
* Retrieves the current field of type BSON_TYPE_BOOL.
*
* Returns:
* true or false, dependent on bson document.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Return the current BSON_TYPE_BOOL value, or false when the current
 * field is not a boolean. */
bool
bson_iter_bool (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_BOOL) {
      return false;
   }

   return bson_iter_bool_unsafe (iter);
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_as_bool --
*
* If @iter is on a boolean field, returns the boolean. If it is on a
* non-boolean field such as int32, int64, or double, it will convert
* the value to a boolean.
*
* Zero is false, and non-zero is true.
*
* Returns:
* true or false, dependent on field type.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Coerce the current field to a boolean: numeric types are true when
 * nonzero, UTF-8 is always true, null/undefined are false, and any other
 * type defaults to true. */
bool
bson_iter_as_bool (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   switch ((int) ITER_TYPE (iter)) {
   case BSON_TYPE_BOOL:
      return bson_iter_bool (iter);
   case BSON_TYPE_DOUBLE:
      return bson_iter_double (iter) != 0.0;
   case BSON_TYPE_INT64:
      return bson_iter_int64 (iter) != 0;
   case BSON_TYPE_INT32:
      return bson_iter_int32 (iter) != 0;
   case BSON_TYPE_UTF8:
      return true;
   case BSON_TYPE_NULL:
   case BSON_TYPE_UNDEFINED:
      return false;
   default:
      return true;
   }
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_double --
*
* Retrieves the current field of type BSON_TYPE_DOUBLE.
*
* Returns:
* A double.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Return the current BSON_TYPE_DOUBLE value, or 0 when the current field
 * is not a double. */
double
bson_iter_double (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_DOUBLE) {
      return 0;
   }

   return bson_iter_double_unsafe (iter);
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_as_double --
*
* If @iter is on a field of type BSON_TYPE_DOUBLE,
* returns the double. If it is on an integer field
* such as int32, int64, or bool, it will convert
* the value to a double.
*
*
* Returns:
* A double.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Coerce the current field to a double: bool, int32, and int64 values are
 * converted; any other type yields 0. */
double
bson_iter_as_double (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   switch ((int) ITER_TYPE (iter)) {
   case BSON_TYPE_DOUBLE:
      return bson_iter_double (iter);
   case BSON_TYPE_BOOL:
      return (double) bson_iter_bool (iter);
   case BSON_TYPE_INT32:
      return (double) bson_iter_int32 (iter);
   case BSON_TYPE_INT64:
      return (double) bson_iter_int64 (iter);
   default:
      return 0;
   }
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_int32 --
*
* Retrieves the value of the field of type BSON_TYPE_INT32.
*
* Returns:
* A 32-bit signed integer.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Return the current BSON_TYPE_INT32 value, or 0 when the current field
 * is not an int32. */
int32_t
bson_iter_int32 (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_INT32) {
      return 0;
   }

   return bson_iter_int32_unsafe (iter);
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_int64 --
*
* Retrieves a 64-bit signed integer for the current BSON_TYPE_INT64
* field.
*
* Returns:
* A 64-bit signed integer.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Return the current BSON_TYPE_INT64 value, or 0 when the current field
 * is not an int64. */
int64_t
bson_iter_int64 (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_INT64) {
      return 0;
   }

   return bson_iter_int64_unsafe (iter);
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_as_int64 --
*
* If @iter is not an int64 field, it will try to convert the value to
* an int64. Such field types include:
*
* - bool
* - double
* - int32
*
* Returns:
* An int64_t.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Coerce the current field to an int64: bool, double (truncated), and
 * int32 values are converted; any other type yields 0. */
int64_t
bson_iter_as_int64 (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   switch ((int) ITER_TYPE (iter)) {
   case BSON_TYPE_INT64:
      return bson_iter_int64 (iter);
   case BSON_TYPE_INT32:
      return (int64_t) bson_iter_int32 (iter);
   case BSON_TYPE_DOUBLE:
      return (int64_t) bson_iter_double (iter);
   case BSON_TYPE_BOOL:
      return (int64_t) bson_iter_bool (iter);
   default:
      return 0;
   }
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_decimal128 --
*
* This function retrieves the current field of type
*%BSON_TYPE_DECIMAL128.
* The result is valid while @iter is valid, and is stored in @dec.
*
* Returns:
*
* True on success, false on failure.
*
* Side Effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Copy the current BSON_TYPE_DECIMAL128 value into @dec. Returns false
 * (leaving @dec untouched) when the current field is not a decimal128. */
bool
bson_iter_decimal128 (const bson_iter_t *iter, /* IN */
                      bson_decimal128_t *dec) /* OUT */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_DECIMAL128) {
      return false;
   }

   bson_iter_decimal128_unsafe (iter, dec);
   return true;
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_oid --
*
* Retrieves the current field of type %BSON_TYPE_OID. The result is
* valid while @iter is valid.
*
* Returns:
* A bson_oid_t that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Return a pointer to the current BSON_TYPE_OID value, or NULL when the
 * current field is not an ObjectId. The pointer aliases the iterated
 * buffer and is valid only while @iter is. */
const bson_oid_t *
bson_iter_oid (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_OID) {
      return NULL;
   }

   return bson_iter_oid_unsafe (iter);
}
/*
*--------------------------------------------------------------------------
*
* bson_iter_regex --
*
* Fetches the current field from the iter which should be of type
* BSON_TYPE_REGEX.
*
* Returns:
* Regex from @iter. This should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Return the pattern of the current BSON_TYPE_REGEX field, storing the
 * options string through @options when non-NULL. Both strings alias the
 * iterated buffer; for a non-regex field both come back NULL. */
const char *
bson_iter_regex (const bson_iter_t *iter, /* IN */
                 const char **options) /* IN */
{
   const char *pattern = NULL;
   const char *opts = NULL;

   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) == BSON_TYPE_REGEX) {
      pattern = (const char *) (iter->raw + iter->d1);
      opts = (const char *) (iter->raw + iter->d2);
   }

   if (options) {
      *options = opts;
   }

   return pattern;
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_utf8 --
 *
 *       Retrieves the current field of type %BSON_TYPE_UTF8 as a UTF-8
 *       encoded string.
 *
 * Parameters:
 *       @iter: A bson_iter_t.
 *       @length: An optional location for the length of the string.
 *
 * Returns:
 *       A string that should not be modified or freed, or NULL when the
 *       current field is not UTF-8.
 *
 * Side effects:
 *       @length will be set to the result strings length if non-NULL
 *       (zero on type mismatch).
 *
 *--------------------------------------------------------------------------
 */
const char *
bson_iter_utf8 (const bson_iter_t *iter, /* IN */
                uint32_t *length)        /* OUT */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_UTF8) {
      if (length) {
         *length = 0;
      }
      return NULL;
   }

   if (length) {
      *length = bson_iter_utf8_len_unsafe (iter);
   }

   return (const char *) (iter->raw + iter->d2);
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_dup_utf8 --
 *
 *       Copies the current UTF-8 element into a newly allocated,
 *       NUL-terminated string. The caller frees it with bson_free().
 *
 * Returns:
 *       A newly allocated char* that should be freed with bson_free(),
 *       or NULL when the current field is not UTF-8.
 *
 * Side effects:
 *       @length will be set to the result strings length if non-NULL.
 *
 *--------------------------------------------------------------------------
 */
char *
bson_iter_dup_utf8 (const bson_iter_t *iter, /* IN */
                    uint32_t *length)        /* OUT */
{
   uint32_t n = 0;
   const char *src;
   char *copy = NULL;

   BSON_ASSERT (iter);

   src = bson_iter_utf8 (iter, &n);
   if (src) {
      copy = bson_malloc0 (n + 1);
      memcpy (copy, src, n);
      copy[n] = '\0';
   }

   if (length) {
      *length = n;
   }

   return copy;
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_code --
 *
 *       Retrieves the current field of type %BSON_TYPE_CODE. The length
 *       of the resulting string is stored in @length when non-NULL.
 *
 * Parameters:
 *       @iter: A bson_iter_t.
 *       @length: An optional location for the code length.
 *
 * Returns:
 *       A NUL-terminated string containing the code which should not be
 *       modified or freed, or NULL on type mismatch.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
const char *
bson_iter_code (const bson_iter_t *iter, /* IN */
                uint32_t *length)        /* OUT */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_CODE) {
      if (length) {
         *length = 0;
      }
      return NULL;
   }

   if (length) {
      /* Code shares the UTF-8 layout, so the same length helper applies. */
      *length = bson_iter_utf8_len_unsafe (iter);
   }

   return (const char *) (iter->raw + iter->d2);
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_codewscope --
 *
 *       Similar to bson_iter_code() but with a scope associated encoded as
 *       a BSON document. @scope should not be modified or freed. It is
 *       valid while @iter is valid.
 *
 * Parameters:
 *       @iter: A #bson_iter_t.
 *       @length: An optional location for the length of resulting string.
 *       @scope_len: An optional location for the length of @scope.
 *       @scope: An optional location for the scope encoded as BSON.
 *
 * Returns:
 *       A NUL-terminated string that should not be modified or freed,
 *       or NULL when the field is not BSON_TYPE_CODEWSCOPE.
 *
 * Side effects:
 *       @length is set to the resulting string length in bytes.
 *       @scope_len is set to the length of @scope in bytes.
 *       @scope is set to the scope documents buffer which can be
 *       turned into a bson document with bson_init_static().
 *
 *--------------------------------------------------------------------------
 */
const char *
bson_iter_codewscope (const bson_iter_t *iter, /* IN */
                      uint32_t *length,        /* OUT */
                      uint32_t *scope_len,     /* OUT */
                      const uint8_t **scope)   /* OUT */
{
   uint32_t len;

   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) == BSON_TYPE_CODEWSCOPE) {
      if (length) {
         memcpy (&len, iter->raw + iter->d2, sizeof (len));
         /* The string length was checked > 0 in _bson_iter_next_internal. */
         len = BSON_UINT32_FROM_LE (len);
         BSON_ASSERT (len > 0);
         *length = len - 1;
      }

      /* Fix: guard the out-params here too; previously only the mismatch
       * path tolerated NULL @scope_len / @scope, making a NULL argument
       * crash only when the field actually was code-with-scope. */
      if (scope_len) {
         memcpy (&len, iter->raw + iter->d4, sizeof (len));
         *scope_len = BSON_UINT32_FROM_LE (len);
      }
      if (scope) {
         *scope = iter->raw + iter->d4;
      }
      return (const char *) (iter->raw + iter->d3);
   }

   if (length) {
      *length = 0;
   }
   if (scope_len) {
      *scope_len = 0;
   }
   if (scope) {
      *scope = NULL;
   }

   return NULL;
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_dbpointer --
 *
 *       Retrieves a BSON_TYPE_DBPOINTER field. @collection_len will be set
 *       to the length of the collection name. The collection name will be
 *       placed into @collection. The oid will be placed into @oid.
 *
 *       @collection and @oid should not be modified.
 *
 * Parameters:
 *       @iter: A #bson_iter_t.
 *       @collection_len: An optional location for the length of @collection.
 *       @collection: An optional location for the collection name.
 *       @oid: An optional location for the oid.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       @collection_len is set to the length of @collection in bytes
 *       excluding the null byte (zero on type mismatch).
 *       @collection is set to the collection name, including a terminating
 *       null byte, or NULL on type mismatch.
 *       @oid is initialized with the oid, or NULL on type mismatch.
 *
 *--------------------------------------------------------------------------
 */
void
bson_iter_dbpointer (const bson_iter_t *iter, /* IN */
                     uint32_t *collection_len, /* OUT */
                     const char **collection,  /* OUT */
                     const bson_oid_t **oid)   /* OUT */
{
   BSON_ASSERT (iter);

   /* Fix: also zero @collection_len up front. Previously only @collection
    * and @oid were cleared, leaving *collection_len uninitialized when the
    * field was not a DBPointer. */
   if (collection_len) {
      *collection_len = 0;
   }
   if (collection) {
      *collection = NULL;
   }
   if (oid) {
      *oid = NULL;
   }

   if (ITER_TYPE (iter) == BSON_TYPE_DBPOINTER) {
      if (collection_len) {
         memcpy (
            collection_len, (iter->raw + iter->d1), sizeof (*collection_len));
         *collection_len = BSON_UINT32_FROM_LE (*collection_len);
         /* The stored length includes the trailing NUL; report it without. */
         if ((*collection_len) > 0) {
            (*collection_len)--;
         }
      }
      if (collection) {
         *collection = (const char *) (iter->raw + iter->d2);
      }
      if (oid) {
         *oid = (const bson_oid_t *) (iter->raw + iter->d3);
      }
   }
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_symbol --
 *
 *       Retrieves the symbol of the current field of type BSON_TYPE_SYMBOL.
 *
 * Parameters:
 *       @iter: A bson_iter_t.
 *       @length: An optional location for the length of the symbol.
 *
 * Returns:
 *       A string containing the symbol as UTF-8. The value should not be
 *       modified or freed. NULL on type mismatch.
 *
 * Side effects:
 *       @length is set to the resulting strings length in bytes,
 *       excluding the null byte (zero on type mismatch).
 *
 *--------------------------------------------------------------------------
 */
const char *
bson_iter_symbol (const bson_iter_t *iter, /* IN */
                  uint32_t *length)        /* OUT */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_SYMBOL) {
      if (length) {
         *length = 0;
      }
      return NULL;
   }

   if (length) {
      *length = bson_iter_utf8_len_unsafe (iter);
   }

   return (const char *) (iter->raw + iter->d2);
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_date_time --
 *
 *       Fetches the number of milliseconds elapsed since the UNIX epoch.
 *       This value can be negative as times before 1970 are valid.
 *
 * Returns:
 *       A signed 64-bit integer containing the number of milliseconds,
 *       or 0 when the current field is not a date-time.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
int64_t
bson_iter_date_time (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   return (ITER_TYPE (iter) == BSON_TYPE_DATE_TIME)
             ? bson_iter_int64_unsafe (iter)
             : 0;
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_time_t --
 *
 *       Retrieves the current field of type BSON_TYPE_DATE_TIME as a
 *       time_t.
 *
 * Returns:
 *       A #time_t of the number of seconds since UNIX epoch in UTC,
 *       or 0 when the current field is not a date-time.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
time_t
bson_iter_time_t (const bson_iter_t *iter) /* IN */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_DATE_TIME) {
      return 0;
   }

   return bson_iter_time_t_unsafe (iter);
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_timestamp --
 *
 *       Fetches the current field if it is a BSON_TYPE_TIMESTAMP.
 *
 * Parameters:
 *       @iter: A #bson_iter_t.
 *       @timestamp: an optional location for the timestamp.
 *       @increment: An optional location for the increment.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       @timestamp is initialized (zero on type mismatch).
 *       @increment is initialized (zero on type mismatch).
 *
 *--------------------------------------------------------------------------
 */
void
bson_iter_timestamp (const bson_iter_t *iter, /* IN */
                     uint32_t *timestamp,     /* OUT */
                     uint32_t *increment)     /* OUT */
{
   uint64_t packed = 0;

   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) == BSON_TYPE_TIMESTAMP) {
      /* A BSON timestamp is one little-endian u64: high 32 bits are the
       * seconds, low 32 bits the ordinal increment. */
      memcpy (&packed, iter->raw + iter->d1, sizeof (packed));
      packed = BSON_UINT64_FROM_LE (packed);
   }

   if (timestamp) {
      *timestamp = (uint32_t) ((packed >> 32) & 0xFFFFFFFF);
   }
   if (increment) {
      *increment = (uint32_t) (packed & 0xFFFFFFFF);
   }
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_timeval --
 *
 *       Retrieves the current field of type BSON_TYPE_DATE_TIME and stores
 *       it into the struct timeval provided. tv->tv_sec is set to the
 *       number of seconds since the UNIX epoch in UTC.
 *
 *       Since BSON_TYPE_DATE_TIME does not support fractions of a second,
 *       tv->tv_usec will always be set to zero.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       @tv is initialized (zeroed on type mismatch).
 *
 *--------------------------------------------------------------------------
 */
void
bson_iter_timeval (const bson_iter_t *iter, /* IN */
                   struct timeval *tv)      /* OUT */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_DATE_TIME) {
      memset (tv, 0, sizeof *tv);
      return;
   }

   bson_iter_timeval_unsafe (iter, tv);
}
/**
 * bson_iter_document:
 * @iter: a bson_iter_t.
 * @document_len: A location for the document length.
 * @document: A location for a pointer to the document buffer.
 *
 */
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_document --
 *
 *       Retrieves the data to the document BSON structure and stores the
 *       length of the document buffer in @document_len and the document
 *       buffer in @document.
 *
 *       If you would like to iterate over the child contents, you might
 *       consider creating a bson_t on the stack such as the following. It
 *       allows you to call functions taking a const bson_t* only.
 *
 *          bson_t b;
 *          uint32_t len;
 *          const uint8_t *data;
 *
 *          bson_iter_document(iter, &len, &data);
 *
 *          if (bson_init_static (&b, data, len)) {
 *             ...
 *          }
 *
 *       There is no need to cleanup the bson_t structure as no data can be
 *       modified in the process of its use (as it is static/const).
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       @document_len is initialized.
 *       @document is initialized.
 *
 *--------------------------------------------------------------------------
 */
void
bson_iter_document (const bson_iter_t *iter,  /* IN */
                    uint32_t *document_len,   /* OUT */
                    const uint8_t **document) /* OUT */
{
   BSON_ASSERT (iter);
   BSON_ASSERT (document_len);
   BSON_ASSERT (document);

   *document = NULL;
   *document_len = 0;

   if (ITER_TYPE (iter) != BSON_TYPE_DOCUMENT) {
      return;
   }

   /* d1 points at the embedded document, which begins with its own
    * little-endian int32 total length. */
   memcpy (document_len, iter->raw + iter->d1, sizeof (*document_len));
   *document_len = BSON_UINT32_FROM_LE (*document_len);
   *document = iter->raw + iter->d1;
}
/**
 * bson_iter_array:
 * @iter: a #bson_iter_t.
 * @array_len: A location for the array length.
 * @array: A location for a pointer to the array buffer.
 */
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_array --
 *
 *       Retrieves the data to the array BSON structure and stores the
 *       length of the array buffer in @array_len and the array buffer in
 *       @array.
 *
 *       If you would like to iterate over the child contents, you might
 *       consider creating a bson_t on the stack such as the following. It
 *       allows you to call functions taking a const bson_t* only.
 *
 *          bson_t b;
 *          uint32_t len;
 *          const uint8_t *data;
 *
 *          bson_iter_array (iter, &len, &data);
 *
 *          if (bson_init_static (&b, data, len)) {
 *             ...
 *          }
 *
 *       There is no need to cleanup the #bson_t structure as no data can be
 *       modified in the process of its use.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       @array_len is initialized.
 *       @array is initialized.
 *
 *--------------------------------------------------------------------------
 */
void
bson_iter_array (const bson_iter_t *iter, /* IN */
                 uint32_t *array_len,     /* OUT */
                 const uint8_t **array)   /* OUT */
{
   BSON_ASSERT (iter);
   BSON_ASSERT (array_len);
   BSON_ASSERT (array);

   *array = NULL;
   *array_len = 0;

   if (ITER_TYPE (iter) != BSON_TYPE_ARRAY) {
      return;
   }

   /* Arrays share the document encoding: a leading little-endian int32
    * total length followed by the elements. */
   memcpy (array_len, iter->raw + iter->d1, sizeof (*array_len));
   *array_len = BSON_UINT32_FROM_LE (*array_len);
   *array = iter->raw + iter->d1;
}
/* Dispatch helpers for bson_iter_visit_all(): each VISIT_* macro expands
 * to "callback-is-set && callback(...)", so a visit both skips NULL
 * callbacks and propagates a callback's request to stop iteration
 * (callbacks return true to end visitation early). */
#define VISIT_FIELD(name) visitor->visit_##name && visitor->visit_##name
#define VISIT_AFTER VISIT_FIELD (after)
#define VISIT_BEFORE VISIT_FIELD (before)
/* visit_corrupt returns void, so it gets a statement-shaped macro rather
 * than the boolean-expression shape used by the others. */
#define VISIT_CORRUPT \
   if (visitor->visit_corrupt) \
   visitor->visit_corrupt
#define VISIT_DOUBLE VISIT_FIELD (double)
#define VISIT_UTF8 VISIT_FIELD (utf8)
#define VISIT_DOCUMENT VISIT_FIELD (document)
#define VISIT_ARRAY VISIT_FIELD (array)
#define VISIT_BINARY VISIT_FIELD (binary)
#define VISIT_UNDEFINED VISIT_FIELD (undefined)
#define VISIT_OID VISIT_FIELD (oid)
#define VISIT_BOOL VISIT_FIELD (bool)
#define VISIT_DATE_TIME VISIT_FIELD (date_time)
#define VISIT_NULL VISIT_FIELD (null)
#define VISIT_REGEX VISIT_FIELD (regex)
#define VISIT_DBPOINTER VISIT_FIELD (dbpointer)
#define VISIT_CODE VISIT_FIELD (code)
#define VISIT_SYMBOL VISIT_FIELD (symbol)
#define VISIT_CODEWSCOPE VISIT_FIELD (codewscope)
#define VISIT_INT32 VISIT_FIELD (int32)
#define VISIT_TIMESTAMP VISIT_FIELD (timestamp)
#define VISIT_INT64 VISIT_FIELD (int64)
#define VISIT_DECIMAL128 VISIT_FIELD (decimal128)
#define VISIT_MAXKEY VISIT_FIELD (maxkey)
#define VISIT_MINKEY VISIT_FIELD (minkey)
/**
 * bson_iter_visit_all:
 * @iter: A #bson_iter_t.
 * @visitor: A #bson_visitor_t containing the visitors.
 * @data: User data for @visitor data parameters.
 *
 *
 * Returns: true if the visitor was pre-maturely ended; otherwise false.
 */
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_visit_all --
 *
 *       Visits all fields forward from the current position of @iter. For
 *       each field found a function in @visitor will be called. Typically
 *       you will use this immediately after initializing a bson_iter_t.
 *
 *          bson_iter_init (&iter, b);
 *          bson_iter_visit_all (&iter, my_visitor, NULL);
 *
 *       @iter will no longer be valid after this function has executed and
 *       will need to be reinitialized if intending to reuse.
 *
 * Returns:
 *       true if successfully visited all fields or callback requested
 *       early termination, otherwise false.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
bool
bson_iter_visit_all (bson_iter_t *iter,             /* INOUT */
                     const bson_visitor_t *visitor, /* IN */
                     void *data)                    /* IN */
{
   uint32_t bson_type;
   const char *key;
   bool unsupported;

   BSON_ASSERT (iter);
   BSON_ASSERT (visitor);

   while (_bson_iter_next_internal (iter, &key, &bson_type, &unsupported)) {
      /* A key that is not valid UTF-8 marks the document corrupt; record
       * the offset and fall through to the err_off handling below. */
      if (*key && !bson_utf8_validate (key, strlen (key), false)) {
         iter->err_off = iter->off;
         break;
      }

      if (VISIT_BEFORE (iter, key, data)) {
         return true;
      }

      /* Per-type dispatch: fetch the value with the typed accessor, then
       * invoke the matching visitor. For string-bearing types, the string
       * is UTF-8-validated first (allowing embedded NULs: 3rd arg true);
       * a validation failure sets err_off and ends visitation. */
      switch (bson_type) {
      case BSON_TYPE_DOUBLE:
         if (VISIT_DOUBLE (iter, key, bson_iter_double (iter), data)) {
            return true;
         }
         break;
      case BSON_TYPE_UTF8: {
         uint32_t utf8_len;
         const char *utf8;

         utf8 = bson_iter_utf8 (iter, &utf8_len);

         if (!bson_utf8_validate (utf8, utf8_len, true)) {
            iter->err_off = iter->off;
            return true;
         }

         if (VISIT_UTF8 (iter, key, utf8_len, utf8, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_DOCUMENT: {
         const uint8_t *docbuf = NULL;
         uint32_t doclen = 0;
         bson_t b;

         bson_iter_document (iter, &doclen, &docbuf);

         /* Only visit when the child buffer forms a valid static bson_t. */
         if (bson_init_static (&b, docbuf, doclen) &&
             VISIT_DOCUMENT (iter, key, &b, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_ARRAY: {
         const uint8_t *docbuf = NULL;
         uint32_t doclen = 0;
         bson_t b;

         bson_iter_array (iter, &doclen, &docbuf);

         if (bson_init_static (&b, docbuf, doclen) &&
             VISIT_ARRAY (iter, key, &b, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_BINARY: {
         const uint8_t *binary = NULL;
         bson_subtype_t subtype = BSON_SUBTYPE_BINARY;
         uint32_t binary_len = 0;

         bson_iter_binary (iter, &subtype, &binary_len, &binary);

         if (VISIT_BINARY (iter, key, subtype, binary_len, binary, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_UNDEFINED:
         if (VISIT_UNDEFINED (iter, key, data)) {
            return true;
         }
         break;
      case BSON_TYPE_OID:
         if (VISIT_OID (iter, key, bson_iter_oid (iter), data)) {
            return true;
         }
         break;
      case BSON_TYPE_BOOL:
         if (VISIT_BOOL (iter, key, bson_iter_bool (iter), data)) {
            return true;
         }
         break;
      case BSON_TYPE_DATE_TIME:
         if (VISIT_DATE_TIME (iter, key, bson_iter_date_time (iter), data)) {
            return true;
         }
         break;
      case BSON_TYPE_NULL:
         if (VISIT_NULL (iter, key, data)) {
            return true;
         }
         break;
      case BSON_TYPE_REGEX: {
         const char *regex = NULL;
         const char *options = NULL;
         regex = bson_iter_regex (iter, &options);

         if (!bson_utf8_validate (regex, strlen (regex), true)) {
            iter->err_off = iter->off;
            return true;
         }

         if (VISIT_REGEX (iter, key, regex, options, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_DBPOINTER: {
         uint32_t collection_len = 0;
         const char *collection = NULL;
         const bson_oid_t *oid = NULL;

         bson_iter_dbpointer (iter, &collection_len, &collection, &oid);

         if (!bson_utf8_validate (collection, collection_len, true)) {
            iter->err_off = iter->off;
            return true;
         }

         if (VISIT_DBPOINTER (
                iter, key, collection_len, collection, oid, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_CODE: {
         uint32_t code_len;
         const char *code;

         code = bson_iter_code (iter, &code_len);

         if (!bson_utf8_validate (code, code_len, true)) {
            iter->err_off = iter->off;
            return true;
         }

         if (VISIT_CODE (iter, key, code_len, code, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_SYMBOL: {
         uint32_t symbol_len;
         const char *symbol;

         symbol = bson_iter_symbol (iter, &symbol_len);

         if (!bson_utf8_validate (symbol, symbol_len, true)) {
            iter->err_off = iter->off;
            return true;
         }

         if (VISIT_SYMBOL (iter, key, symbol_len, symbol, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_CODEWSCOPE: {
         uint32_t length = 0;
         const char *code;
         const uint8_t *docbuf = NULL;
         uint32_t doclen = 0;
         bson_t b;

         code = bson_iter_codewscope (iter, &length, &doclen, &docbuf);

         if (!bson_utf8_validate (code, length, true)) {
            iter->err_off = iter->off;
            return true;
         }

         if (bson_init_static (&b, docbuf, doclen) &&
             VISIT_CODEWSCOPE (iter, key, length, code, &b, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_INT32:
         if (VISIT_INT32 (iter, key, bson_iter_int32 (iter), data)) {
            return true;
         }
         break;
      case BSON_TYPE_TIMESTAMP: {
         uint32_t timestamp;
         uint32_t increment;
         bson_iter_timestamp (iter, &timestamp, &increment);

         if (VISIT_TIMESTAMP (iter, key, timestamp, increment, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_INT64:
         if (VISIT_INT64 (iter, key, bson_iter_int64 (iter), data)) {
            return true;
         }
         break;
      case BSON_TYPE_DECIMAL128: {
         bson_decimal128_t dec;
         bson_iter_decimal128 (iter, &dec);

         if (VISIT_DECIMAL128 (iter, key, &dec, data)) {
            return true;
         }
      } break;
      case BSON_TYPE_MAXKEY:
         if (VISIT_MAXKEY (iter, bson_iter_key_unsafe (iter), data)) {
            return true;
         }
         break;
      case BSON_TYPE_MINKEY:
         if (VISIT_MINKEY (iter, bson_iter_key_unsafe (iter), data)) {
            return true;
         }
         break;
      case BSON_TYPE_EOD:
      default:
         break;
      }

      if (VISIT_AFTER (iter, bson_iter_key_unsafe (iter), data)) {
         return true;
      }
   }

   /* The loop exits either at end-of-document (err_off == 0) or on a
    * parse error. An "unsupported type" error gets its own callback if
    * provided; anything else is reported as corruption. */
   if (iter->err_off) {
      if (unsupported && visitor->visit_unsupported_type &&
          bson_utf8_validate (key, strlen (key), false)) {
         visitor->visit_unsupported_type (iter, key, bson_type, data);
         return false;
      }

      VISIT_CORRUPT (iter, data);
   }

/* NOTE(review): only VISIT_FIELD is undefined here; the other VISIT_*
 * macros remain defined for the rest of the translation unit. */
#undef VISIT_FIELD

   return false;
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_overwrite_bool --
 *
 *       Overwrites the current BSON_TYPE_BOOLEAN field with a new value.
 *       This is performed in-place and therefore no keys are moved.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
void
bson_iter_overwrite_bool (bson_iter_t *iter, /* IN */
                          bool value)        /* IN */
{
   uint8_t byte;

   BSON_ASSERT (iter);

   /* Normalize to the canonical 0/1 on-disk encoding. */
   byte = value ? 1 : 0;

   if (ITER_TYPE (iter) == BSON_TYPE_BOOL) {
      memcpy ((void *) (iter->raw + iter->d1), &byte, 1);
   }
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_overwrite_int32 --
 *
 *       Overwrites the current BSON_TYPE_INT32 field with a new value.
 *       This is performed in-place and therefore no keys are moved.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
void
bson_iter_overwrite_int32 (bson_iter_t *iter, /* IN */
                           int32_t value)     /* IN */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_INT32) {
      return;
   }

#if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN
   /* BSON stores integers little-endian; swap on big-endian hosts. */
   value = BSON_UINT32_TO_LE (value);
#endif
   memcpy ((void *) (iter->raw + iter->d1), &value, sizeof (value));
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_overwrite_int64 --
 *
 *       Overwrites the current BSON_TYPE_INT64 field with a new value.
 *       This is performed in-place and therefore no keys are moved.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
void
bson_iter_overwrite_int64 (bson_iter_t *iter, /* IN */
                           int64_t value)     /* IN */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_INT64) {
      return;
   }

#if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN
   /* BSON stores integers little-endian; swap on big-endian hosts. */
   value = BSON_UINT64_TO_LE (value);
#endif
   memcpy ((void *) (iter->raw + iter->d1), &value, sizeof (value));
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_overwrite_double --
 *
 *       Overwrites the current BSON_TYPE_DOUBLE field with a new value.
 *       This is performed in-place and therefore no keys are moved.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
void
bson_iter_overwrite_double (bson_iter_t *iter, /* IN */
                            double value)      /* IN */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_DOUBLE) {
      return;
   }

   /* BSON_DOUBLE_TO_LE is a no-op on little-endian hosts. */
   value = BSON_DOUBLE_TO_LE (value);
   memcpy ((void *) (iter->raw + iter->d1), &value, sizeof (value));
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_overwrite_decimal128 --
 *
 *       Overwrites the current BSON_TYPE_DECIMAL128 field with a new value.
 *       This is performed in-place and therefore no keys are moved.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
void
bson_iter_overwrite_decimal128 (bson_iter_t *iter,        /* IN */
                                bson_decimal128_t *value) /* IN */
{
   BSON_ASSERT (iter);

   if (ITER_TYPE (iter) != BSON_TYPE_DECIMAL128) {
      return;
   }

#if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN
   /* Serialize low word first, each little-endian, per the BSON spec. */
   {
      uint64_t words[2];

      words[0] = BSON_UINT64_TO_LE (value->low);
      words[1] = BSON_UINT64_TO_LE (value->high);
      memcpy ((void *) (iter->raw + iter->d1), words, sizeof (words));
   }
#else
   memcpy ((void *) (iter->raw + iter->d1), value, sizeof (*value));
#endif
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_iter_value --
 *
 *       Retrieves a bson_value_t containing the boxed value of the current
 *       element. The result of this function valid until the state of
 *       iter has been changed (through the use of bson_iter_next()).
 *
 * Returns:
 *       A bson_value_t that should not be modified or freed. If you need
 *       to hold on to the value, use bson_value_copy().
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
const bson_value_t *
bson_iter_value (bson_iter_t *iter) /* IN */
{
   bson_value_t *value;

   BSON_ASSERT (iter);

   /* The boxed value lives inside the iterator itself, which is why the
    * result is only valid until the iterator advances. */
   value = &iter->value;
   value->value_type = ITER_TYPE (iter);

   /* Populate the union member matching the current element type. String
    * and buffer members alias the iterator's raw buffer (no copies);
    * OIDs are copied by value. */
   switch (value->value_type) {
   case BSON_TYPE_DOUBLE:
      value->value.v_double = bson_iter_double (iter);
      break;
   case BSON_TYPE_UTF8:
      value->value.v_utf8.str =
         (char *) bson_iter_utf8 (iter, &value->value.v_utf8.len);
      break;
   case BSON_TYPE_DOCUMENT:
      bson_iter_document (iter,
                          &value->value.v_doc.data_len,
                          (const uint8_t **) &value->value.v_doc.data);
      break;
   case BSON_TYPE_ARRAY:
      /* Arrays reuse v_doc since they share the document encoding. */
      bson_iter_array (iter,
                       &value->value.v_doc.data_len,
                       (const uint8_t **) &value->value.v_doc.data);
      break;
   case BSON_TYPE_BINARY:
      bson_iter_binary (iter,
                        &value->value.v_binary.subtype,
                        &value->value.v_binary.data_len,
                        (const uint8_t **) &value->value.v_binary.data);
      break;
   case BSON_TYPE_OID:
      bson_oid_copy (bson_iter_oid (iter), &value->value.v_oid);
      break;
   case BSON_TYPE_BOOL:
      value->value.v_bool = bson_iter_bool (iter);
      break;
   case BSON_TYPE_DATE_TIME:
      value->value.v_datetime = bson_iter_date_time (iter);
      break;
   case BSON_TYPE_REGEX:
      value->value.v_regex.regex = (char *) bson_iter_regex (
         iter, (const char **) &value->value.v_regex.options);
      break;
   case BSON_TYPE_DBPOINTER: {
      const bson_oid_t *oid;

      bson_iter_dbpointer (iter,
                           &value->value.v_dbpointer.collection_len,
                           (const char **) &value->value.v_dbpointer.collection,
                           &oid);
      bson_oid_copy (oid, &value->value.v_dbpointer.oid);
      break;
   }
   case BSON_TYPE_CODE:
      value->value.v_code.code =
         (char *) bson_iter_code (iter, &value->value.v_code.code_len);
      break;
   case BSON_TYPE_SYMBOL:
      value->value.v_symbol.symbol =
         (char *) bson_iter_symbol (iter, &value->value.v_symbol.len);
      break;
   case BSON_TYPE_CODEWSCOPE:
      value->value.v_codewscope.code = (char *) bson_iter_codewscope (
         iter,
         &value->value.v_codewscope.code_len,
         &value->value.v_codewscope.scope_len,
         (const uint8_t **) &value->value.v_codewscope.scope_data);
      break;
   case BSON_TYPE_INT32:
      value->value.v_int32 = bson_iter_int32 (iter);
      break;
   case BSON_TYPE_TIMESTAMP:
      bson_iter_timestamp (iter,
                           &value->value.v_timestamp.timestamp,
                           &value->value.v_timestamp.increment);
      break;
   case BSON_TYPE_INT64:
      value->value.v_int64 = bson_iter_int64 (iter);
      break;
   case BSON_TYPE_DECIMAL128:
      bson_iter_decimal128 (iter, &(value->value.v_decimal128));
      break;
   case BSON_TYPE_NULL:
   case BSON_TYPE_UNDEFINED:
   case BSON_TYPE_MAXKEY:
   case BSON_TYPE_MINKEY:
      /* Valueless types: the type tag alone is the value. */
      break;
   case BSON_TYPE_EOD:
   default:
      return NULL;
   }

   return value;
}
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-iter.h b/mongodb-1.4.2/src/libbson/src/bson/bson-iter.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-iter.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-iter.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-json.c b/mongodb-1.4.2/src/libbson/src/bson/bson-json.c
similarity index 99%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-json.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-json.c
index 88b0eeaa..913b9080 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-json.c
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-json.c
@@ -1,2377 +1,2373 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <math.h>
#include "bson.h"
#include "bson-config.h"
#include "bson-json.h"
#include "bson-iso8601-private.h"
#include "b64_pton.h"
#include "jsonsl/jsonsl.h"
#ifdef _WIN32
#include <io.h>
#include <share.h>
#endif
#ifndef _MSC_VER
#include <strings.h>
#endif
#ifdef _MSC_VER
#define SSCANF sscanf_s
#else
#define SSCANF sscanf
#endif
/* Parser tuning: maximum BSON nesting depth tracked on the reader's
 * frame stack, and the default size of the streaming read buffer. */
#define STACK_MAX 100
#define BSON_JSON_DEFAULT_BUF_SIZE (1 << 14)
#define AT_LEAST_0(x) ((x) >= 0 ? (x) : 0)
/* X-macro machinery: FOREACH_READ_STATE is expanded once with
 * READ_STATE_ENUM to declare bson_json_read_state_t and once with
 * GENERATE_STRING to build the parallel read_state_names[] table used
 * in error messages. Keeping the two in one list prevents drift. */
#define READ_STATE_ENUM(ENUM) BSON_JSON_##ENUM,
#define GENERATE_STRING(STRING) #STRING,
#define FOREACH_READ_STATE(RS) \
   RS (REGULAR) \
   RS (DONE) \
   RS (ERROR) \
   RS (IN_START_MAP) \
   RS (IN_BSON_TYPE) \
   RS (IN_BSON_TYPE_DATE_NUMBERLONG) \
   RS (IN_BSON_TYPE_DATE_ENDMAP) \
   RS (IN_BSON_TYPE_TIMESTAMP_STARTMAP) \
   RS (IN_BSON_TYPE_TIMESTAMP_VALUES) \
   RS (IN_BSON_TYPE_TIMESTAMP_ENDMAP) \
   RS (IN_BSON_TYPE_REGEX_STARTMAP) \
   RS (IN_BSON_TYPE_REGEX_VALUES) \
   RS (IN_BSON_TYPE_REGEX_ENDMAP) \
   RS (IN_BSON_TYPE_BINARY_VALUES) \
   RS (IN_BSON_TYPE_BINARY_ENDMAP) \
   RS (IN_BSON_TYPE_SCOPE_STARTMAP) \
   RS (IN_BSON_TYPE_DBPOINTER_STARTMAP) \
   RS (IN_SCOPE) \
   RS (IN_DBPOINTER)
typedef enum { FOREACH_READ_STATE (READ_STATE_ENUM) } bson_json_read_state_t;
static const char *read_state_names[] = {FOREACH_READ_STATE (GENERATE_STRING)};
/* Same X-macro pattern for the "last field" sub-states entered after
 * reading the key of an extended-JSON construct such as {"$oid": ...}. */
#define BSON_STATE_ENUM(ENUM) BSON_JSON_LF_##ENUM,
#define FOREACH_BSON_STATE(BS) \
   /* legacy {$regex: "...", $options: "..."} */ \
   BS (REGEX) \
   BS (OPTIONS) \
   /* modern $regularExpression: {pattern: "...", options: "..."} */ \
   BS (REGULAR_EXPRESSION_PATTERN) \
   BS (REGULAR_EXPRESSION_OPTIONS) \
   BS (CODE) \
   BS (SCOPE) \
   BS (OID) \
   BS (BINARY) \
   BS (TYPE) \
   BS (DATE) \
   BS (TIMESTAMP_T) \
   BS (TIMESTAMP_I) \
   BS (UNDEFINED) \
   BS (MINKEY) \
   BS (MAXKEY) \
   BS (INT32) \
   BS (INT64) \
   BS (DOUBLE) \
   BS (DECIMAL128) \
   BS (DBPOINTER) \
   BS (SYMBOL) \
   BS (DBREF)
typedef enum {
   FOREACH_BSON_STATE (BSON_STATE_ENUM)
} bson_json_read_bson_state_t;
static const char *bson_state_names[] = {FOREACH_BSON_STATE (GENERATE_STRING)};
/* Growable byte buffer: @buf has @n_bytes allocated, @len bytes in use. */
typedef struct {
   uint8_t *buf;
   size_t n_bytes;
   size_t len;
} bson_json_buf_t;
/* Kind of aggregate currently being appended on the reader stack. */
typedef enum {
   BSON_JSON_FRAME_ARRAY,
   BSON_JSON_FRAME_DOC,
   BSON_JSON_FRAME_SCOPE,
   BSON_JSON_FRAME_DBPOINTER,
} bson_json_frame_type_t;
/* One level of document/array nesting while parsing.
 * @i is the next array index; @has_ref/@has_id track $ref/$id keys for
 * DBRef handling; @bson is the child document under construction. */
typedef struct {
   int i;
   bson_json_frame_type_t type;
   bool has_ref;
   bool has_id;
   bson_t bson;
} bson_json_stack_frame_t;
/* Scratch space for the extended-JSON value currently being decoded.
 * Only one member is live at a time, selected by the reader's
 * bson_state; the has_* flags record which sub-fields were seen. */
typedef union {
   struct {
      bool has_pattern;
      bool has_options;
      bool is_legacy;
   } regex;
   struct {
      bool has_oid;
      bson_oid_t oid;
   } oid;
   struct {
      bool has_binary;
      bool has_subtype;
      bson_subtype_t type;
      bool is_legacy;
   } binary;
   struct {
      bool has_date;
      int64_t date;
   } date;
   struct {
      bool has_t;
      bool has_i;
      uint32_t t;
      uint32_t i;
   } timestamp;
   struct {
      bool has_undefined;
   } undefined;
   struct {
      bool has_minkey;
   } minkey;
   struct {
      bool has_maxkey;
   } maxkey;
   struct {
      int32_t value;
   } v_int32;
   struct {
      int64_t value;
   } v_int64;
   struct {
      double value;
   } v_double;
   struct {
      bson_decimal128_t value;
   } v_decimal128;
} bson_json_bson_data_t;
/* collect info while parsing a {$code: "...", $scope: {...}} object */
typedef struct {
   bool has_code;
   bool has_scope;
   bool in_scope;
   bson_json_buf_t key_buf;
   bson_json_buf_t code_buf;
} bson_json_code_t;
/* Release the heap buffers owned by a bson_json_code_t accumulator. */
static void
_bson_json_code_cleanup (bson_json_code_t *code_data)
{
   /* bson_free tolerates NULL, so no guards are needed. */
   bson_free (code_data->code_buf.buf);
   bson_free (code_data->key_buf.buf);
}
/* Per-reader BSON construction state: the destination document, the
 * nesting stack, the current key, and scratch buffers for the
 * extended-JSON construct in flight. */
typedef struct {
   bson_t *bson;
   bson_json_stack_frame_t stack[STACK_MAX];
   int n;
   const char *key;
   bson_json_buf_t key_buf;
   bson_json_buf_t unescaped;
   bson_json_read_state_t read_state;
   bson_json_read_bson_state_t bson_state;
   bson_type_t bson_type;
   bson_json_buf_t bson_type_buf[3];
   bson_json_bson_data_t bson_type_data;
   bson_json_code_t code_data;
   bson_json_buf_t dbpointer_key;
} bson_json_reader_bson_t;
/* Input side of the reader: a caller-supplied read callback (@cb) with
 * its destroy callback (@dcb), plus the buffer it fills and the
 * read/parse cursors into it. */
typedef struct {
   void *data;
   bson_json_reader_cb cb;
   bson_json_destroy_cb dcb;
   uint8_t *buf;
   size_t buf_size;
   size_t bytes_read;
   size_t bytes_parsed;
   bool all_whitespace;
} bson_json_reader_producer_t;
/* The public reader handle: producer + BSON state + the jsonsl parser
 * and its token accumulator. */
struct _bson_json_reader_t {
   bson_json_reader_producer_t producer;
   bson_json_reader_bson_t bson;
   jsonsl_t json;
   ssize_t json_text_pos;
   bool should_reset;
   ssize_t advance;
   bson_json_buf_t tok_accumulator;
   bson_error_t *error;
};
/* File-descriptor producer payload; do_close says whether we own @fd. */
typedef struct {
   int fd;
   bool do_close;
} bson_json_reader_handle_fd_t;
/* forward decl */
static void
_bson_json_save_map_key (bson_json_reader_bson_t *bson,
                         const uint8_t *val,
                         size_t len);
/* Used as the no-op statement argument of the STACK_POP_* macros below. */
static void
_noop (void)
{
}
/* Frame-stack accessors. bson->n indexes the current frame; STACK_BSON(0)
 * resolves to the top-level document when the effective index is 0, else to
 * the bson_t embedded in the corresponding stack frame. */
#define STACK_ELE(_delta, _name) (bson->stack[(_delta) + bson->n]._name)
#define STACK_BSON(_delta) \
   (((_delta) + bson->n) == 0 ? bson->bson : &STACK_ELE (_delta, bson))
#define STACK_BSON_PARENT STACK_BSON (-1)
#define STACK_BSON_CHILD STACK_BSON (0)
/* Per-frame fields: array element counter, frame kind, and $ref/$id flags. */
#define STACK_I STACK_ELE (0, i)
#define STACK_FRAME_TYPE STACK_ELE (0, type)
#define STACK_IS_ARRAY (STACK_FRAME_TYPE == BSON_JSON_FRAME_ARRAY)
#define STACK_IS_DOC (STACK_FRAME_TYPE == BSON_JSON_FRAME_DOC)
#define STACK_IS_SCOPE (STACK_FRAME_TYPE == BSON_JSON_FRAME_SCOPE)
#define STACK_IS_DBPOINTER (STACK_FRAME_TYPE == BSON_JSON_FRAME_DBPOINTER)
#define STACK_HAS_REF STACK_ELE (0, has_ref)
#define STACK_HAS_ID STACK_ELE (0, has_id)
/* Push a new frame of the given kind and run `statement` (typically a
 * bson_append_*_begin or bson_init on the child) unless the new frame is
 * the top-level document. NOTE: on stack overflow these `return` out of the
 * ENCLOSING function, silently dropping the element. */
#define STACK_PUSH_ARRAY(statement)        \
   do {                                    \
      if (bson->n >= (STACK_MAX - 1)) {    \
         return;                           \
      }                                    \
      bson->n++;                           \
      STACK_I = 0;                         \
      STACK_FRAME_TYPE = BSON_JSON_FRAME_ARRAY; \
      if (bson->n != 0) {                  \
         statement;                        \
      }                                    \
   } while (0)
#define STACK_PUSH_DOC(statement)          \
   do {                                    \
      if (bson->n >= (STACK_MAX - 1)) {    \
         return;                           \
      }                                    \
      bson->n++;                           \
      STACK_FRAME_TYPE = BSON_JSON_FRAME_DOC; \
      STACK_HAS_REF = false;               \
      STACK_HAS_ID = false;                \
      if (bson->n != 0) {                  \
         statement;                        \
      }                                    \
   } while (0)
#define STACK_PUSH_SCOPE(statement)        \
   do {                                    \
      if (bson->n >= (STACK_MAX - 1)) {    \
         return;                           \
      }                                    \
      bson->n++;                           \
      STACK_FRAME_TYPE = BSON_JSON_FRAME_SCOPE; \
      bson->code_data.in_scope = true;     \
      if (bson->n != 0) {                  \
         statement;                        \
      }                                    \
   } while (0)
#define STACK_PUSH_DBPOINTER(statement)    \
   do {                                    \
      if (bson->n >= (STACK_MAX - 1)) {    \
         return;                           \
      }                                    \
      bson->n++;                           \
      STACK_FRAME_TYPE = BSON_JSON_FRAME_DBPOINTER; \
      if (bson->n != 0) {                  \
         statement;                        \
      }                                    \
   } while (0)
/* Pop the current frame, running `statement` (typically bson_append_*_end)
 * unless we are leaving the top-level document. The kind checks guard
 * against mismatched open/close tokens. */
#define STACK_POP_ARRAY(statement) \
   do {                            \
      if (!STACK_IS_ARRAY) {       \
         return;                   \
      }                            \
      if (bson->n < 0) {           \
         return;                   \
      }                            \
      if (bson->n > 0) {           \
         statement;                \
      }                            \
      bson->n--;                   \
   } while (0)
#define STACK_POP_DOC(statement) \
   do {                          \
      if (STACK_IS_ARRAY) {      \
         return;                 \
      }                          \
      if (bson->n < 0) {         \
         return;                 \
      }                          \
      if (bson->n > 0) {         \
         statement;              \
      }                          \
      bson->n--;                 \
   } while (0)
/* NOTE(review): this macro expands with a trailing ';' (it is written
 * "while (0);"), unlike its siblings — beware in unbraced if/else bodies. */
#define STACK_POP_SCOPE                    \
   do {                                    \
      STACK_POP_DOC (_noop ());            \
      bson->code_data.in_scope = false;    \
   } while (0);
#define STACK_POP_DBPOINTER STACK_POP_DOC (_noop ())
/* Common prologue for value callbacks: declares (key, len) for the pending
 * element, synthesizing array-index keys via _bson_json_read_fixup_key. */
#define BASIC_CB_PREAMBLE                     \
   const char *key;                           \
   size_t len;                                \
   bson_json_reader_bson_t *bson = &reader->bson; \
   _bson_json_read_fixup_key (bson);          \
   key = bson->key;                           \
   len = bson->key_buf.len;
/* Error out of the calling callback unless the reader is in the regular
 * state AND a key is pending for the value about to be appended. */
#define BASIC_CB_BAIL_IF_NOT_NORMAL(_type)                          \
   if (bson->read_state != BSON_JSON_REGULAR) {                     \
      _bson_json_read_set_error (reader,                            \
                                 "Invalid read of %s in state %s",  \
                                 (_type),                           \
                                 read_state_names[bson->read_state]); \
      return;                                                       \
   } else if (!key) {                                               \
      _bson_json_read_set_error (reader,                            \
                                 "Invalid read of %s without key in state %s", \
                                 (_type),                           \
                                 read_state_names[bson->read_state]); \
      return;                                                       \
   }
/* Expands to "(condition) { ... }" and is meant to directly follow `if` or
 * `else if`: when the map key matches _key, records the extended-JSON type
 * and the bson_state naming the value we now expect. Errors if a different
 * type was already in progress for this wrapper. */
#define HANDLE_OPTION(_key, _type, _state)                                 \
   (len == strlen (_key) && strncmp ((const char *) val, (_key), len) == 0) \
   {                                                                       \
      if (bson->bson_type && bson->bson_type != (_type)) {                 \
         _bson_json_read_set_error (reader,                                \
                                    "Invalid key \"%s\". Looking for values " \
                                    "for type \"%s\", got \"%s\"",         \
                                    (_key),                                \
                                    _bson_json_type_name (bson->bson_type), \
                                    _bson_json_type_name (_type));         \
         return;                                                           \
      }                                                                    \
      bson->bson_type = (_type);                                           \
      bson->bson_state = (_state);                                         \
   }
static void
_bson_json_read_set_error (bson_json_reader_t *reader, const char *fmt, ...)
   BSON_GNUC_PRINTF (2, 3);
static void
_bson_json_read_corrupt (bson_json_reader_t *reader, const char *fmt, ...)
   BSON_GNUC_PRINTF (2, 3);
/* Shared implementation for the two error reporters below: format the
 * message into reader->error (when the caller supplied one) with the given
 * error code, put the reader into the error state, and halt the lexer.
 * Factored out because the originals were byte-for-byte duplicates apart
 * from the code. */
static void
_bson_json_read_verror (bson_json_reader_t *reader,
                        uint32_t code,
                        const char *fmt,
                        va_list ap)
{
   if (reader->error) {
      reader->error->domain = BSON_ERROR_JSON;
      reader->error->code = code;
      bson_vsnprintf (
         reader->error->message, sizeof reader->error->message, fmt, ap);
      /* bson_vsnprintf is trusted to terminate, but be defensive */
      reader->error->message[sizeof reader->error->message - 1] = '\0';
   }
   reader->bson.read_state = BSON_JSON_ERROR;
   jsonsl_stop (reader->json);
}
/* Report semantically invalid (extended) JSON, e.g. a bad $-wrapper. */
static void
_bson_json_read_set_error (bson_json_reader_t *reader, /* IN */
                           const char *fmt,            /* IN */
                           ...)
{
   va_list ap;
   va_start (ap, fmt);
   _bson_json_read_verror (
      reader, BSON_JSON_ERROR_READ_INVALID_PARAM, fmt, ap);
   va_end (ap);
}
/* Report syntactically corrupt JSON input. */
static void
_bson_json_read_corrupt (bson_json_reader_t *reader, /* IN */
                         const char *fmt,            /* IN */
                         ...)
{
   va_list ap;
   va_start (ap, fmt);
   _bson_json_read_verror (reader, BSON_JSON_ERROR_READ_CORRUPT_JS, fmt, ap);
   va_end (ap);
}
/* Guarantee `buf` can hold at least `len` bytes, growing to the next power
 * of two if needed. Existing contents are NOT preserved on growth. */
static void
_bson_json_buf_ensure (bson_json_buf_t *buf, /* IN */
                       size_t len)           /* IN */
{
   if (buf->n_bytes >= len) {
      return;
   }
   bson_free (buf->buf);
   buf->n_bytes = bson_next_power_of_two (len);
   buf->buf = bson_malloc (buf->n_bytes);
}
/* Replace the contents of `buf` with `len` bytes from `from`, always
 * appending a NUL terminator (not counted in buf->len). */
static void
_bson_json_buf_set (bson_json_buf_t *buf, const void *from, size_t len)
{
   /* reserve room for the payload plus the terminator */
   _bson_json_buf_ensure (buf, len + 1);
   memcpy (buf->buf, from, len);
   buf->len = len;
   buf->buf[len] = '\0';
}
/* Append `len` bytes from `from` to `buf`, keeping it NUL-terminated.
 * Growth preserves existing contents (realloc), unlike _bson_json_buf_ensure. */
static void
_bson_json_buf_append (bson_json_buf_t *buf, const void *from, size_t len)
{
   size_t needed = buf->len + len + 1; /* existing data + new data + NUL */

   if (buf->len == 0) {
      /* nothing to preserve; cheap (possibly reallocating) path */
      _bson_json_buf_ensure (buf, needed);
   } else if (buf->n_bytes < needed) {
      buf->n_bytes = bson_next_power_of_two (needed);
      buf->buf = bson_realloc (buf->buf, buf->n_bytes);
   }

   memcpy (buf->buf + buf->len, from, len);
   buf->len += len;
   buf->buf[buf->len] = '\0';
}
/* Human-readable name for a BSON type, used when building error messages.
 * Unrecognized values map to the empty string. */
static const char *
_bson_json_type_name (bson_type_t type)
{
   switch (type) {
   case BSON_TYPE_EOD:        return "end of document";
   case BSON_TYPE_DOUBLE:     return "double";
   case BSON_TYPE_UTF8:       return "utf-8";
   case BSON_TYPE_DOCUMENT:   return "document";
   case BSON_TYPE_ARRAY:      return "array";
   case BSON_TYPE_BINARY:     return "binary";
   case BSON_TYPE_UNDEFINED:  return "undefined";
   case BSON_TYPE_OID:        return "objectid";
   case BSON_TYPE_BOOL:       return "bool";
   case BSON_TYPE_DATE_TIME:  return "datetime";
   case BSON_TYPE_NULL:       return "null";
   case BSON_TYPE_REGEX:      return "regex";
   case BSON_TYPE_DBPOINTER:  return "dbpointer";
   case BSON_TYPE_CODE:       return "code";
   case BSON_TYPE_SYMBOL:     return "symbol";
   case BSON_TYPE_CODEWSCOPE: return "code with scope";
   case BSON_TYPE_INT32:      return "int32";
   case BSON_TYPE_TIMESTAMP:  return "timestamp";
   case BSON_TYPE_INT64:      return "int64";
   case BSON_TYPE_DECIMAL128: return "decimal128";
   case BSON_TYPE_MAXKEY:     return "maxkey";
   case BSON_TYPE_MINKEY:     return "minkey";
   default:                   return "";
   }
}
/* When parsing inside an array in the regular state, JSON values carry no
 * key, so synthesize one from the running element index ("0", "1", ...). */
static void
_bson_json_read_fixup_key (bson_json_reader_bson_t *bson) /* IN */
{
   if (bson->n < 0 || bson->read_state != BSON_JSON_REGULAR ||
       !STACK_IS_ARRAY) {
      return;
   }

   /* 12 bytes: enough for any uint32 in decimal plus NUL */
   _bson_json_buf_ensure (&bson->key_buf, 12);
   bson->key_buf.len = bson_uint32_to_string (
      STACK_I, &bson->key, (char *) bson->key_buf.buf, 12);
   STACK_I++;
}
/* JSON `null` token: append a BSON null under the pending key. */
static void
_bson_json_read_null (bson_json_reader_t *reader)
{
   BASIC_CB_PREAMBLE;
   BASIC_CB_BAIL_IF_NOT_NORMAL ("null");
   bson_append_null (STACK_BSON_CHILD, key, (int) len);
}
/* JSON true/false token. As a special case, {"$undefined": true} records
 * the undefined flag instead of appending a boolean. */
static void
_bson_json_read_boolean (bson_json_reader_t *reader, /* IN */
                         int val)                    /* IN */
{
   BASIC_CB_PREAMBLE;
   if (bson->read_state == BSON_JSON_IN_BSON_TYPE &&
       bson->bson_state == BSON_JSON_LF_UNDEFINED) {
      bson->bson_type_data.undefined.has_undefined = true;
      return;
   }
   BASIC_CB_BAIL_IF_NOT_NORMAL ("boolean");
   bson_append_bool (STACK_BSON_CHILD, key, (int) len, val);
}
/* sign is -1 or 1 */
/* Handle an unquoted JSON integer token of magnitude `val` and the given
 * sign. In the regular state this appends int32 when the value fits, else
 * int64. Inside extended-JSON wrappers it feeds $date, $timestamp t/i,
 * $minKey and $maxKey; anything else is an error.
 *
 * FIX: the BSON_JSON_LF_MAXKEY branch previously reported "Invalid MinKey
 * value" (copy-paste from the MinKey branch); it now says MaxKey. */
static void
_bson_json_read_integer (bson_json_reader_t *reader, uint64_t val, int64_t sign)
{
   bson_json_read_state_t rs;
   bson_json_read_bson_state_t bs;

   BASIC_CB_PREAMBLE;

   /* reject magnitudes that cannot be represented in int64 */
   if (sign == 1 && val > INT64_MAX) {
      _bson_json_read_set_error (
         reader, "Number \"%" PRIu64 "\" is out of range", val);
      return;
   } else if (sign == -1 && val > ((uint64_t) INT64_MAX + 1)) {
      _bson_json_read_set_error (
         reader, "Number \"-%" PRIu64 "\" is out of range", val);
      return;
   }

   rs = bson->read_state;
   bs = bson->bson_state;

   if (rs == BSON_JSON_REGULAR) {
      BASIC_CB_BAIL_IF_NOT_NORMAL ("integer");
      /* prefer int32 when the signed value fits */
      if (val <= INT32_MAX || (sign == -1 && val <= (uint64_t) INT32_MAX + 1)) {
         bson_append_int32 (
            STACK_BSON_CHILD, key, (int) len, (int) (val * sign));
      } else if (sign == -1) {
         bson_append_int64 (STACK_BSON_CHILD, key, (int) len, (int64_t) -val);
      } else {
         bson_append_int64 (STACK_BSON_CHILD, key, (int) len, (int64_t) val);
      }
   } else if (rs == BSON_JSON_IN_BSON_TYPE ||
              rs == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES) {
      switch (bs) {
      case BSON_JSON_LF_DATE:
         bson->bson_type_data.date.has_date = true;
         bson->bson_type_data.date.date = sign * val;
         break;
      case BSON_JSON_LF_TIMESTAMP_T:
         if (sign == -1) {
            _bson_json_read_set_error (
               reader, "Invalid timestamp value: \"-%" PRIu64 "\"", val);
            return;
         }
         bson->bson_type_data.timestamp.has_t = true;
         bson->bson_type_data.timestamp.t = (uint32_t) val;
         break;
      case BSON_JSON_LF_TIMESTAMP_I:
         if (sign == -1) {
            _bson_json_read_set_error (
               reader, "Invalid timestamp value: \"-%" PRIu64 "\"", val);
            return;
         }
         bson->bson_type_data.timestamp.has_i = true;
         bson->bson_type_data.timestamp.i = (uint32_t) val;
         break;
      case BSON_JSON_LF_MINKEY:
         if (sign == -1) {
            _bson_json_read_set_error (
               reader, "Invalid MinKey value: \"-%" PRIu64 "\"", val);
            return;
         } else if (val != 1) {
            /* error is set and the lexer stopped; the flag below is moot */
            _bson_json_read_set_error (
               reader, "Invalid MinKey value: \"%" PRIu64 "\"", val);
         }
         bson->bson_type_data.minkey.has_minkey = true;
         break;
      case BSON_JSON_LF_MAXKEY:
         if (sign == -1) {
            _bson_json_read_set_error (
               reader, "Invalid MaxKey value: \"-%" PRIu64 "\"", val);
            return;
         } else if (val != 1) {
            _bson_json_read_set_error (
               reader, "Invalid MaxKey value: \"%" PRIu64 "\"", val);
         }
         bson->bson_type_data.maxkey.has_maxkey = true;
         break;
      case BSON_JSON_LF_INT32:
      case BSON_JSON_LF_INT64:
         /* spec requires these as quoted strings, not bare integers */
         _bson_json_read_set_error (
            reader,
            "Invalid state for integer read: %s, "
            "expected number as quoted string like \"123\"",
            bson_state_names[bs]);
         break;
      case BSON_JSON_LF_REGEX:
      case BSON_JSON_LF_OPTIONS:
      case BSON_JSON_LF_REGULAR_EXPRESSION_PATTERN:
      case BSON_JSON_LF_REGULAR_EXPRESSION_OPTIONS:
      case BSON_JSON_LF_CODE:
      case BSON_JSON_LF_SCOPE:
      case BSON_JSON_LF_OID:
      case BSON_JSON_LF_BINARY:
      case BSON_JSON_LF_TYPE:
      case BSON_JSON_LF_UNDEFINED:
      case BSON_JSON_LF_DOUBLE:
      case BSON_JSON_LF_DECIMAL128:
      case BSON_JSON_LF_DBPOINTER:
      case BSON_JSON_LF_SYMBOL:
      case BSON_JSON_LF_DBREF:
      default:
         _bson_json_read_set_error (reader,
                                    "Unexpected integer %s%" PRIu64
                                    " in type \"%s\"",
                                    sign == -1 ? "-" : "",
                                    val,
                                    _bson_json_type_name (bson->bson_type));
      }
   } else {
      _bson_json_read_set_error (reader,
                                 "Unexpected integer %s%" PRIu64
                                 " in state \"%s\"",
                                 sign == -1 ? "-" : "",
                                 val,
                                 read_state_names[rs]);
   }
}
/* Parse `val` (NUL-terminated, vlen chars) as a double into *d. Returns
 * false and sets a reader error on out-of-range input; NaN and +/-Infinity
 * succeed. Two paths because Microsoft's pre-2013 strtod cannot parse
 * "NaN"/"Infinity" (it yields 0). */
static bool
_bson_json_parse_double (bson_json_reader_t *reader,
                         const char *val,
                         size_t vlen,
                         double *d)
{
   errno = 0;
   *d = strtod (val, NULL);
#ifdef _MSC_VER
   /* Microsoft's strtod parses "NaN", "Infinity", "-Infinity" as 0 */
   if (*d == 0.0) {
      if (!_strnicmp (val, "nan", vlen)) {
#ifdef NAN
         *d = NAN;
#else
         /* Visual Studio 2010 doesn't define NAN or INFINITY
          * https://msdn.microsoft.com/en-us/library/w22adx1s(v=vs.100).aspx */
         /* NOTE(review): *(double *) nan type-puns via pointer cast; this
          * MSVC-only fallback predates a memcpy-based idiom — confirm the
          * targeted compiler tolerates it. */
         unsigned long nan[2] = {0xffffffff, 0x7fffffff};
         *d = *(double *) nan;
#endif
         return true;
      } else if (!_strnicmp (val, "infinity", vlen)) {
#ifdef INFINITY
         *d = INFINITY;
#else
         unsigned long inf[2] = {0x00000000, 0x7ff00000};
         *d = *(double *) inf;
#endif
         return true;
      } else if (!_strnicmp (val, "-infinity", vlen)) {
#ifdef INFINITY
         *d = -INFINITY;
#else
         unsigned long inf[2] = {0x00000000, 0xfff00000};
         *d = *(double *) inf;
#endif
         return true;
      }
   }
   /* strtod sets ERANGE and returns +/-HUGE_VAL on overflow */
   if ((*d == HUGE_VAL || *d == -HUGE_VAL) && errno == ERANGE) {
      _bson_json_read_set_error (
         reader, "Number \"%.*s\" is out of range", (int) vlen, val);
      return false;
   }
#else
   /* not MSVC - set err on overflow, but avoid err for infinity */
   if ((*d == HUGE_VAL || *d == -HUGE_VAL) && errno == ERANGE &&
       strncasecmp (val, "infinity", vlen) &&
       strncasecmp (val, "-infinity", vlen)) {
      _bson_json_read_set_error (
         reader, "Number \"%.*s\" is out of range", (int) vlen, val);
      return false;
   }
#endif /* _MSC_VER */
   return true;
}
/* JSON number token with a fractional/exponent part: append as double. */
static void
_bson_json_read_double (bson_json_reader_t *reader, /* IN */
                        double val)                 /* IN */
{
   BASIC_CB_PREAMBLE;
   BASIC_CB_BAIL_IF_NOT_NORMAL ("double");
   bson_append_double (STACK_BSON_CHILD, key, (int) len, val);
}
/* Parse a NUL-terminated decimal string of length vlen into *v64. On
 * range error or trailing garbage, set a reader error and return false. */
static bool
_bson_json_read_int64_or_set_error (bson_json_reader_t *reader, /* IN */
                                    const unsigned char *val,   /* IN */
                                    size_t vlen,                /* IN */
                                    int64_t *v64)               /* OUT */
{
   bson_json_reader_bson_t *bson = &reader->bson;
   char *end = NULL;

   _bson_json_read_fixup_key (bson);
   errno = 0;
   *v64 = bson_ascii_strtoll ((const char *) val, &end, 10);

   /* strtoll-style overflow: clamped result plus ERANGE */
   if (errno == ERANGE && (*v64 == INT64_MIN || *v64 == INT64_MAX)) {
      _bson_json_read_set_error (reader, "Number \"%s\" is out of range", val);
      return false;
   }

   /* the whole token must have been consumed */
   if (end != ((const char *) val + vlen)) {
      _bson_json_read_set_error (reader, "Number \"%s\" is invalid", val);
      return false;
   }

   return true;
}
/* parse a value for "base64", "subType" or legacy "$binary" or "$type" */
/* FIX: when b64_pton reports invalid base64 (negative length), the error
 * was set but the function fell through and called _bson_json_buf_ensure /
 * b64_pton again with (size_t) of a negative length. Now returns early. */
static void
_bson_json_parse_binary_elem (bson_json_reader_t *reader,
                              const char *val_w_null,
                              size_t vlen)
{
   bson_json_read_bson_state_t bs;
   bson_json_bson_data_t *data;
   int binary_len;

   BASIC_CB_PREAMBLE;

   bs = bson->bson_state;
   data = &bson->bson_type_data;

   if (bs == BSON_JSON_LF_BINARY) {
      data->binary.has_binary = true;
      /* first pass: validate and measure the decoded length */
      binary_len = b64_pton (val_w_null, NULL, 0);
      if (binary_len < 0) {
         _bson_json_read_set_error (
            reader,
            "Invalid input string \"%s\", looking for base64-encoded binary",
            val_w_null);
         return;
      }
      /* second pass: decode into the scratch buffer */
      _bson_json_buf_ensure (&bson->bson_type_buf[0], (size_t) binary_len + 1);
      b64_pton (
         val_w_null, bson->bson_type_buf[0].buf, (size_t) binary_len + 1);
      bson->bson_type_buf[0].len = (size_t) binary_len;
   } else if (bs == BSON_JSON_LF_TYPE) {
      data->binary.has_subtype = true;
      if (SSCANF (val_w_null, "%02x", &data->binary.type) != 1) {
         if (!data->binary.is_legacy || data->binary.has_binary) {
            /* misformatted subtype, like {$binary: {base64: "", subType: "x"}},
             * or legacy {$binary: "", $type: "x"} */
            _bson_json_read_set_error (
               reader,
               "Invalid input string \"%s\", looking for binary subtype",
               val_w_null);
         } else {
            /* actually a query operator: {x: {$type: "array"}}*/
            bson->read_state = BSON_JSON_REGULAR;
            STACK_PUSH_DOC (bson_append_document_begin (
               STACK_BSON_PARENT, key, (int) len, STACK_BSON_CHILD));
            bson_append_utf8 (STACK_BSON_CHILD,
                              "$type",
                              5,
                              (const char *) val_w_null,
                              (int) vlen);
         }
      }
   }
}
/* Handle a JSON string token. In the regular state it appends a UTF-8
 * element; inside an extended-JSON wrapper it stores or validates the value
 * according to the pending "$"-key ($oid, $numberLong, $regex, $binary, ...). */
static void
_bson_json_read_string (bson_json_reader_t *reader, /* IN */
                        const unsigned char *val,   /* IN */
                        size_t vlen)                /* IN */
{
   bson_json_read_state_t rs;
   bson_json_read_bson_state_t bs;
   BASIC_CB_PREAMBLE;
   rs = bson->read_state;
   bs = bson->bson_state;
   /* reject invalid UTF-8 up front (embedded NULs are allowed) */
   if (!bson_utf8_validate ((const char *) val, vlen, true /*allow null*/)) {
      _bson_json_read_corrupt (reader, "invalid bytes in UTF8 string");
      return;
   }
   if (rs == BSON_JSON_REGULAR) {
      BASIC_CB_BAIL_IF_NOT_NORMAL ("string");
      bson_append_utf8 (
         STACK_BSON_CHILD, key, (int) len, (const char *) val, (int) vlen);
   } else if (rs == BSON_JSON_IN_BSON_TYPE_SCOPE_STARTMAP ||
              rs == BSON_JSON_IN_BSON_TYPE_DBPOINTER_STARTMAP) {
      /* $scope / $dbPointer require an object value, not a string */
      _bson_json_read_set_error (reader,
                                 "Invalid read of \"%s\" in state \"%s\"",
                                 val,
                                 read_state_names[rs]);
   } else if (rs == BSON_JSON_IN_BSON_TYPE_BINARY_VALUES) {
      const char *val_w_null;
      /* copy through a scratch buffer to guarantee NUL termination */
      _bson_json_buf_set (&bson->bson_type_buf[2], val, vlen);
      val_w_null = (const char *) bson->bson_type_buf[2].buf;
      _bson_json_parse_binary_elem (reader, val_w_null, vlen);
   } else if (rs == BSON_JSON_IN_BSON_TYPE ||
              rs == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES ||
              rs == BSON_JSON_IN_BSON_TYPE_REGEX_VALUES ||
              rs == BSON_JSON_IN_BSON_TYPE_DATE_NUMBERLONG) {
      const char *val_w_null;
      _bson_json_buf_set (&bson->bson_type_buf[2], val, vlen);
      val_w_null = (const char *) bson->bson_type_buf[2].buf;
      switch (bs) {
      case BSON_JSON_LF_REGEX:
         bson->bson_type_data.regex.is_legacy = true;
         /* FALL THROUGH */
      case BSON_JSON_LF_REGULAR_EXPRESSION_PATTERN:
         bson->bson_type_data.regex.has_pattern = true;
         _bson_json_buf_set (&bson->bson_type_buf[0], val, vlen);
         break;
      case BSON_JSON_LF_OPTIONS:
         bson->bson_type_data.regex.is_legacy = true;
         /* FALL THROUGH */
      case BSON_JSON_LF_REGULAR_EXPRESSION_OPTIONS:
         bson->bson_type_data.regex.has_options = true;
         _bson_json_buf_set (&bson->bson_type_buf[1], val, vlen);
         break;
      case BSON_JSON_LF_OID:
         /* ObjectId is exactly 24 hex chars */
         if (vlen != 24) {
            goto BAD_PARSE;
         }
         bson->bson_type_data.oid.has_oid = true;
         bson_oid_init_from_string (&bson->bson_type_data.oid.oid, val_w_null);
         break;
      case BSON_JSON_LF_BINARY:
      case BSON_JSON_LF_TYPE:
         /* a bare string here means the legacy {$binary: "...", $type: "xx"} */
         bson->bson_type_data.binary.is_legacy = true;
         _bson_json_parse_binary_elem (reader, val_w_null, vlen);
         break;
      case BSON_JSON_LF_INT32: {
         int64_t v64;
         if (!_bson_json_read_int64_or_set_error (reader, val, vlen, &v64)) {
            /* the error is set, return and let the reader exit */
            return;
         }
         if (v64 < INT32_MIN || v64 > INT32_MAX) {
            goto BAD_PARSE;
         }
         if (bson->read_state == BSON_JSON_IN_BSON_TYPE) {
            bson->bson_type_data.v_int32.value = (int32_t) v64;
         } else {
            goto BAD_PARSE;
         }
      } break;
      case BSON_JSON_LF_INT64: {
         int64_t v64;
         if (!_bson_json_read_int64_or_set_error (reader, val, vlen, &v64)) {
            /* the error is set, return and let the reader exit */
            return;
         }
         if (bson->read_state == BSON_JSON_IN_BSON_TYPE) {
            bson->bson_type_data.v_int64.value = v64;
         } else if (bson->read_state ==
                    BSON_JSON_IN_BSON_TYPE_DATE_NUMBERLONG) {
            /* {$date: {$numberLong: "..."}} - millis since the epoch */
            bson->bson_type_data.date.has_date = true;
            bson->bson_type_data.date.date = v64;
         } else {
            goto BAD_PARSE;
         }
      } break;
      case BSON_JSON_LF_DOUBLE: {
         _bson_json_parse_double (reader,
                                  (const char *) val,
                                  vlen,
                                  &bson->bson_type_data.v_double.value);
      } break;
      case BSON_JSON_LF_DATE: {
         int64_t v64;
         /* ISO-8601 form: {$date: "2012-12-24T12:15:30.501Z"} */
         if (!_bson_iso8601_date_parse (
                (char *) val, (int) vlen, &v64, reader->error)) {
            jsonsl_stop (reader->json);
         } else {
            bson->bson_type_data.date.has_date = true;
            bson->bson_type_data.date.date = v64;
         }
      } break;
      case BSON_JSON_LF_DECIMAL128: {
         bson_decimal128_t decimal128;
         bson_decimal128_from_string (val_w_null, &decimal128);
         if (bson->read_state == BSON_JSON_IN_BSON_TYPE) {
            bson->bson_type_data.v_decimal128.value = decimal128;
         } else {
            goto BAD_PARSE;
         }
      } break;
      case BSON_JSON_LF_CODE:
         _bson_json_buf_set (&bson->code_data.code_buf, val, vlen);
         break;
      case BSON_JSON_LF_SYMBOL:
         bson_append_symbol (
            STACK_BSON_CHILD, key, (int) len, (const char *) val, (int) vlen);
         break;
      case BSON_JSON_LF_DBREF:
         /* the "$ref" of a {$ref: "...", $id: ... }, append normally */
         bson_append_utf8 (
            STACK_BSON_CHILD, key, (int) len, (const char *) val, (int) vlen);
         bson->read_state = BSON_JSON_REGULAR;
         break;
      case BSON_JSON_LF_SCOPE:
      case BSON_JSON_LF_TIMESTAMP_T:
      case BSON_JSON_LF_TIMESTAMP_I:
      case BSON_JSON_LF_UNDEFINED:
      case BSON_JSON_LF_MINKEY:
      case BSON_JSON_LF_MAXKEY:
      case BSON_JSON_LF_DBPOINTER:
      default:
         goto BAD_PARSE;
      }
      return;
   BAD_PARSE:
      _bson_json_read_set_error (reader,
                                 "Invalid input string \"%s\", looking for %s",
                                 val_w_null,
                                 bson_state_names[bs]);
   } else {
      _bson_json_read_set_error (
         reader, "Invalid state to look for string: %s", read_state_names[rs]);
   }
}
/* Begin a JSON object ("{"). Depending on the current state this either
 * marks the start of a plain (sub)document or transitions into one of the
 * extended-JSON wrapper states ($date, $binary, $timestamp, $scope,
 * $dbPointer, $regularExpression). */
static void
_bson_json_read_start_map (bson_json_reader_t *reader) /* IN */
{
   BASIC_CB_PREAMBLE;

   if (bson->read_state == BSON_JSON_IN_BSON_TYPE) {
      if (bson->bson_state == BSON_JSON_LF_DATE) {
         bson->read_state = BSON_JSON_IN_BSON_TYPE_DATE_NUMBERLONG;
      } else if (bson->bson_state == BSON_JSON_LF_BINARY) {
         bson->read_state = BSON_JSON_IN_BSON_TYPE_BINARY_VALUES;
      } else if (bson->bson_state == BSON_JSON_LF_TYPE) {
         /* special case, we started parsing {$type: {$numberInt: "2"}} and we
          * expected a legacy Binary format. now we see the second "{", so
          * backtrack and parse $type query operator. */
         bson->read_state = BSON_JSON_IN_START_MAP;
         /* cast len explicitly, consistent with every other call site */
         STACK_PUSH_DOC (bson_append_document_begin (
            STACK_BSON_PARENT, key, (int) len, STACK_BSON_CHILD));
         _bson_json_save_map_key (bson, (const uint8_t *) "$type", 5);
      }
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_STARTMAP) {
      bson->read_state = BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES;
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_SCOPE_STARTMAP) {
      bson->read_state = BSON_JSON_IN_SCOPE;
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DBPOINTER_STARTMAP) {
      bson->read_state = BSON_JSON_IN_DBPOINTER;
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_REGEX_STARTMAP) {
      bson->read_state = BSON_JSON_IN_BSON_TYPE_REGEX_VALUES;
   } else {
      bson->read_state = BSON_JSON_IN_START_MAP;
   }

   /* silence some warnings */
   (void) len;
   (void) key;
}
/* Return true if `key` (length `len`, not necessarily NUL-terminated) is
 * one of the extended-JSON "$"-prefixed keys this reader understands.
 * FIX: the $numberInt/$numberLong/$numberDouble/$numberDecimal group was
 * listed twice; each key now appears exactly once. */
static bool
_is_known_key (const char *key, size_t len)
{
   bool ret;
#define IS_KEY(k) (len == strlen (k) && (0 == memcmp (k, key, len)))
   ret = (IS_KEY ("$regularExpression") || IS_KEY ("$regex") ||
          IS_KEY ("$options") || IS_KEY ("$code") || IS_KEY ("$scope") ||
          IS_KEY ("$oid") || IS_KEY ("$binary") || IS_KEY ("$type") ||
          IS_KEY ("$date") || IS_KEY ("$undefined") || IS_KEY ("$maxKey") ||
          IS_KEY ("$minKey") || IS_KEY ("$timestamp") ||
          IS_KEY ("$numberInt") || IS_KEY ("$numberLong") ||
          IS_KEY ("$numberDouble") || IS_KEY ("$numberDecimal") ||
          IS_KEY ("$dbPointer") || IS_KEY ("$symbol"));
#undef IS_KEY
   return ret;
}
/* Copy a map key into the reader's key buffer and point bson->key at the
 * NUL-terminated copy. */
static void
_bson_json_save_map_key (bson_json_reader_bson_t *bson,
                         const uint8_t *val,
                         size_t len)
{
   _bson_json_buf_set (&bson->key_buf, val, len);
   bson->key = (const char *) bson->key_buf.buf;
}
/* Handle a "$code" or "$scope" map key. Outside of a scope this records the
 * element key and transitions into code / code-with-scope parsing; inside an
 * already-open $scope it is just a regular subdocument key. */
static void
_bson_json_read_code_or_scope_key (bson_json_reader_bson_t *bson,
                                   bool is_scope,
                                   const uint8_t *val,
                                   size_t len)
{
   bson_json_code_t *code = &bson->code_data;
   if (code->in_scope) {
      /* we're reading something weirdly nested, e.g. we just read "$code" in
       * "$scope: {x: {$code: {}}}". just create the subdoc within the scope. */
      bson->read_state = BSON_JSON_REGULAR;
      STACK_PUSH_DOC (bson_append_document_begin (STACK_BSON_PARENT,
                                                  bson->key,
                                                  (int) bson->key_buf.len,
                                                  STACK_BSON_CHILD));
      _bson_json_save_map_key (bson, val, len);
   } else {
      if (!bson->code_data.key_buf.len) {
         /* save the key, e.g. {"key": {"$code": "return x", "$scope":{"x":1}}},
          * in case it is overwritten while parsing scope sub-object */
         _bson_json_buf_set (
            &bson->code_data.key_buf, bson->key_buf.buf, bson->key_buf.len);
      }
      if (is_scope) {
         bson->bson_type = BSON_TYPE_CODEWSCOPE;
         bson->read_state = BSON_JSON_IN_BSON_TYPE_SCOPE_STARTMAP;
         bson->bson_state = BSON_JSON_LF_SCOPE;
         bson->code_data.has_scope = true;
      } else {
         bson->bson_type = BSON_TYPE_CODE;
         bson->bson_state = BSON_JSON_LF_CODE;
         bson->code_data.has_code = true;
      }
   }
}
/* Report a map key that is not valid inside the extended-JSON wrapper
 * currently being parsed. */
static void
_bson_json_bad_key_in_type (bson_json_reader_t *reader, /* IN */
                            const uint8_t *val)         /* IN */
{
   bson_json_reader_bson_t *bson = &reader->bson;
   _bson_json_read_set_error (
      reader,
      "Invalid key \"%s\". Looking for values for type \"%s\"",
      val,
      _bson_json_type_name (bson->bson_type));
}
/* Handle a JSON map key. Dispatches on the current read state: a "$"-key
 * right after "{" switches into extended-JSON wrapper parsing; otherwise
 * the key is saved for the value that follows. Note the unusual
 * "if HANDLE_OPTION (...)" formatting: the macro expands to a parenthesized
 * condition followed by a block. */
static void
_bson_json_read_map_key (bson_json_reader_t *reader, /* IN */
                         const uint8_t *val,         /* IN */
                         size_t len)                 /* IN */
{
   bson_json_reader_bson_t *bson = &reader->bson;
   if (!bson_utf8_validate ((const char *) val, len, true /* allow null */)) {
      _bson_json_read_corrupt (reader, "invalid bytes in UTF8 string");
      return;
   }
   if (bson->read_state == BSON_JSON_IN_START_MAP) {
      /* first key inside a fresh "{": a known $-key means this object is an
       * extended-JSON wrapper, otherwise it is a plain subdocument */
      if (len > 0 && val[0] == '$' && _is_known_key ((const char *) val, len) &&
          bson->n >= 0 /* key is in subdocument */) {
         bson->read_state = BSON_JSON_IN_BSON_TYPE;
         bson->bson_type = (bson_type_t) 0;
         memset (&bson->bson_type_data, 0, sizeof bson->bson_type_data);
      } else {
         bson->read_state = BSON_JSON_REGULAR;
         STACK_PUSH_DOC (bson_append_document_begin (STACK_BSON_PARENT,
                                                     bson->key,
                                                     (int) bson->key_buf.len,
                                                     STACK_BSON_CHILD));
      }
   } else if (bson->read_state == BSON_JSON_IN_SCOPE) {
      /* we've read "key" in {$code: "", $scope: {key: ""}}*/
      bson->read_state = BSON_JSON_REGULAR;
      STACK_PUSH_SCOPE (bson_init (STACK_BSON_CHILD));
      _bson_json_save_map_key (bson, val, len);
   } else if (bson->read_state == BSON_JSON_IN_DBPOINTER) {
      /* we've read "$ref" or "$id" in {$dbPointer: {$ref: ..., $id: ...}} */
      bson->read_state = BSON_JSON_REGULAR;
      STACK_PUSH_DBPOINTER (bson_init (STACK_BSON_CHILD));
      _bson_json_save_map_key (bson, val, len);
   }
   if (bson->read_state == BSON_JSON_IN_BSON_TYPE) {
      if
         HANDLE_OPTION ("$regex", BSON_TYPE_REGEX, BSON_JSON_LF_REGEX)
      else if
         HANDLE_OPTION ("$options", BSON_TYPE_REGEX, BSON_JSON_LF_OPTIONS)
      else if
         HANDLE_OPTION ("$oid", BSON_TYPE_OID, BSON_JSON_LF_OID)
      else if
         HANDLE_OPTION ("$binary", BSON_TYPE_BINARY, BSON_JSON_LF_BINARY)
      else if
         HANDLE_OPTION ("$type", BSON_TYPE_BINARY, BSON_JSON_LF_TYPE)
      else if
         HANDLE_OPTION ("$date", BSON_TYPE_DATE_TIME, BSON_JSON_LF_DATE)
      else if
         HANDLE_OPTION (
            "$undefined", BSON_TYPE_UNDEFINED, BSON_JSON_LF_UNDEFINED)
      else if
         HANDLE_OPTION ("$minKey", BSON_TYPE_MINKEY, BSON_JSON_LF_MINKEY)
      else if
         HANDLE_OPTION ("$maxKey", BSON_TYPE_MAXKEY, BSON_JSON_LF_MAXKEY)
      else if
         HANDLE_OPTION ("$numberInt", BSON_TYPE_INT32, BSON_JSON_LF_INT32)
      else if
         HANDLE_OPTION ("$numberLong", BSON_TYPE_INT64, BSON_JSON_LF_INT64)
      else if
         HANDLE_OPTION ("$numberDouble", BSON_TYPE_DOUBLE, BSON_JSON_LF_DOUBLE)
      else if
         HANDLE_OPTION ("$symbol", BSON_TYPE_SYMBOL, BSON_JSON_LF_SYMBOL)
      else if
         HANDLE_OPTION (
            "$numberDecimal", BSON_TYPE_DECIMAL128, BSON_JSON_LF_DECIMAL128)
      else if (!strcmp ("$timestamp", (const char *) val)) {
         bson->bson_type = BSON_TYPE_TIMESTAMP;
         bson->read_state = BSON_JSON_IN_BSON_TYPE_TIMESTAMP_STARTMAP;
      } else if (!strcmp ("$regularExpression", (const char *) val)) {
         bson->bson_type = BSON_TYPE_REGEX;
         bson->read_state = BSON_JSON_IN_BSON_TYPE_REGEX_STARTMAP;
      } else if (!strcmp ("$dbPointer", (const char *) val)) {
         /* start parsing "key": {"$dbPointer": {...}}, save "key" for later */
         _bson_json_buf_set (
            &bson->dbpointer_key, bson->key_buf.buf, bson->key_buf.len);
         bson->bson_type = BSON_TYPE_DBPOINTER;
         bson->read_state = BSON_JSON_IN_BSON_TYPE_DBPOINTER_STARTMAP;
      } else if (!strcmp ("$code", (const char *) val)) {
         _bson_json_read_code_or_scope_key (
            bson, false /* is_scope */, val, len);
      } else if (!strcmp ("$scope", (const char *) val)) {
         _bson_json_read_code_or_scope_key (
            bson, true /* is_scope */, val, len);
      } else {
         _bson_json_bad_key_in_type (reader, val);
      }
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DATE_NUMBERLONG) {
      if
         HANDLE_OPTION ("$numberLong", BSON_TYPE_DATE_TIME, BSON_JSON_LF_INT64)
      else {
         _bson_json_bad_key_in_type (reader, val);
      }
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES) {
      if
         HANDLE_OPTION ("t", BSON_TYPE_TIMESTAMP, BSON_JSON_LF_TIMESTAMP_T)
      else if
         HANDLE_OPTION ("i", BSON_TYPE_TIMESTAMP, BSON_JSON_LF_TIMESTAMP_I)
      else {
         _bson_json_bad_key_in_type (reader, val);
      }
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_REGEX_VALUES) {
      if
         HANDLE_OPTION (
            "pattern", BSON_TYPE_REGEX, BSON_JSON_LF_REGULAR_EXPRESSION_PATTERN)
      else if
         HANDLE_OPTION (
            "options", BSON_TYPE_REGEX, BSON_JSON_LF_REGULAR_EXPRESSION_OPTIONS)
      else {
         _bson_json_bad_key_in_type (reader, val);
      }
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_BINARY_VALUES) {
      if
         HANDLE_OPTION ("base64", BSON_TYPE_BINARY, BSON_JSON_LF_BINARY)
      else if
         HANDLE_OPTION ("subType", BSON_TYPE_BINARY, BSON_JSON_LF_TYPE)
      else {
         _bson_json_bad_key_in_type (reader, val);
      }
   } else {
      _bson_json_save_map_key (bson, val, len);
      /* in x: {$ref: "collection", $id: {$oid: "..."}, $db: "..." } */
      if (bson->n > 0) {
         if (!strcmp ("$ref", (const char *) val)) {
            STACK_HAS_REF = true;
            bson->read_state = BSON_JSON_IN_BSON_TYPE;
            bson->bson_state = BSON_JSON_LF_DBREF;
         } else if (!strcmp ("$id", (const char *) val)) {
            STACK_HAS_ID = true;
         } else if (!strcmp ("$db", (const char *) val)) {
            bson->read_state = BSON_JSON_IN_BSON_TYPE;
            bson->bson_state = BSON_JSON_LF_DBREF;
         }
      }
   }
}
/* Append the accumulated $binary value (decoded bytes in bson_type_buf[0],
 * subtype in bson_type_data.binary) once both halves have been seen. Both
 * the legacy {$binary, $type} and the v2 {base64, subType} forms require
 * payload and subtype. */
static void
_bson_json_read_append_binary (bson_json_reader_t *reader,   /* IN */
                               bson_json_reader_bson_t *bson) /* IN */
{
   bson_json_bson_data_t *data = &bson->bson_type_data;
   if (data->binary.is_legacy) {
      if (!data->binary.has_binary) {
         _bson_json_read_set_error (
            reader,
            "Missing \"$binary\" after \"$type\" reading type \"binary\"");
         return;
      } else if (!data->binary.has_subtype) {
         _bson_json_read_set_error (
            reader,
            "Missing \"$type\" after \"$binary\" reading type \"binary\"");
         return;
      }
   } else {
      if (!data->binary.has_binary) {
         _bson_json_read_set_error (
            reader,
            "Missing \"base64\" after \"subType\" reading type \"binary\"");
         return;
      } else if (!data->binary.has_subtype) {
         _bson_json_read_set_error (
            reader,
            "Missing \"subType\" after \"base64\" reading type \"binary\"");
         return;
      }
   }
   if (!bson_append_binary (STACK_BSON_CHILD,
                            bson->key,
                            (int) bson->key_buf.len,
                            data->binary.type,
                            bson->bson_type_buf[0].buf,
                            (uint32_t) bson->bson_type_buf[0].len)) {
      _bson_json_read_set_error (reader, "Error storing binary data");
   }
}
/* Append the accumulated regex (pattern in bson_type_buf[0], options in
 * bson_type_buf[1]) once both pieces have been seen; error messages name
 * the legacy ($regex/$options) or v2 (pattern/options) keys accordingly. */
static void
_bson_json_read_append_regex (bson_json_reader_t *reader,   /* IN */
                              bson_json_reader_bson_t *bson) /* IN */
{
   bson_json_bson_data_t *data = &bson->bson_type_data;
   if (data->regex.is_legacy) {
      if (!data->regex.has_pattern) {
         _bson_json_read_set_error (reader,
                                    "Missing \"$regex\" after \"$options\"");
         return;
      }
      if (!data->regex.has_options) {
         _bson_json_read_set_error (reader,
                                    "Missing \"$options\" after \"$regex\"");
         return;
      }
   } else if (!data->regex.has_pattern) {
      _bson_json_read_set_error (
         reader, "Missing \"pattern\" after \"options\" in regular expression");
      return;
   } else if (!data->regex.has_options) {
      _bson_json_read_set_error (
         reader, "Missing \"options\" after \"pattern\" in regular expression");
      return;
   }
   if (!bson_append_regex (STACK_BSON_CHILD,
                           bson->key,
                           (int) bson->key_buf.len,
                           (char *) bson->bson_type_buf[0].buf,
                           (char *) bson->bson_type_buf[1].buf)) {
      _bson_json_read_set_error (reader, "Error storing regex");
   }
}
/* Append the accumulated $code (and optional $scope) element. Called when
 * the wrapper object closes; must not be called while still inside $scope. */
static void
_bson_json_read_append_code (bson_json_reader_t *reader,   /* IN */
                             bson_json_reader_bson_t *bson) /* IN */
{
   bson_json_code_t *code_data;
   char *code = NULL;
   bson_t *scope = NULL;
   bool r;
   code_data = &bson->code_data;
   BSON_ASSERT (!code_data->in_scope);
   if (!code_data->has_code) {
      _bson_json_read_set_error (reader, "Missing $code after $scope");
      return;
   }
   code = (char *) code_data->code_buf.buf;
   if (code_data->has_scope) {
      /* the scope document was built one stack frame above the current one */
      scope = STACK_BSON (1);
   }
   /* creates BSON "code" elem, or "code with scope" if scope is not NULL */
   r = bson_append_code_with_scope (STACK_BSON_CHILD,
                                    (const char *) code_data->key_buf.buf,
                                    (int) code_data->key_buf.len,
                                    code,
                                    scope);
   if (!r) {
      _bson_json_read_set_error (reader, "Error storing Javascript code");
   }
   if (scope) {
      bson_destroy (scope);
   }
   /* keep the buffer but truncate it */
   code_data->key_buf.len = 0;
   code_data->has_code = code_data->has_scope = false;
}
/* Append a DBPointer from the subdocument built for {$dbPointer: {...}}.
 * The subdocument (one stack frame up) must contain exactly $id (an OID)
 * and $ref (a UTF-8 namespace string); the element key was saved earlier
 * in reader->bson.dbpointer_key. */
static void
_bson_json_read_append_dbpointer (bson_json_reader_t *reader,   /* IN */
                                  bson_json_reader_bson_t *bson) /* IN */
{
   bson_t *db_pointer;
   bson_iter_t iter;
   const char *ns = NULL;
   const bson_oid_t *oid = NULL;
   bool r;
   BSON_ASSERT (reader->bson.dbpointer_key.buf);
   db_pointer = STACK_BSON (1);
   if (!bson_iter_init (&iter, db_pointer)) {
      _bson_json_read_set_error (reader, "Error storing DBPointer");
      return;
   }
   while (bson_iter_next (&iter)) {
      if (!strcmp (bson_iter_key (&iter), "$id")) {
         if (!BSON_ITER_HOLDS_OID (&iter)) {
            _bson_json_read_set_error (
               reader, "$dbPointer.$id must be like {\"$oid\": ...\"}");
            return;
         }
         oid = bson_iter_oid (&iter);
      } else if (!strcmp (bson_iter_key (&iter), "$ref")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            _bson_json_read_set_error (
               reader,
               "$dbPointer.$ref must be a string like \"db.collection\"");
            return;
         }
         ns = bson_iter_utf8 (&iter, NULL);
      } else {
         _bson_json_read_set_error (reader,
                                    "$dbPointer contains invalid key: \"%s\"",
                                    bson_iter_key (&iter));
         return;
      }
   }
   if (!oid || !ns) {
      _bson_json_read_set_error (reader,
                                 "$dbPointer requires both $id and $ref");
      return;
   }
   r = bson_append_dbpointer (STACK_BSON_CHILD,
                              (char *) reader->bson.dbpointer_key.buf,
                              (int) reader->bson.dbpointer_key.len,
                              ns,
                              oid);
   if (!r) {
      _bson_json_read_set_error (reader, "Error storing DBPointer");
   }
}
/* Append the ObjectId parsed from a {"$oid": "..."} wrapper. */
static void
_bson_json_read_append_oid (bson_json_reader_t *reader,   /* IN */
                            bson_json_reader_bson_t *bson) /* IN */
{
   bool appended = bson_append_oid (STACK_BSON_CHILD,
                                    bson->key,
                                    (int) bson->key_buf.len,
                                    &bson->bson_type_data.oid.oid);
   if (!appended) {
      _bson_json_read_set_error (reader, "Error storing ObjectId");
   }
}
/* Append the datetime (millis since the epoch) parsed from a $date wrapper. */
static void
_bson_json_read_append_date_time (bson_json_reader_t *reader,   /* IN */
                                  bson_json_reader_bson_t *bson) /* IN */
{
   bool appended = bson_append_date_time (STACK_BSON_CHILD,
                                          bson->key,
                                          (int) bson->key_buf.len,
                                          bson->bson_type_data.date.date);
   if (!appended) {
      _bson_json_read_set_error (reader, "Error storing datetime");
   }
}
/* Append the timestamp parsed from a {$timestamp: {t: ..., i: ...}}
 * wrapper; both fields are required.
 * FIX: the return value of bson_append_timestamp was ignored, unlike every
 * other _bson_json_read_append_* helper; append failure is now reported. */
static void
_bson_json_read_append_timestamp (bson_json_reader_t *reader,   /* IN */
                                  bson_json_reader_bson_t *bson) /* IN */
{
   if (!bson->bson_type_data.timestamp.has_t) {
      _bson_json_read_set_error (
         reader, "Missing t after $timestamp in BSON_TYPE_TIMESTAMP");
      return;
   } else if (!bson->bson_type_data.timestamp.has_i) {
      _bson_json_read_set_error (
         reader, "Missing i after $timestamp in BSON_TYPE_TIMESTAMP");
      return;
   }
   if (!bson_append_timestamp (STACK_BSON_CHILD,
                               bson->key,
                               (int) bson->key_buf.len,
                               bson->bson_type_data.timestamp.t,
                               bson->bson_type_data.timestamp.i)) {
      _bson_json_read_set_error (reader, "Error storing timestamp");
   }
}
/* Generic "this is not valid MongoDB extended JSON" error. */
static void
_bad_extended_json (bson_json_reader_t *reader)
{
   _bson_json_read_corrupt (reader, "Invalid MongoDB extended JSON");
}
/* Handle a closing "}".  Depending on read_state this either ends an
 * ordinary (sub)document, or completes a MongoDB extended-JSON wrapper
 * (e.g. {"$oid": ...}, {"$timestamp": {...}}) and appends the single
 * BSON value it encodes instead of a subdocument. */
static void
_bson_json_read_end_map (bson_json_reader_t *reader) /* IN */
{
   bson_json_reader_bson_t *bson = &reader->bson;

   if (bson->read_state == BSON_JSON_IN_START_MAP) {
      /* "{" turned out to start an ordinary document: open it now */
      bson->read_state = BSON_JSON_REGULAR;
      STACK_PUSH_DOC (bson_append_document_begin (STACK_BSON_PARENT,
                                                  bson->key,
                                                  (int) bson->key_buf.len,
                                                  STACK_BSON_CHILD));
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_SCOPE_STARTMAP) {
      /* entering the $scope subdocument of a code-with-scope */
      bson->read_state = BSON_JSON_REGULAR;
      STACK_PUSH_SCOPE (bson_init (STACK_BSON_CHILD));
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DBPOINTER_STARTMAP) {
      /* we've read last "}" in "{$dbPointer: {$id: ..., $ref: ...}}" */
      _bson_json_read_append_dbpointer (reader, bson);
      bson->read_state = BSON_JSON_REGULAR;
      return;
   }
   if (bson->read_state == BSON_JSON_IN_BSON_TYPE) {
      if (!bson->key) {
         /* invalid, like {$numberLong: "1"} at the document top level */
         _bad_extended_json (reader);
         return;
      }
      /* wrapper object is complete: emit one value of bson->bson_type */
      bson->read_state = BSON_JSON_REGULAR;
      switch (bson->bson_type) {
      case BSON_TYPE_REGEX:
         _bson_json_read_append_regex (reader, bson);
         break;
      case BSON_TYPE_CODE:
      case BSON_TYPE_CODEWSCOPE:
         /* we've read the closing "}" in "{$code: ..., $scope: ...}" */
         _bson_json_read_append_code (reader, bson);
         break;
      case BSON_TYPE_OID:
         _bson_json_read_append_oid (reader, bson);
         break;
      case BSON_TYPE_BINARY:
         _bson_json_read_append_binary (reader, bson);
         break;
      case BSON_TYPE_DATE_TIME:
         _bson_json_read_append_date_time (reader, bson);
         break;
      case BSON_TYPE_UNDEFINED:
         bson_append_undefined (
            STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len);
         break;
      case BSON_TYPE_MINKEY:
         bson_append_minkey (
            STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len);
         break;
      case BSON_TYPE_MAXKEY:
         bson_append_maxkey (
            STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len);
         break;
      case BSON_TYPE_INT32:
         bson_append_int32 (STACK_BSON_CHILD,
                            bson->key,
                            (int) bson->key_buf.len,
                            bson->bson_type_data.v_int32.value);
         break;
      case BSON_TYPE_INT64:
         bson_append_int64 (STACK_BSON_CHILD,
                            bson->key,
                            (int) bson->key_buf.len,
                            bson->bson_type_data.v_int64.value);
         break;
      case BSON_TYPE_DOUBLE:
         bson_append_double (STACK_BSON_CHILD,
                             bson->key,
                             (int) bson->key_buf.len,
                             bson->bson_type_data.v_double.value);
         break;
      case BSON_TYPE_DECIMAL128:
         bson_append_decimal128 (STACK_BSON_CHILD,
                                 bson->key,
                                 (int) bson->key_buf.len,
                                 &bson->bson_type_data.v_decimal128.value);
         break;
      case BSON_TYPE_DBPOINTER:
         /* shouldn't set type to DBPointer unless inside $dbPointer: {...} */
         _bson_json_read_set_error (
            reader,
            "Internal error: shouldn't be in state BSON_TYPE_DBPOINTER");
         break;
      case BSON_TYPE_SYMBOL:
         /* no append performed for symbol here */
         break;
      case BSON_TYPE_EOD:
      case BSON_TYPE_UTF8:
      case BSON_TYPE_DOCUMENT:
      case BSON_TYPE_ARRAY:
      case BSON_TYPE_BOOL:
      case BSON_TYPE_NULL:
      case BSON_TYPE_TIMESTAMP:
      default:
         /* these types are never represented by a one-key wrapper object */
         _bson_json_read_set_error (
            reader,
            "Internal error: can't parse JSON wrapper for type \"%s\"",
            _bson_json_type_name (bson->bson_type));
         break;
      }
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES) {
      /* inner "}" closing the value subdocument of a $timestamp wrapper */
      if (!bson->key) {
         _bad_extended_json (reader);
         return;
      }
      bson->read_state = BSON_JSON_IN_BSON_TYPE_TIMESTAMP_ENDMAP;
      _bson_json_read_append_timestamp (reader, bson);
      return;
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_REGEX_VALUES) {
      /* inner "}" closing the value subdocument of a regex wrapper */
      if (!bson->key) {
         _bad_extended_json (reader);
         return;
      }
      bson->read_state = BSON_JSON_IN_BSON_TYPE_REGEX_ENDMAP;
      _bson_json_read_append_regex (reader, bson);
      return;
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_BINARY_VALUES) {
      /* inner "}" closing the value subdocument of a $binary wrapper */
      if (!bson->key) {
         _bad_extended_json (reader);
         return;
      }
      bson->read_state = BSON_JSON_IN_BSON_TYPE_BINARY_ENDMAP;
      _bson_json_read_append_binary (reader, bson);
      return;
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_ENDMAP) {
      /* outer "}" of a completed wrapper: back to normal parsing */
      bson->read_state = BSON_JSON_REGULAR;
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_REGEX_ENDMAP) {
      bson->read_state = BSON_JSON_REGULAR;
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_BINARY_ENDMAP) {
      bson->read_state = BSON_JSON_REGULAR;
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DATE_NUMBERLONG) {
      /* the inner {$numberLong: ...} of a $date wrapper just closed */
      if (!bson->key) {
         _bad_extended_json (reader);
         return;
      }
      bson->read_state = BSON_JSON_IN_BSON_TYPE_DATE_ENDMAP;
      _bson_json_read_append_date_time (reader, bson);
      return;
   } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DATE_ENDMAP) {
      bson->read_state = BSON_JSON_REGULAR;
   } else if (bson->read_state == BSON_JSON_REGULAR) {
      if (STACK_IS_SCOPE) {
         /* finished the $scope doc: resume the code-with-scope wrapper */
         bson->read_state = BSON_JSON_IN_BSON_TYPE;
         bson->bson_type = BSON_TYPE_CODE;
         STACK_POP_SCOPE;
      } else if (STACK_IS_DBPOINTER) {
         bson->read_state = BSON_JSON_IN_BSON_TYPE_DBPOINTER_STARTMAP;
         STACK_POP_DBPOINTER;
      } else {
         /* ordinary subdocument; a DBRef must have $ref and $id together */
         if (STACK_HAS_ID != STACK_HAS_REF) {
            _bson_json_read_set_error (
               reader, "%s", "DBRef object must have both $ref and $id keys");
         }
         STACK_POP_DOC (
            bson_append_document_end (STACK_BSON_PARENT, STACK_BSON_CHILD));
      }
      if (bson->n == -1) {
         /* stack empty again: the top-level document is complete */
         bson->read_state = BSON_JSON_DONE;
      }
   } else if (bson->read_state == BSON_JSON_IN_SCOPE) {
      /* empty $scope */
      BSON_ASSERT (bson->code_data.has_scope);
      STACK_PUSH_SCOPE (bson_init (STACK_BSON_CHILD));
      STACK_POP_SCOPE;
      bson->read_state = BSON_JSON_IN_BSON_TYPE;
      bson->bson_type = BSON_TYPE_CODE;
   } else if (bson->read_state == BSON_JSON_IN_DBPOINTER) {
      /* empty $dbPointer??? */
      _bson_json_read_set_error (reader, "Empty $dbPointer");
   } else {
      _bson_json_read_set_error (
         reader, "Invalid state \"%s\"", read_state_names[bson->read_state]);
   }
}
/* Handle an opening "[".  Fix: this span contained unresolved unified-diff
 * markers; resolved to the patched form, where the read-state check runs
 * before any stack manipulation so that a "[" in a special state (e.g.
 * inside an extended-JSON wrapper) errors even at the top level. */
static void
_bson_json_read_start_array (bson_json_reader_t *reader) /* IN */
{
   const char *key;
   size_t len;
   bson_json_reader_bson_t *bson = &reader->bson;

   if (bson->read_state != BSON_JSON_REGULAR) {
      _bson_json_read_set_error (reader,
                                 "Invalid read of \"[\" in state \"%s\"",
                                 read_state_names[bson->read_state]);
      return;
   }

   if (bson->n < 0) {
      /* top-level array: nothing to append, just track nesting */
      STACK_PUSH_ARRAY (_noop ());
   } else {
      _bson_json_read_fixup_key (bson);
      key = bson->key;
      len = bson->key_buf.len;

      STACK_PUSH_ARRAY (bson_append_array_begin (
         STACK_BSON_PARENT, key, (int) len, STACK_BSON_CHILD));
   }
}
/* Handle a closing "]": pop the current array off the stack.  A "]" is
 * only legal while in the regular parse state. */
static void
_bson_json_read_end_array (bson_json_reader_t *reader) /* IN */
{
   bson_json_reader_bson_t *b = &reader->bson;

   if (b->read_state != BSON_JSON_REGULAR) {
      _bson_json_read_set_error (reader,
                                 "Invalid read of \"]\" in state \"%s\"",
                                 read_state_names[b->read_state]);
      return;
   }

   STACK_POP_ARRAY (
      bson_append_array_end (STACK_BSON_PARENT, STACK_BSON_CHILD));

   /* n == -1 means the outermost value just closed: document complete */
   if (b->n == -1) {
      b->read_state = BSON_JSON_DONE;
   }
}
/* Put the unescaped form of json_text into reader->bson.unescaped, or set
 * reader->error.  json_text has length len and is not null-terminated. */
static bool
_bson_json_unescape (bson_json_reader_t *reader,
                     struct jsonsl_state_st *state,
                     const char *json_text,
                     ssize_t len)
{
   jsonsl_error_t jerr;
   bson_json_reader_bson_t *rb = &reader->bson;

   /* reserve space for the result plus a trailing NUL; the unescaped
    * string is never longer than the escaped input */
   _bson_json_buf_ensure (&rb->unescaped, (size_t) len + 1);

   rb->unescaped.len = jsonsl_util_unescape (
      json_text, (char *) rb->unescaped.buf, (size_t) len, NULL, &jerr);

   if (jerr != JSONSL_ERROR_SUCCESS) {
      bson_set_error (reader->error,
                      BSON_ERROR_JSON,
                      BSON_JSON_ERROR_READ_CORRUPT_JS,
                      "error near position %d: \"%s\"",
                      (int) state->pos_begin,
                      jsonsl_strerror (jerr));
      return false;
   }

   rb->unescaped.buf[rb->unescaped.len] = '\0';
   return true;
}
/* Return the full text of the current token and fill out @len with its
 * length.  The token may live entirely inside the current chunk @buf, or
 * be split between previously saved bytes (tok_accumulator) and @buf. */
static const char *
_get_json_text (jsonsl_t json, /* IN */
                struct jsonsl_state_st *state, /* IN */
                const char *buf /* IN */,
                ssize_t *len /* OUT */)
{
   bson_json_reader_t *reader = (bson_json_reader_t *) json->data;
   ssize_t in_this_chunk = buf - json->base;

   BSON_ASSERT (state->pos_cur > state->pos_begin);
   *len = (ssize_t) (state->pos_cur - state->pos_begin);

   if (*len <= in_this_chunk) {
      /* whole token is available in the current chunk */
      return buf - (size_t) *len;
   }

   /* token began in an earlier chunk: append this chunk's portion to the
    * accumulator and return the combined text */
   if (in_this_chunk > 0) {
      _bson_json_buf_append (
         &reader->tok_accumulator, buf - in_this_chunk, (size_t) in_this_chunk);
   }
   return (const char *) reader->tok_accumulator.buf;
}
/* jsonsl "push" hook: a new token or container has just opened. */
static void
_push_callback (jsonsl_t json,
                jsonsl_action_t action,
                struct jsonsl_state_st *state,
                const char *buf)
{
   bson_json_reader_t *reader = (bson_json_reader_t *) json->data;

   switch (state->type) {
   case JSONSL_T_OBJECT:
      _bson_json_read_start_map (reader);
      break;
   case JSONSL_T_LIST:
      _bson_json_read_start_array (reader);
      break;
   case JSONSL_T_STRING:
   case JSONSL_T_HKEY:
   case JSONSL_T_SPECIAL:
   case JSONSL_T_UESCAPE:
      /* remember where this text token starts so it can be accumulated
       * across chunk boundaries */
      reader->json_text_pos = state->pos_begin;
      break;
   default:
      break;
   }
}
/* jsonsl "pop" hook: a token or container has just ended.  Dispatch it to
 * the matching reader handler (map key, string, end-of-object/array, or a
 * bare number/boolean/null token). */
static void
_pop_callback (jsonsl_t json,
               jsonsl_action_t action,
               struct jsonsl_state_st *state,
               const char *buf)
{
   bson_json_reader_t *reader;
   bson_json_reader_bson_t *reader_bson;
   ssize_t len;
   double d;
   const char *obj_text;

   reader = (bson_json_reader_t *) json->data;
   reader_bson = &reader->bson;

   switch (state->type) {
   case JSONSL_T_HKEY:
   case JSONSL_T_STRING:
      obj_text = _get_json_text (json, state, buf, &len);
      BSON_ASSERT (obj_text[0] == '"');
      /* remove start/end quotes, replace backslash-escapes, null-terminate */
      /* you'd think it would be faster to check if state->nescapes > 0 first,
       * but tests show no improvement */
      if (!_bson_json_unescape (reader, state, obj_text + 1, len - 1)) {
         /* reader->error is set */
         jsonsl_stop (json);
         break;
      }
      if (state->type == JSONSL_T_HKEY) {
         _bson_json_read_map_key (
            reader, reader_bson->unescaped.buf, reader_bson->unescaped.len);
      } else {
         _bson_json_read_string (
            reader, reader_bson->unescaped.buf, reader_bson->unescaped.len);
      }
      break;
   case JSONSL_T_OBJECT:
      _bson_json_read_end_map (reader);
      break;
   case JSONSL_T_LIST:
      _bson_json_read_end_array (reader);
      break;
   case JSONSL_T_SPECIAL:
      /* bare token: a number, true/false, or null */
      obj_text = _get_json_text (json, state, buf, &len);
      if (state->special_flags & JSONSL_SPECIALf_NUMNOINT) {
         /* NUMNOINT: a non-integer number, parsed as a double */
         if (_bson_json_parse_double (reader, obj_text, (size_t) len, &d)) {
            _bson_json_read_double (reader, d);
         }
      } else if (state->special_flags & JSONSL_SPECIALf_NUMERIC) {
         /* jsonsl puts the unsigned value in state->nelem */
         _bson_json_read_integer (
            reader,
            state->nelem,
            state->special_flags & JSONSL_SPECIALf_SIGNED ? -1 : 1);
      } else if (state->special_flags & JSONSL_SPECIALf_BOOLEAN) {
         _bson_json_read_boolean (reader, obj_text[0] == 't' ? 1 : 0);
      } else if (state->special_flags & JSONSL_SPECIALf_NULL) {
         _bson_json_read_null (reader);
      }
      break;
   default:
      break;
   }

   /* token fully consumed: reset cross-chunk accumulation state */
   reader->json_text_pos = -1;
   reader->tok_accumulator.len = 0;
}
/* jsonsl error hook.  Fix: this span contained unresolved unified-diff
 * deletion markers; resolved to the patched form, which drops the old
 * special case for embedded NUL bytes.  Return 0 to stop parsing. */
static int
_error_callback (jsonsl_t json,
                 jsonsl_error_t err,
                 struct jsonsl_state_st *state,
                 char *errat)
{
   bson_json_reader_t *reader = (bson_json_reader_t *) json->data;

   if (err == JSONSL_ERROR_CANT_INSERT && *errat == '{') {
      /* not a real failure: a second document follows the first in the
       * stream.  Remember where it begins and request a reset. */
      reader->should_reset = true;
      reader->advance = errat - json->base;
      return 0;
   }

   bson_set_error (reader->error,
                   BSON_ERROR_JSON,
                   BSON_JSON_ERROR_READ_CORRUPT_JS,
                   "Got parse error at \"%c\", position %d: \"%s\"",
                   *errat,
                   (int) json->pos,
                   jsonsl_strerror (err));
   return 0;
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_json_reader_read --
 *
 *       Read the next json document from @reader and write its value
 *       into @bson. @bson will be allocated as part of this process.
 *
 *       @bson MUST be initialized before calling this function as it
 *       will not be initialized automatically. The reasoning for this
 *       is so that you can chain together bson_json_reader_t with
 *       other components like bson_writer_t.
 *
 * Returns:
 *       1 if successful and data was read.
 *       0 if successful and no data was read.
 *       -1 if there was an error and @error is set.
 *
 * Side effects:
 *       @error may be set.
 *
 *--------------------------------------------------------------------------
 */
int
bson_json_reader_read (bson_json_reader_t *reader, /* IN */
                       bson_t *bson, /* IN */
                       bson_error_t *error) /* OUT */
{
   bson_json_reader_producer_t *p;
   ssize_t start_pos;
   ssize_t r;
   ssize_t buf_offset;
   ssize_t accum;
   bson_error_t error_tmp;
   int ret = 0;

   BSON_ASSERT (reader);
   BSON_ASSERT (bson);

   p = &reader->producer;
   reader->bson.bson = bson;
   reader->bson.n = -1;
   reader->bson.read_state = BSON_JSON_REGULAR;
   /* always have a valid error target, even if the caller passed NULL */
   reader->error = error ? error : &error_tmp;
   memset (reader->error, 0, sizeof (bson_error_t));

   for (;;) {
      start_pos = reader->json->pos;
      if (p->bytes_read > 0) {
         /* leftover data from previous JSON doc in the stream */
         r = p->bytes_read;
      } else {
         /* read a chunk of bytes by executing the callback */
         r = p->cb (p->data, p->buf, p->buf_size);
      }
      if (r < 0) {
         if (error) {
            bson_set_error (error,
                            BSON_ERROR_JSON,
                            BSON_JSON_ERROR_READ_CB_FAILURE,
                            "reader cb failed");
         }
         ret = -1;
         goto cleanup;
      } else if (r == 0) {
         /* end of stream */
         break;
      } else {
         ret = 1;
         p->bytes_read = (size_t) r;
         jsonsl_feed (reader->json, (const jsonsl_char_t *) p->buf, (size_t) r);
         if (reader->should_reset) {
            /* end of a document */
            jsonsl_reset (reader->json);
            reader->should_reset = false;
            /* advance past already-parsed data */
            memmove (p->buf, p->buf + reader->advance, r - reader->advance);
            p->bytes_read -= reader->advance;
            ret = 1;
            goto cleanup;
         }
         if (reader->error->domain) {
            ret = -1;
            goto cleanup;
         }
         /* accumulate a key or string value */
         if (reader->json_text_pos != -1) {
            if (reader->json_text_pos < reader->json->pos) {
               accum = BSON_MIN (reader->json->pos - reader->json_text_pos, r);
               /* if this chunk stopped mid-token, buf_offset is how far into
                * our current chunk the token begins. */
               buf_offset = AT_LEAST_0 (reader->json_text_pos - start_pos);
               _bson_json_buf_append (&reader->tok_accumulator,
                                      p->buf + buf_offset,
                                      (size_t) accum);
            }
         }
         p->bytes_read = 0;
      }
   }

cleanup:
   if (ret == 1 && reader->bson.read_state != BSON_JSON_DONE) {
      /* data ended in the middle */
      _bson_json_read_corrupt (reader, "%s", "Incomplete JSON");
      return -1;
   }
   return ret;
}
/* Create a reader whose JSON bytes come from @cb; @dcb (if non-NULL) is
 * invoked on @data when the reader is destroyed.  allow_multiple is
 * unused.  buf_size of 0 selects the default chunk size. */
bson_json_reader_t *
bson_json_reader_new (void *data, /* IN */
                      bson_json_reader_cb cb, /* IN */
                      bson_json_destroy_cb dcb, /* IN */
                      bool allow_multiple, /* unused */
                      size_t buf_size) /* IN */
{
   bson_json_reader_t *reader = bson_malloc0 (sizeof *reader);

   reader->json = jsonsl_new (STACK_MAX);
   reader->json->error_callback = _error_callback;
   reader->json->action_callback_PUSH = _push_callback;
   reader->json->action_callback_POP = _pop_callback;
   reader->json->data = reader;
   reader->json_text_pos = -1; /* no text token in progress */
   jsonsl_enable_all_callbacks (reader->json);

   reader->producer.data = data;
   reader->producer.cb = cb;
   reader->producer.dcb = dcb;
   reader->producer.buf_size = buf_size ? buf_size : BSON_JSON_DEFAULT_BUF_SIZE;
   reader->producer.buf = bson_malloc (reader->producer.buf_size);

   return reader;
}
/* Tear down a reader: run the producer's destroy callback on its user
 * data, then free every buffer the reader owns. */
void
bson_json_reader_destroy (bson_json_reader_t *reader) /* IN */
{
   bson_json_reader_producer_t *producer = &reader->producer;
   bson_json_reader_bson_t *bson = &reader->bson;
   int i;

   if (producer->dcb) {
      producer->dcb (producer->data);
   }

   bson_free (producer->buf);
   bson_free (bson->key_buf.buf);
   bson_free (bson->unescaped.buf);
   bson_free (bson->dbpointer_key.buf);

   for (i = 0; i < 3; i++) {
      bson_free (bson->bson_type_buf[i].buf);
   }

   _bson_json_code_cleanup (&bson->code_data);
   jsonsl_destroy (reader->json);
   bson_free (reader->tok_accumulator.buf);
   bson_free (reader);
}
/* Producer context for in-memory readers: a borrowed byte buffer plus a
 * cursor tracking how much of it has been consumed. */
typedef struct {
   const uint8_t *data;  /* borrowed; set by bson_json_data_reader_ingest */
   size_t len;           /* total bytes in data */
   size_t bytes_parsed;  /* bytes already handed to the parser */
} bson_json_data_reader_t;
/* Producer callback for in-memory readers: copy up to @len unread bytes
 * from the ingested buffer into @buf.  Returns the number of bytes
 * copied, or -1 if no buffer has been ingested yet. */
static ssize_t
_bson_json_data_reader_cb (void *_ctx, uint8_t *buf, size_t len)
{
   bson_json_data_reader_t *ctx = (bson_json_data_reader_t *) _ctx;
   size_t remaining;
   size_t nbytes;

   if (!ctx->data) {
      return -1;
   }

   remaining = ctx->len - ctx->bytes_parsed;
   nbytes = BSON_MIN (len, remaining);
   memcpy (buf, ctx->data + ctx->bytes_parsed, nbytes);
   ctx->bytes_parsed += nbytes;
   return nbytes;
}
/* Create a reader over an in-memory buffer (fed via
 * bson_json_data_reader_ingest).  The context struct is owned by the
 * reader and released with bson_free on destroy. */
bson_json_reader_t *
bson_json_data_reader_new (bool allow_multiple, /* IN */
                           size_t size) /* IN */
{
   bson_json_data_reader_t *ctx = bson_malloc0 (sizeof *ctx);

   return bson_json_reader_new (
      ctx, &_bson_json_data_reader_cb, &bson_free, allow_multiple, size);
}
/* Point a data reader at a new buffer and rewind its cursor.  @data is
 * borrowed, not copied — it must outlive the next read. */
void
bson_json_data_reader_ingest (bson_json_reader_t *reader, /* IN */
                              const uint8_t *data, /* IN */
                              size_t len) /* IN */
{
   bson_json_data_reader_t *ctx;

   ctx = (bson_json_data_reader_t *) reader->producer.data;
   ctx->data = data;
   ctx->len = len;
   ctx->bytes_parsed = 0;
}
/* Parse one JSON document from @data into a newly allocated bson_t.
 * A negative @len means @data is NUL-terminated.  Returns NULL and sets
 * @error on parse failure or empty input. */
bson_t *
bson_new_from_json (const uint8_t *data, /* IN */
                    ssize_t len, /* IN */
                    bson_error_t *error) /* OUT */
{
   bson_json_reader_t *reader;
   bson_t *bson;
   int r;

   BSON_ASSERT (data);

   if (len < 0) {
      len = (ssize_t) strlen ((const char *) data);
   }

   bson = bson_new ();
   reader = bson_json_data_reader_new (false, BSON_JSON_DEFAULT_BUF_SIZE);
   bson_json_data_reader_ingest (reader, data, len);
   r = bson_json_reader_read (reader, bson, error);
   bson_json_reader_destroy (reader);

   if (r == 1) {
      return bson;
   }

   if (r == 0) {
      /* no data at all counts as an error for this one-shot API */
      bson_set_error (error,
                      BSON_ERROR_JSON,
                      BSON_JSON_ERROR_READ_INVALID_PARAM,
                      "Empty JSON string");
   }
   bson_destroy (bson);
   return NULL;
}
/* Parse one JSON document from @data into caller-provided @bson (which
 * this function initializes).  A negative @len means @data is
 * NUL-terminated.  On failure @bson is destroyed, @error is set, and
 * false is returned. */
bool
bson_init_from_json (bson_t *bson, /* OUT */
                     const char *data, /* IN */
                     ssize_t len, /* IN */
                     bson_error_t *error) /* OUT */
{
   bson_json_reader_t *reader;
   int r;

   BSON_ASSERT (bson);
   BSON_ASSERT (data);

   if (len < 0) {
      len = strlen (data);
   }

   bson_init (bson);
   reader = bson_json_data_reader_new (false, BSON_JSON_DEFAULT_BUF_SIZE);
   bson_json_data_reader_ingest (reader, (const uint8_t *) data, len);
   r = bson_json_reader_read (reader, bson, error);
   bson_json_reader_destroy (reader);

   if (r == 1) {
      return true;
   }

   if (r == 0) {
      /* no data at all counts as an error for this one-shot API */
      bson_set_error (error,
                      BSON_ERROR_JSON,
                      BSON_JSON_ERROR_READ_INVALID_PARAM,
                      "Empty JSON string");
   }
   bson_destroy (bson);
   return false;
}
/* Destroy callback for fd-based readers: close the descriptor if this
 * handle owns it, then free the handle. */
static void
_bson_json_reader_handle_fd_destroy (void *handle) /* IN */
{
   bson_json_reader_handle_fd_t *fd = handle;

   if (!fd) {
      return;
   }

   if (fd->do_close && (fd->fd != -1)) {
#ifdef _WIN32
      _close (fd->fd);
#else
      close (fd->fd);
#endif
   }

   bson_free (fd);
}
/* Producer callback for fd-based readers: read up to @len bytes from the
 * wrapped descriptor, retrying on EAGAIN.  Returns -1 on error or if the
 * handle has no usable fd. */
static ssize_t
_bson_json_reader_handle_fd_read (void *handle, /* IN */
                                  uint8_t *buf, /* IN */
                                  size_t len) /* IN */
{
   bson_json_reader_handle_fd_t *fd = handle;
   ssize_t nread = -1;

   if (fd && (fd->fd != -1)) {
      do {
#ifdef BSON_OS_WIN32
         nread = _read (fd->fd, buf, (unsigned int) len);
#else
         nread = read (fd->fd, buf, len);
#endif
      } while ((nread == -1) && (errno == EAGAIN));
   }

   return nread;
}
/* Wrap an already-open file descriptor in a JSON reader.  The reader
 * closes @fd on destroy iff close_on_destroy is true. */
bson_json_reader_t *
bson_json_reader_new_from_fd (int fd, /* IN */
                              bool close_on_destroy) /* IN */
{
   bson_json_reader_handle_fd_t *handle;

   BSON_ASSERT (fd != -1);

   handle = bson_malloc0 (sizeof *handle);
   handle->fd = fd;
   handle->do_close = close_on_destroy;

   return bson_json_reader_new (handle,
                                _bson_json_reader_handle_fd_read,
                                _bson_json_reader_handle_fd_destroy,
                                true,
                                BSON_JSON_DEFAULT_BUF_SIZE);
}
/* Open @path read-only (binary mode on Windows) and wrap it in a JSON
 * reader that closes the descriptor on destroy.  On open failure, set
 * @error from errno and return NULL. */
bson_json_reader_t *
bson_json_reader_new_from_file (const char *path, /* IN */
                                bson_error_t *error) /* OUT */
{
   char errmsg_buf[BSON_ERROR_BUFFER_SIZE];
   char *errmsg;
   int fd = -1;

   BSON_ASSERT (path);

#ifdef BSON_OS_WIN32
   _sopen_s (&fd, path, (_O_RDONLY | _O_BINARY), _SH_DENYNO, _S_IREAD);
#else
   fd = open (path, O_RDONLY);
#endif

   if (fd != -1) {
      return bson_json_reader_new_from_fd (fd, true);
   }

   errmsg = bson_strerror_r (errno, errmsg_buf, sizeof errmsg_buf);
   bson_set_error (
      error, BSON_ERROR_READER, BSON_ERROR_READER_BADFD, "%s", errmsg);
   return NULL;
}
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-json.h b/mongodb-1.4.2/src/libbson/src/bson/bson-json.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-json.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-json.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-keys.c b/mongodb-1.4.2/src/libbson/src/bson/bson-keys.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-keys.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-keys.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-keys.h b/mongodb-1.4.2/src/libbson/src/bson/bson-keys.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-keys.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-keys.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-macros.h b/mongodb-1.4.2/src/libbson/src/bson/bson-macros.h
similarity index 93%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-macros.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-macros.h
index b4517fa9..b0c07525 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-macros.h
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-macros.h
@@ -1,286 +1,296 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BSON_MACROS_H
#define BSON_MACROS_H
#if !defined(BSON_INSIDE) && !defined(BSON_COMPILATION)
#error "Only <bson.h> can be included directly."
#endif
#include <stdio.h>
#ifdef __cplusplus
#include <algorithm>
#endif
#include "bson-config.h"
#if BSON_OS == 1
#define BSON_OS_UNIX
#elif BSON_OS == 2
#define BSON_OS_WIN32
#else
#error "Unknown operating system."
#endif
#ifdef __cplusplus
#define BSON_BEGIN_DECLS extern "C" {
#define BSON_END_DECLS }
#else
#define BSON_BEGIN_DECLS
#define BSON_END_DECLS
#endif
#if defined (__GNUC__)
#define BSON_GNUC_CHECK_VERSION(major, minor) \
((__GNUC__ > (major)) || \
((__GNUC__ == (major)) && (__GNUC_MINOR__ >= (minor))))
#else
#define BSON_GNUC_CHECK_VERSION(major, minor) 0
#endif
#if defined (__GNUC__)
#define BSON_GNUC_IS_VERSION(major, minor) \
((__GNUC__ == (major)) && (__GNUC_MINOR__ == (minor)))
#else
#define BSON_GNUC_IS_VERSION(major, minor) 0
#endif
/* Decorate public functions:
* - if BSON_STATIC, we're compiling a program that uses libbson as a static
* library, don't decorate functions
* - else if BSON_COMPILATION, we're compiling a static or shared libbson, mark
* public functions for export from the shared lib (which has no effect on
* the static lib)
* - else, we're compiling a program that uses libbson as a shared library,
* mark public functions as DLL imports for Microsoft Visual C
*/
#ifdef _MSC_VER
/*
* Microsoft Visual C
*/
#ifdef BSON_STATIC
#define BSON_API
#elif defined(BSON_COMPILATION)
#define BSON_API __declspec(dllexport)
#else
#define BSON_API __declspec(dllimport)
#endif
#define BSON_CALL __cdecl
#elif defined(__GNUC__)
/*
* GCC
*/
#ifdef BSON_STATIC
#define BSON_API
#elif defined(BSON_COMPILATION)
#define BSON_API __attribute__ ((visibility ("default")))
#else
#define BSON_API
#endif
#define BSON_CALL
#else
/*
* Other compilers
*/
#define BSON_API
#define BSON_CALL
#endif
#define BSON_EXPORT(type) BSON_API type BSON_CALL
#ifdef MIN
#define BSON_MIN MIN
#elif defined(__cplusplus)
#define BSON_MIN(a, b) ((std::min) (a, b))
#elif defined(_MSC_VER)
#define BSON_MIN(a, b) ((a) < (b) ? (a) : (b))
#else
#define BSON_MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
#ifdef MAX
#define BSON_MAX MAX
#elif defined(__cplusplus)
#define BSON_MAX(a, b) ((std::max) (a, b))
#elif defined(_MSC_VER)
#define BSON_MAX(a, b) ((a) > (b) ? (a) : (b))
#else
#define BSON_MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifdef ABS
#define BSON_ABS ABS
#else
#define BSON_ABS(a) (((a) < 0) ? ((a) * -1) : (a))
#endif
#ifdef _MSC_VER
#ifdef _WIN64
#define BSON_ALIGN_OF_PTR 8
#else
#define BSON_ALIGN_OF_PTR 4
#endif
#else
#define BSON_ALIGN_OF_PTR (sizeof (void *))
#endif
#ifdef BSON_EXTRA_ALIGN
#if defined(_MSC_VER)
#define BSON_ALIGNED_BEGIN(_N) __declspec(align (_N))
#define BSON_ALIGNED_END(_N)
#else
#define BSON_ALIGNED_BEGIN(_N)
#define BSON_ALIGNED_END(_N) __attribute__ ((aligned (_N)))
#endif
#else
#if defined(_MSC_VER)
#define BSON_ALIGNED_BEGIN(_N) __declspec(align (BSON_ALIGN_OF_PTR))
#define BSON_ALIGNED_END(_N)
#else
#define BSON_ALIGNED_BEGIN(_N)
#define BSON_ALIGNED_END(_N) \
__attribute__ ( \
(aligned ((_N) > BSON_ALIGN_OF_PTR ? BSON_ALIGN_OF_PTR : (_N))))
#endif
#endif
#define bson_str_empty(s) (!s[0])
#define bson_str_empty0(s) (!s || !s[0])
#if defined(_WIN32)
#define BSON_FUNC __FUNCTION__
#elif defined(__STDC_VERSION__) && __STDC_VERSION__ < 199901L
#define BSON_FUNC __FUNCTION__
#else
#define BSON_FUNC __func__
#endif
#define BSON_ASSERT(test) \
do { \
if (!(BSON_LIKELY (test))) { \
fprintf (stderr, \
"%s:%d %s(): precondition failed: %s\n", \
__FILE__, \
__LINE__, \
BSON_FUNC, \
#test); \
abort (); \
} \
} while (0)
-
+/* obsolete macros, preserved for compatibility */
#define BSON_STATIC_ASSERT(s) BSON_STATIC_ASSERT_ (s, __LINE__)
#define BSON_STATIC_ASSERT_JOIN(a, b) BSON_STATIC_ASSERT_JOIN2 (a, b)
#define BSON_STATIC_ASSERT_JOIN2(a, b) a##b
#define BSON_STATIC_ASSERT_(s, l) \
typedef char BSON_STATIC_ASSERT_JOIN (static_assert_test_, \
__LINE__)[(s) ? 1 : -1]
+/* modern macros */
+#define BSON_STATIC_ASSERT2(_name, _s) \
+ BSON_STATIC_ASSERT2_ (_s, __LINE__, _name)
+#define BSON_STATIC_ASSERT_JOIN3(_a, _b, _name) \
+ BSON_STATIC_ASSERT_JOIN4 (_a, _b, _name)
+#define BSON_STATIC_ASSERT_JOIN4(_a, _b, _name) _a##_b##_name
+#define BSON_STATIC_ASSERT2_(_s, _l, _name) \
+ typedef char BSON_STATIC_ASSERT_JOIN3 ( \
+ static_assert_test_, __LINE__, _name)[(_s) ? 1 : -1]
+
#if defined(__GNUC__)
#define BSON_GNUC_CONST __attribute__ ((const))
#define BSON_GNUC_WARN_UNUSED_RESULT __attribute__ ((warn_unused_result))
#else
#define BSON_GNUC_CONST
#define BSON_GNUC_WARN_UNUSED_RESULT
#endif
#if BSON_GNUC_CHECK_VERSION(4, 0) && !defined(_WIN32)
#define BSON_GNUC_NULL_TERMINATED __attribute__ ((sentinel))
#define BSON_GNUC_INTERNAL __attribute__ ((visibility ("hidden")))
#else
#define BSON_GNUC_NULL_TERMINATED
#define BSON_GNUC_INTERNAL
#endif
#if defined(__GNUC__)
#define BSON_LIKELY(x) __builtin_expect (!!(x), 1)
#define BSON_UNLIKELY(x) __builtin_expect (!!(x), 0)
#else
#define BSON_LIKELY(v) v
#define BSON_UNLIKELY(v) v
#endif
#if defined(__clang__)
#define BSON_GNUC_PRINTF(f, v) __attribute__ ((format (printf, f, v)))
#elif BSON_GNUC_CHECK_VERSION(4, 4)
#define BSON_GNUC_PRINTF(f, v) __attribute__ ((format (gnu_printf, f, v)))
#else
#define BSON_GNUC_PRINTF(f, v)
#endif
#if defined(__LP64__) || defined(_LP64)
#define BSON_WORD_SIZE 64
#else
#define BSON_WORD_SIZE 32
#endif
#if defined(_MSC_VER)
#define BSON_INLINE __inline
#else
#define BSON_INLINE __inline__
#endif
#ifdef _MSC_VER
#define BSON_ENSURE_ARRAY_PARAM_SIZE(_n)
#define BSON_TYPEOF decltype
#else
#define BSON_ENSURE_ARRAY_PARAM_SIZE(_n) static(_n)
#define BSON_TYPEOF typeof
#endif
#if BSON_GNUC_CHECK_VERSION(3, 1)
#define BSON_GNUC_DEPRECATED __attribute__ ((__deprecated__))
#else
#define BSON_GNUC_DEPRECATED
#endif
#if BSON_GNUC_CHECK_VERSION(4, 5)
#define BSON_GNUC_DEPRECATED_FOR(f) \
__attribute__ ((deprecated ("Use " #f " instead")))
#else
#define BSON_GNUC_DEPRECATED_FOR(f) BSON_GNUC_DEPRECATED
#endif
#endif /* BSON_MACROS_H */
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-md5.c b/mongodb-1.4.2/src/libbson/src/bson/bson-md5.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-md5.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-md5.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-md5.h b/mongodb-1.4.2/src/libbson/src/bson/bson-md5.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-md5.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-md5.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-memory.c b/mongodb-1.4.2/src/libbson/src/bson/bson-memory.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-memory.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-memory.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-memory.h b/mongodb-1.4.2/src/libbson/src/bson/bson-memory.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-memory.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-memory.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-oid.c b/mongodb-1.4.2/src/libbson/src/bson/bson-oid.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-oid.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-oid.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-oid.h b/mongodb-1.4.2/src/libbson/src/bson/bson-oid.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-oid.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-oid.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-private.h b/mongodb-1.4.2/src/libbson/src/bson/bson-private.h
similarity index 94%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-private.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-private.h
index aeaee3a9..e20950ad 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-private.h
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-private.h
@@ -1,94 +1,94 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BSON_PRIVATE_H
#define BSON_PRIVATE_H
#include "bson-macros.h"
#include "bson-memory.h"
#include "bson-types.h"
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#define BEGIN_IGNORE_DEPRECATIONS \
_Pragma ("GCC diagnostic push") \
_Pragma ("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#define END_IGNORE_DEPRECATIONS _Pragma ("GCC diagnostic pop")
#elif defined(__clang__)
#define BEGIN_IGNORE_DEPRECATIONS \
_Pragma ("clang diagnostic push") \
_Pragma ("clang diagnostic ignored \"-Wdeprecated-declarations\"")
#define END_IGNORE_DEPRECATIONS _Pragma ("clang diagnostic pop")
#else
#define BEGIN_IGNORE_DEPRECATIONS
#define END_IGNORE_DEPRECATIONS
#endif
BSON_BEGIN_DECLS
typedef enum {
BSON_FLAG_NONE = 0,
BSON_FLAG_INLINE = (1 << 0),
BSON_FLAG_STATIC = (1 << 1),
BSON_FLAG_RDONLY = (1 << 2),
BSON_FLAG_CHILD = (1 << 3),
BSON_FLAG_IN_CHILD = (1 << 4),
BSON_FLAG_NO_FREE = (1 << 5),
} bson_flags_t;
#define BSON_INLINE_DATA_SIZE 120
BSON_ALIGNED_BEGIN (128)
typedef struct {
bson_flags_t flags;
uint32_t len;
uint8_t data[BSON_INLINE_DATA_SIZE];
} bson_impl_inline_t BSON_ALIGNED_END (128);
-BSON_STATIC_ASSERT (sizeof (bson_impl_inline_t) == 128);
+BSON_STATIC_ASSERT2 (impl_inline_t, sizeof (bson_impl_inline_t) == 128);
BSON_ALIGNED_BEGIN (128)
typedef struct {
bson_flags_t flags; /* flags describing the bson_t */
uint32_t len; /* length of bson document in bytes */
bson_t *parent; /* parent bson if a child */
uint32_t depth; /* Subdocument depth. */
uint8_t **buf; /* pointer to buffer pointer */
size_t *buflen; /* pointer to buffer length */
size_t offset; /* our offset inside *buf */
uint8_t *alloc; /* buffer that we own. */
size_t alloclen; /* length of buffer that we own. */
bson_realloc_func realloc; /* our realloc implementation */
void *realloc_func_ctx; /* context for our realloc func */
} bson_impl_alloc_t BSON_ALIGNED_END (128);
-BSON_STATIC_ASSERT (sizeof (bson_impl_alloc_t) <= 128);
+BSON_STATIC_ASSERT2 (impl_alloc_t, sizeof (bson_impl_alloc_t) <= 128);
#define BSON_REGEX_OPTIONS_SORTED "ilmsux"
BSON_END_DECLS
#endif /* BSON_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-reader.c b/mongodb-1.4.2/src/libbson/src/bson/bson-reader.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-reader.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-reader.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-reader.h b/mongodb-1.4.2/src/libbson/src/bson/bson-reader.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-reader.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-reader.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-stdint-win32.h b/mongodb-1.4.2/src/libbson/src/bson/bson-stdint-win32.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-stdint-win32.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-stdint-win32.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-stdint.h b/mongodb-1.4.2/src/libbson/src/bson/bson-stdint.h
similarity index 89%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-stdint.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-stdint.h
index d81be316..d887df19 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-stdint.h
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-stdint.h
@@ -1,17 +1,17 @@
#ifndef ___SRC_LIBBSON_SRC_BSON_BSON_STDINT_H
#define ___SRC_LIBBSON_SRC_BSON_BSON_STDINT_H 1
#ifndef _GENERATED_STDINT_H
#define _GENERATED_STDINT_H " "
-/* generated using a gnu compiler version cc (Ubuntu 5.4.0-6ubuntu1~16.04.5) 5.4.0 20160609 Copyright (C) 2015 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. */
+/* generated using a gnu compiler version cc (Ubuntu 5.4.0-6ubuntu1~16.04.9) 5.4.0 20160609 Copyright (C) 2015 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. */
#include <stdint.h>
/* system headers have good uint64_t */
#ifndef _HAVE_UINT64_T
#define _HAVE_UINT64_T
#endif
/* once */
#endif
#endif
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-string.c b/mongodb-1.4.2/src/libbson/src/bson/bson-string.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-string.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-string.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-string.h b/mongodb-1.4.2/src/libbson/src/bson/bson-string.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-string.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-string.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-thread-private.h b/mongodb-1.4.2/src/libbson/src/bson/bson-thread-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-thread-private.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-thread-private.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-timegm-private.h b/mongodb-1.4.2/src/libbson/src/bson/bson-timegm-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-timegm-private.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-timegm-private.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-timegm.c b/mongodb-1.4.2/src/libbson/src/bson/bson-timegm.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-timegm.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-timegm.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-types.h b/mongodb-1.4.2/src/libbson/src/bson/bson-types.h
similarity index 98%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-types.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-types.h
index f5c8e77f..41a6e251 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-types.h
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-types.h
@@ -1,546 +1,546 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BSON_TYPES_H
#define BSON_TYPES_H
#if !defined(BSON_INSIDE) && !defined(BSON_COMPILATION)
#error "Only <bson.h> can be included directly."
#endif
#include <stdlib.h>
#include <sys/types.h>
#include "bson-macros.h"
#include "bson-config.h"
#include "bson-compat.h"
#include "bson-endian.h"
BSON_BEGIN_DECLS
/*
*--------------------------------------------------------------------------
*
* bson_unichar_t --
*
* bson_unichar_t provides an unsigned 32-bit type for containing
* unicode characters. When iterating UTF-8 sequences, this should
* be used to avoid losing the high-bits of non-ascii characters.
*
* See also:
* bson_string_append_unichar()
*
*--------------------------------------------------------------------------
*/
typedef uint32_t bson_unichar_t;
/**
* bson_context_flags_t:
*
* This enumeration is used to configure a bson_context_t.
*
* %BSON_CONTEXT_NONE: Use default options.
* %BSON_CONTEXT_THREAD_SAFE: Context will be called from multiple threads.
* %BSON_CONTEXT_DISABLE_PID_CACHE: Call getpid() instead of caching the
* result of getpid() when initializing the context.
* %BSON_CONTEXT_DISABLE_HOST_CACHE: Call gethostname() instead of caching the
* result of gethostname() when initializing the context.
*/
typedef enum {
BSON_CONTEXT_NONE = 0,
BSON_CONTEXT_THREAD_SAFE = (1 << 0),
BSON_CONTEXT_DISABLE_HOST_CACHE = (1 << 1),
BSON_CONTEXT_DISABLE_PID_CACHE = (1 << 2),
#ifdef BSON_HAVE_SYSCALL_TID
BSON_CONTEXT_USE_TASK_ID = (1 << 3),
#endif
} bson_context_flags_t;
/**
* bson_context_t:
*
* This structure manages context for the bson library. It handles
* configuration for thread-safety and other performance related requirements.
* Consumers will create a context and may use multiple under a variety of
* situations.
*
* If your program calls fork(), you should initialize a new bson_context_t
* using bson_context_init().
*
* If you are using threading, it is suggested that you use a bson_context_t
* per thread for best performance. Alternatively, you can initialize the
* bson_context_t with BSON_CONTEXT_THREAD_SAFE, although a performance penalty
* will be incurred.
*
* Many functions will require that you provide a bson_context_t such as OID
* generation.
*
- * This structure is oqaque in that you cannot see the contents of the
+ * This structure is opaque in that you cannot see the contents of the
* structure. However, it is stack allocatable in that enough padding is
* provided in _bson_context_t to hold the structure.
*/
typedef struct _bson_context_t bson_context_t;
/**
* bson_t:
*
* This structure manages a buffer whose contents are a properly formatted
* BSON document. You may perform various transforms on the BSON documents.
* Additionally, it can be iterated over using bson_iter_t.
*
* See bson_iter_init() for iterating the contents of a bson_t.
*
* When building a bson_t structure using the various append functions,
* memory allocations may occur. That is performed using power of two
* allocations and realloc().
*
* See http://bsonspec.org for the BSON document spec.
*
* This structure is meant to fit in two sequential 64-byte cachelines.
*/
BSON_ALIGNED_BEGIN (128)
typedef struct _bson_t {
uint32_t flags; /* Internal flags for the bson_t. */
uint32_t len; /* Length of BSON data. */
uint8_t padding[120]; /* Padding for stack allocation. */
} bson_t BSON_ALIGNED_END (128);
/**
* BSON_INITIALIZER:
*
* This macro can be used to initialize a #bson_t structure on the stack
* without calling bson_init().
*
* |[
* bson_t b = BSON_INITIALIZER;
* ]|
*/
#define BSON_INITIALIZER \
{ \
3, 5, \
{ \
5 \
} \
}
-BSON_STATIC_ASSERT (sizeof (bson_t) == 128);
+BSON_STATIC_ASSERT2 (bson_t, sizeof (bson_t) == 128);
/**
* bson_oid_t:
*
* This structure contains the binary form of a BSON Object Id as specified
* on http://bsonspec.org. If you would like the bson_oid_t in string form
* see bson_oid_to_string() or bson_oid_to_string_r().
*/
typedef struct {
uint8_t bytes[12];
} bson_oid_t;
-BSON_STATIC_ASSERT (sizeof (bson_oid_t) == 12);
+BSON_STATIC_ASSERT2 (oid_t, sizeof (bson_oid_t) == 12);
/**
* bson_decimal128_t:
*
* @high The high-order bytes of the decimal128. This field contains sign,
* combination bits, exponent, and part of the coefficient continuation.
* @low The low-order bytes of the decimal128. This field contains the second
* part of the coefficient continuation.
*
* This structure is a boxed type containing the value for the BSON decimal128
* type. The structure stores the 128 bits such that they correspond to the
* native format for the IEEE decimal128 type, if it is implemented.
**/
typedef struct {
#if BSON_BYTE_ORDER == BSON_LITTLE_ENDIAN
uint64_t low;
uint64_t high;
#elif BSON_BYTE_ORDER == BSON_BIG_ENDIAN
uint64_t high;
uint64_t low;
#endif
} bson_decimal128_t;
/**
* bson_validate_flags_t:
*
* This enumeration is used for validation of BSON documents. It allows
* selective control on what you wish to validate.
*
* %BSON_VALIDATE_NONE: No additional validation occurs.
* %BSON_VALIDATE_UTF8: Check that strings are valid UTF-8.
* %BSON_VALIDATE_DOLLAR_KEYS: Check that keys do not start with $.
* %BSON_VALIDATE_DOT_KEYS: Check that keys do not contain a period.
* %BSON_VALIDATE_UTF8_ALLOW_NULL: Allow NUL bytes in UTF-8 text.
* %BSON_VALIDATE_EMPTY_KEYS: Prohibit zero-length field names
*/
typedef enum {
BSON_VALIDATE_NONE = 0,
BSON_VALIDATE_UTF8 = (1 << 0),
BSON_VALIDATE_DOLLAR_KEYS = (1 << 1),
BSON_VALIDATE_DOT_KEYS = (1 << 2),
BSON_VALIDATE_UTF8_ALLOW_NULL = (1 << 3),
BSON_VALIDATE_EMPTY_KEYS = (1 << 4),
} bson_validate_flags_t;
/**
* bson_type_t:
*
* This enumeration contains all of the possible types within a BSON document.
* Use bson_iter_type() to fetch the type of a field while iterating over it.
*/
typedef enum {
BSON_TYPE_EOD = 0x00,
BSON_TYPE_DOUBLE = 0x01,
BSON_TYPE_UTF8 = 0x02,
BSON_TYPE_DOCUMENT = 0x03,
BSON_TYPE_ARRAY = 0x04,
BSON_TYPE_BINARY = 0x05,
BSON_TYPE_UNDEFINED = 0x06,
BSON_TYPE_OID = 0x07,
BSON_TYPE_BOOL = 0x08,
BSON_TYPE_DATE_TIME = 0x09,
BSON_TYPE_NULL = 0x0A,
BSON_TYPE_REGEX = 0x0B,
BSON_TYPE_DBPOINTER = 0x0C,
BSON_TYPE_CODE = 0x0D,
BSON_TYPE_SYMBOL = 0x0E,
BSON_TYPE_CODEWSCOPE = 0x0F,
BSON_TYPE_INT32 = 0x10,
BSON_TYPE_TIMESTAMP = 0x11,
BSON_TYPE_INT64 = 0x12,
BSON_TYPE_DECIMAL128 = 0x13,
BSON_TYPE_MAXKEY = 0x7F,
BSON_TYPE_MINKEY = 0xFF,
} bson_type_t;
/**
* bson_subtype_t:
*
* This enumeration contains the various subtypes that may be used in a binary
* field. See http://bsonspec.org for more information.
*/
typedef enum {
BSON_SUBTYPE_BINARY = 0x00,
BSON_SUBTYPE_FUNCTION = 0x01,
BSON_SUBTYPE_BINARY_DEPRECATED = 0x02,
BSON_SUBTYPE_UUID_DEPRECATED = 0x03,
BSON_SUBTYPE_UUID = 0x04,
BSON_SUBTYPE_MD5 = 0x05,
BSON_SUBTYPE_USER = 0x80,
} bson_subtype_t;
/*
*--------------------------------------------------------------------------
*
* bson_value_t --
*
* A boxed type to contain various bson_type_t types.
*
* See also:
* bson_value_copy()
* bson_value_destroy()
*
*--------------------------------------------------------------------------
*/
/* Tagged union holding the value of a single BSON element; value_type
 * selects the active union member (member names mirror bson_type_t). */
BSON_ALIGNED_BEGIN (8)
typedef struct _bson_value_t {
   bson_type_t value_type; /* discriminator: which union member is active */
   int32_t padding;        /* explicit padding so the union is 8-byte aligned */
   union {
      bson_oid_t v_oid; /* BSON_TYPE_OID */
      int64_t v_int64;  /* BSON_TYPE_INT64 */
      int32_t v_int32;  /* BSON_TYPE_INT32 */
      int8_t v_int8;    /* NOTE(review): no bson_type_t maps to int8 — confirm usage */
      double v_double;  /* BSON_TYPE_DOUBLE */
      bool v_bool;      /* BSON_TYPE_BOOL */
      int64_t v_datetime; /* BSON_TYPE_DATE_TIME, msec since the epoch */
      struct {
         uint32_t timestamp;
         uint32_t increment;
      } v_timestamp; /* BSON_TYPE_TIMESTAMP */
      struct {
         char *str;
         uint32_t len; /* length in bytes — presumably excludes the NUL; confirm */
      } v_utf8; /* BSON_TYPE_UTF8 */
      struct {
         uint8_t *data;
         uint32_t data_len;
      } v_doc; /* BSON_TYPE_DOCUMENT / BSON_TYPE_ARRAY */
      struct {
         uint8_t *data;
         uint32_t data_len;
         bson_subtype_t subtype;
      } v_binary; /* BSON_TYPE_BINARY */
      struct {
         char *regex;
         char *options;
      } v_regex; /* BSON_TYPE_REGEX */
      struct {
         char *collection;
         uint32_t collection_len;
         bson_oid_t oid;
      } v_dbpointer; /* BSON_TYPE_DBPOINTER (deprecated in BSON) */
      struct {
         char *code;
         uint32_t code_len;
      } v_code; /* BSON_TYPE_CODE */
      struct {
         char *code;
         uint8_t *scope_data;
         uint32_t code_len;
         uint32_t scope_len;
      } v_codewscope; /* BSON_TYPE_CODEWSCOPE */
      struct {
         char *symbol;
         uint32_t len;
      } v_symbol; /* BSON_TYPE_SYMBOL (deprecated in BSON) */
      bson_decimal128_t v_decimal128; /* BSON_TYPE_DECIMAL128 */
   } value;
} bson_value_t BSON_ALIGNED_END (8);
/**
* bson_iter_t:
*
* This structure manages iteration over a bson_t structure. It keeps track
* of the location of the current key and value within the buffer. Using the
* various functions to get the value of the iter will read from these
* locations.
*
* This structure is safe to discard on the stack. No cleanup is necessary
* after using it.
*/
BSON_ALIGNED_BEGIN (128)
typedef struct {
const uint8_t *raw; /* The raw buffer being iterated. */
uint32_t len; /* The length of raw. */
uint32_t off; /* The offset within the buffer. */
uint32_t type; /* The offset of the type byte. */
uint32_t key; /* The offset of the key byte. */
uint32_t d1; /* The offset of the first data byte. */
uint32_t d2; /* The offset of the second data byte. */
uint32_t d3; /* The offset of the third data byte. */
uint32_t d4; /* The offset of the fourth data byte. */
uint32_t next_off; /* The offset of the next field. */
uint32_t err_off; /* The offset of the error. */
bson_value_t value; /* Internal value for various state. */
} bson_iter_t BSON_ALIGNED_END (128);
/**
* bson_reader_t:
*
* This structure is used to iterate over a sequence of BSON documents. It
* allows for them to be iterated with the possibility of no additional
* memory allocations under certain circumstances such as reading from an
* incoming mongo packet.
*/
BSON_ALIGNED_BEGIN (BSON_ALIGN_OF_PTR)
typedef struct {
uint32_t type;
/*< private >*/
} bson_reader_t BSON_ALIGNED_END (BSON_ALIGN_OF_PTR);
/**
* bson_visitor_t:
*
* This structure contains a series of pointers that can be executed for
* each field of a BSON document based on the field type.
*
* For example, if an int32 field is found, visit_int32 will be called.
*
* When visiting each field using bson_iter_visit_all(), you may provide a
* data pointer that will be provided with each callback. This might be useful
* if you are marshaling to another language.
*
* You may pre-maturely stop the visitation of fields by returning true in your
* visitor. Returning false will continue visitation to further fields.
*/
BSON_ALIGNED_BEGIN (8)
typedef struct {
/* run before / after descending into a document */
bool (*visit_before) (const bson_iter_t *iter, const char *key, void *data);
bool (*visit_after) (const bson_iter_t *iter, const char *key, void *data);
/* corrupt BSON, or unsupported type and visit_unsupported_type not set */
void (*visit_corrupt) (const bson_iter_t *iter, void *data);
/* normal bson field callbacks */
bool (*visit_double) (const bson_iter_t *iter,
const char *key,
double v_double,
void *data);
bool (*visit_utf8) (const bson_iter_t *iter,
const char *key,
size_t v_utf8_len,
const char *v_utf8,
void *data);
bool (*visit_document) (const bson_iter_t *iter,
const char *key,
const bson_t *v_document,
void *data);
bool (*visit_array) (const bson_iter_t *iter,
const char *key,
const bson_t *v_array,
void *data);
bool (*visit_binary) (const bson_iter_t *iter,
const char *key,
bson_subtype_t v_subtype,
size_t v_binary_len,
const uint8_t *v_binary,
void *data);
/* normal field with deprecated "Undefined" BSON type */
bool (*visit_undefined) (const bson_iter_t *iter,
const char *key,
void *data);
bool (*visit_oid) (const bson_iter_t *iter,
const char *key,
const bson_oid_t *v_oid,
void *data);
bool (*visit_bool) (const bson_iter_t *iter,
const char *key,
bool v_bool,
void *data);
bool (*visit_date_time) (const bson_iter_t *iter,
const char *key,
int64_t msec_since_epoch,
void *data);
bool (*visit_null) (const bson_iter_t *iter, const char *key, void *data);
bool (*visit_regex) (const bson_iter_t *iter,
const char *key,
const char *v_regex,
const char *v_options,
void *data);
bool (*visit_dbpointer) (const bson_iter_t *iter,
const char *key,
size_t v_collection_len,
const char *v_collection,
const bson_oid_t *v_oid,
void *data);
bool (*visit_code) (const bson_iter_t *iter,
const char *key,
size_t v_code_len,
const char *v_code,
void *data);
bool (*visit_symbol) (const bson_iter_t *iter,
const char *key,
size_t v_symbol_len,
const char *v_symbol,
void *data);
bool (*visit_codewscope) (const bson_iter_t *iter,
const char *key,
size_t v_code_len,
const char *v_code,
const bson_t *v_scope,
void *data);
bool (*visit_int32) (const bson_iter_t *iter,
const char *key,
int32_t v_int32,
void *data);
bool (*visit_timestamp) (const bson_iter_t *iter,
const char *key,
uint32_t v_timestamp,
uint32_t v_increment,
void *data);
bool (*visit_int64) (const bson_iter_t *iter,
const char *key,
int64_t v_int64,
void *data);
bool (*visit_maxkey) (const bson_iter_t *iter, const char *key, void *data);
bool (*visit_minkey) (const bson_iter_t *iter, const char *key, void *data);
/* if set, called instead of visit_corrupt when an apparently valid BSON
* includes an unrecognized field type (reading future version of BSON) */
void (*visit_unsupported_type) (const bson_iter_t *iter,
const char *key,
uint32_t type_code,
void *data);
bool (*visit_decimal128) (const bson_iter_t *iter,
const char *key,
const bson_decimal128_t *v_decimal128,
void *data);
void *padding[7];
} bson_visitor_t BSON_ALIGNED_END (8);
#define BSON_ERROR_BUFFER_SIZE 504
/* Out-parameter error descriptor; 512 bytes total (asserted below) so it
 * is cheap to stack-allocate and fill on failure. */
BSON_ALIGNED_BEGIN (8)
typedef struct _bson_error_t {
   uint32_t domain;                      /* subsystem that raised the error */
   uint32_t code;                        /* domain-specific error code */
   char message[BSON_ERROR_BUFFER_SIZE]; /* NUL-terminated human-readable text */
} bson_error_t BSON_ALIGNED_END (8);
-BSON_STATIC_ASSERT (sizeof (bson_error_t) == 512);
+BSON_STATIC_ASSERT2 (error_t, sizeof (bson_error_t) == 512);
/**
 * bson_next_power_of_two:
 * @value: The required number of bytes (a size_t).
 *
 * Computes the smallest power of two greater than or equal to @value
 * in a constant number of operations.
 *
 * It is up to the caller to guarantee this will not overflow.
 *
 * Returns: The next power of 2 from @value.
 */
static BSON_INLINE size_t
bson_next_power_of_two (size_t value)
{
   size_t shift;

   value--;

   /* Smear the highest set bit into every lower bit position, then add
    * one to land exactly on the next power of two. */
#if BSON_WORD_SIZE == 64
   for (shift = 1; shift <= 32; shift <<= 1) {
#else
   for (shift = 1; shift <= 16; shift <<= 1) {
#endif
      value |= value >> shift;
   }

   return value + 1;
}
static BSON_INLINE bool
bson_is_power_of_two (uint32_t v)
{
   /* A power of two has exactly one bit set, so clearing the lowest set
    * bit leaves zero.  Zero itself is not a power of two. */
   if (v == 0) {
      return false;
   }

   return (v & (v - 1)) == 0;
}
BSON_END_DECLS
#endif /* BSON_TYPES_H */
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-utf8.c b/mongodb-1.4.2/src/libbson/src/bson/bson-utf8.c
similarity index 99%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-utf8.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-utf8.c
index aef25d9c..e917299c 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-utf8.c
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-utf8.c
@@ -1,475 +1,478 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string.h>
#include "bson-memory.h"
#include "bson-string.h"
#include "bson-utf8.h"
/*
 *--------------------------------------------------------------------------
 *
 * _bson_utf8_get_sequence --
 *
 *       Classify the leading byte of the UTF-8 sequence beginning at
 *       @utf8.  On return, @seq_length holds the number of bytes in the
 *       sequence (0 if the byte is not a valid sequence start) and
 *       @first_mask holds the bitmask that extracts the payload bits of
 *       the leading byte.  Continuation bytes are always masked with 0x3F
 *       by the callers.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       @seq_length is set.
 *       @first_mask is set.
 *
 *--------------------------------------------------------------------------
 */

static BSON_INLINE void
_bson_utf8_get_sequence (const char *utf8,    /* IN */
                         uint8_t *seq_length, /* OUT */
                         uint8_t *first_mask) /* OUT */
{
   /* Leading-byte patterns for 1..6 byte sequences (historical RFC 2279
    * layout): row i matches when (byte & high_mask[i]) == pattern[i]. */
   static const uint8_t high_mask[6] = {0x80, 0xE0, 0xF0, 0xF8, 0xFC, 0xFE};
   static const uint8_t pattern[6] = {0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC};
   static const uint8_t payload_mask[6] = {0x7F, 0x1F, 0x0F, 0x07, 0x03, 0x01};
   unsigned char lead = *(const unsigned char *) utf8;
   uint8_t i;

   for (i = 0; i < 6; i++) {
      if ((lead & high_mask[i]) == pattern[i]) {
         *seq_length = i + 1;
         *first_mask = payload_mask[i];
         return;
      }
   }

   /* Not a valid leading byte (e.g. a stray continuation byte). */
   *seq_length = 0;
   *first_mask = 0;
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_utf8_validate --
 *
 *       Validates that @utf8 is a valid UTF-8 string.
 *
 *       If @allow_null is true, then \0 is allowed within @utf8_len bytes
 *       of @utf8.  Generally, this is bad practice since the main point of
 *       UTF-8 strings is that they can be used with strlen() and friends.
 *       However, some languages such as Python can send UTF-8 encoded
 *       strings with NUL's in them.
 *
 * Parameters:
 *       @utf8: A UTF-8 encoded string.
 *       @utf8_len: The length of @utf8 in bytes.
 *       @allow_null: If \0 is allowed within @utf8, excluding trailing \0.
 *
 * Returns:
 *       true if @utf8 is valid UTF-8. otherwise false.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */

bool
bson_utf8_validate (const char *utf8, /* IN */
                    size_t utf8_len,  /* IN */
                    bool allow_null)  /* IN */
{
   bson_unichar_t c;
   uint8_t first_mask;
   uint8_t seq_length;
   unsigned i;
   unsigned j;

   BSON_ASSERT (utf8);

   for (i = 0; i < utf8_len; i += seq_length) {
      _bson_utf8_get_sequence (&utf8[i], &seq_length, &first_mask);

      /*
       * Ensure we have a valid multi-byte sequence length.
       */
      if (!seq_length) {
         return false;
      }

      /*
       * Ensure we have enough bytes left.
       */
      if ((utf8_len - i) < seq_length) {
         return false;
      }

      /*
       * Also calculate the next char as a unichar so we can
       * check code ranges for non-shortest form.
       */
      c = utf8[i] & first_mask;

      /*
       * Check the high-bits for each additional sequence byte.
       */
      for (j = i + 1; j < (i + seq_length); j++) {
         c = (c << 6) | (utf8[j] & 0x3F);
         if ((utf8[j] & 0xC0) != 0x80) {
            return false;
         }
      }

      /*
       * Check for NULL bytes afterwards.
       *
       * Hint: if you want to optimize this function, starting here to do
       * this in the same pass as the data above would probably be a good
       * idea. You would add a branch into the inner loop, but save possibly
       * on cache-line bouncing on larger strings. Just a thought.
       *
       * NOTE(review): the `(i + j) > utf8_len` test (rather than `>=`)
       * permits reading utf8[utf8_len]; callers presumably pass a
       * NUL-terminated buffer — confirm before changing.
       */
      if (!allow_null) {
         for (j = 0; j < seq_length; j++) {
            if (((i + j) > utf8_len) || !utf8[i + j]) {
               return false;
            }
         }
      }

      /*
       * Code point won't fit in UTF-16; not allowed.
       */
      if (c > 0x0010FFFF) {
         return false;
      }

      /*
       * Byte is in reserved range for UTF-16 high-marks
       * for surrogate pairs.
       */
      if ((c & 0xFFFFF800) == 0xD800) {
         return false;
      }

      /*
       * Check non-shortest form unicode.
       */
      switch (seq_length) {
      case 1:
         if (c <= 0x007F) {
            continue;
         }
         return false;
      case 2:
         if ((c >= 0x0080) && (c <= 0x07FF)) {
            continue;
         } else if (c == 0) {
            /* Two-byte representation for NULL. */
            /* NOTE(review): the three `+`-prefixed lines below are
             * unapplied diff hunk markers embedded in this dump. */
+            if (!allow_null) {
+               return false;
+            }
            continue;
         }
         return false;
      case 3:
         if (((c >= 0x0800) && (c <= 0x0FFF)) ||
             ((c >= 0x1000) && (c <= 0xFFFF))) {
            continue;
         }
         return false;
      case 4:
         if (((c >= 0x10000) && (c <= 0x3FFFF)) ||
             ((c >= 0x40000) && (c <= 0xFFFFF)) ||
             ((c >= 0x100000) && (c <= 0x10FFFF))) {
            continue;
         }
         return false;
      default:
         return false;
      }
   }

   return true;
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_utf8_escape_for_json --
 *
 *       Allocates a new UTF-8 string equal to @utf8 except that characters
 *       special to JSON are escaped: backslash and double-quote become
 *       backslash-escaped, the common control characters get their short
 *       escapes (\b \f \n \r \t), and any other character below 0x20 —
 *       including an embedded NUL within @utf8_len bytes — is emitted as
 *       \u00XX.
 *
 * Parameters:
 *       @utf8: A UTF-8 encoded string.
 *       @utf8_len: The length of @utf8 in bytes or -1 if NUL terminated.
 *
 * Returns:
 *       A newly allocated string that should be freed with bson_free(),
 *       or NULL if @utf8 is invalid UTF-8.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */

char *
bson_utf8_escape_for_json (const char *utf8, /* IN */
                           ssize_t utf8_len) /* IN */
{
   const bool have_len = (utf8_len >= 0);
   bson_string_t *escaped;
   bson_unichar_t uc;
   const char *stop;

   BSON_ASSERT (utf8);

   escaped = bson_string_new (NULL);

   if (!have_len) {
      utf8_len = strlen (utf8);
   }

   stop = utf8 + utf8_len;

   while (utf8 < stop) {
      uc = bson_utf8_get_char (utf8);

      if (uc == '\\' || uc == '"') {
         bson_string_append_c (escaped, '\\');
         bson_string_append_unichar (escaped, uc);
      } else if (uc == '\b') {
         bson_string_append (escaped, "\\b");
      } else if (uc == '\f') {
         bson_string_append (escaped, "\\f");
      } else if (uc == '\n') {
         bson_string_append (escaped, "\\n");
      } else if (uc == '\r') {
         bson_string_append (escaped, "\\r");
      } else if (uc == '\t') {
         bson_string_append (escaped, "\\t");
      } else if (uc < ' ') {
         /* Remaining control characters (including NUL) as \u00XX. */
         bson_string_append_printf (escaped, "\\u%04x", (unsigned) uc);
      } else {
         bson_string_append_unichar (escaped, uc);
      }

      if (uc) {
         utf8 = bson_utf8_next_char (utf8);
      } else if (have_len && !*utf8) {
         /* We escaped a genuine NUL byte as '\u0000'; step past it. */
         utf8++;
      } else {
         /* NUL decoded without an explicit length (or an overlong
          * encoding of NUL): invalid UTF-8. */
         bson_string_free (escaped, true);
         return NULL;
      }
   }

   return bson_string_free (escaped, false);
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_utf8_get_char --
 *
 *       Fetches the next UTF-8 character from the UTF-8 sequence.
 *
 * Parameters:
 *       @utf8: A string containing validated UTF-8.
 *
 * Returns:
 *       A 32-bit bson_unichar_t representing the multi-byte sequence.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */

bson_unichar_t
bson_utf8_get_char (const char *utf8) /* IN */
{
   bson_unichar_t result;
   uint8_t lead_mask;
   uint8_t seq_len;
   int idx;

   BSON_ASSERT (utf8);

   _bson_utf8_get_sequence (utf8, &seq_len, &lead_mask);

   /* Payload bits of the lead byte, then 6 bits per continuation byte. */
   result = (*utf8) & lead_mask;
   for (idx = 1; idx < seq_len; idx++) {
      result = (result << 6) | (utf8[idx] & 0x3F);
   }

   return result;
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_utf8_next_char --
 *
 *       Returns a pointer advanced to the beginning of the next
 *       multi-byte sequence in @utf8.
 *
 * Parameters:
 *       @utf8: A string containing validated UTF-8.
 *
 * Returns:
 *       An incremented pointer in @utf8.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */

const char *
bson_utf8_next_char (const char *utf8) /* IN */
{
   uint8_t lead_mask;
   uint8_t seq_len;

   BSON_ASSERT (utf8);

   _bson_utf8_get_sequence (utf8, &seq_len, &lead_mask);

   return utf8 + seq_len;
}
/*
 *--------------------------------------------------------------------------
 *
 * bson_utf8_from_unichar --
 *
 *       Converts @unichar to a sequence of UTF-8 bytes stored in @utf8.
 *       The number of bytes in the sequence is stored in @len.  Code
 *       points up to 0x7FFFFFFF are supported using the historical 5-
 *       and 6-byte forms (RFC 2279); *len is set to 0 for values that
 *       cannot be encoded.
 *
 * Parameters:
 *       @unichar: A bson_unichar_t.
 *       @utf8: A location for the multi-byte sequence.
 *       @len: A location for number of bytes stored in @utf8.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       @utf8 is set.
 *       @len is set.
 *
 *--------------------------------------------------------------------------
 */

void
bson_utf8_from_unichar (bson_unichar_t unichar,                      /* IN */
                        char utf8[BSON_ENSURE_ARRAY_PARAM_SIZE (6)], /* OUT */
                        uint32_t *len)                               /* OUT */
{
   BSON_ASSERT (utf8);
   BSON_ASSERT (len);

   if (unichar <= 0x7F) {
      /* 0xxxxxxx */
      *len = 1;
      utf8[0] = unichar;
   } else if (unichar <= 0x7FF) {
      /* 110xxxxx 10xxxxxx */
      *len = 2;
      utf8[0] = 0xC0 | ((unichar >> 6) & 0x1F);
      utf8[1] = 0x80 | ((unichar) &0x3F);
   } else if (unichar <= 0xFFFF) {
      /* 1110xxxx 10xxxxxx 10xxxxxx */
      *len = 3;
      utf8[0] = 0xE0 | ((unichar >> 12) & 0xF);
      utf8[1] = 0x80 | ((unichar >> 6) & 0x3F);
      utf8[2] = 0x80 | ((unichar) &0x3F);
   } else if (unichar <= 0x1FFFFF) {
      /* 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
      *len = 4;
      utf8[0] = 0xF0 | ((unichar >> 18) & 0x7);
      utf8[1] = 0x80 | ((unichar >> 12) & 0x3F);
      utf8[2] = 0x80 | ((unichar >> 6) & 0x3F);
      utf8[3] = 0x80 | ((unichar) &0x3F);
   } else if (unichar <= 0x3FFFFFF) {
      /* 111110xx 10xxxxxx x4 */
      *len = 5;
      utf8[0] = 0xF8 | ((unichar >> 24) & 0x3);
      utf8[1] = 0x80 | ((unichar >> 18) & 0x3F);
      utf8[2] = 0x80 | ((unichar >> 12) & 0x3F);
      utf8[3] = 0x80 | ((unichar >> 6) & 0x3F);
      utf8[4] = 0x80 | ((unichar) &0x3F);
   } else if (unichar <= 0x7FFFFFFF) {
      /* 1111110x 10xxxxxx x5
       *
       * BUG FIX: this branch previously used shifts 31/25/19/13/7 and
       * masked the final continuation byte with 0x1, which dropped bits
       * and mis-encoded every code point requiring six bytes.  Each
       * continuation byte carries 6 payload bits, so the correct shifts
       * are 30/24/18/12/6/0 with mask 0x3F (RFC 2279 layout). */
      *len = 6;
      utf8[0] = 0xFC | ((unichar >> 30) & 0x1);
      utf8[1] = 0x80 | ((unichar >> 24) & 0x3F);
      utf8[2] = 0x80 | ((unichar >> 18) & 0x3F);
      utf8[3] = 0x80 | ((unichar >> 12) & 0x3F);
      utf8[4] = 0x80 | ((unichar >> 6) & 0x3F);
      utf8[5] = 0x80 | ((unichar) &0x3F);
   } else {
      /* Value out of encodable range. */
      *len = 0;
   }
}
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-utf8.h b/mongodb-1.4.2/src/libbson/src/bson/bson-utf8.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-utf8.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-utf8.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-value.c b/mongodb-1.4.2/src/libbson/src/bson/bson-value.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-value.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-value.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-value.h b/mongodb-1.4.2/src/libbson/src/bson/bson-value.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-value.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-value.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-version-functions.c b/mongodb-1.4.2/src/libbson/src/bson/bson-version-functions.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-version-functions.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-version-functions.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-version-functions.h b/mongodb-1.4.2/src/libbson/src/bson/bson-version-functions.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-version-functions.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-version-functions.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-version.h b/mongodb-1.4.2/src/libbson/src/bson/bson-version.h
similarity index 94%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-version.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-version.h
index 13a45336..cc5663d1 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson-version.h
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson-version.h
@@ -1,101 +1,101 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if !defined (BSON_INSIDE) && !defined (BSON_COMPILATION)
#error "Only <bson.h> can be included directly."
#endif
#ifndef BSON_VERSION_H
#define BSON_VERSION_H
/**
* BSON_MAJOR_VERSION:
*
* BSON major version component (e.g. 1 if %BSON_VERSION is 1.2.3)
*/
#define BSON_MAJOR_VERSION (1)
/**
* BSON_MINOR_VERSION:
*
* BSON minor version component (e.g. 2 if %BSON_VERSION is 1.2.3)
*/
-#define BSON_MINOR_VERSION (8)
+#define BSON_MINOR_VERSION (9)
/**
* BSON_MICRO_VERSION:
*
* BSON micro version component (e.g. 3 if %BSON_VERSION is 1.2.3)
*/
-#define BSON_MICRO_VERSION (2)
+#define BSON_MICRO_VERSION (3)
/**
* BSON_PRERELEASE_VERSION:
*
* BSON prerelease version component (e.g. rc0 if %BSON_VERSION is 1.2.3-rc0)
*/
#define BSON_PRERELEASE_VERSION ()
/**
* BSON_VERSION:
*
* BSON version.
*/
-#define BSON_VERSION (1.8.2)
+#define BSON_VERSION (1.9.3)
/**
* BSON_VERSION_S:
*
* BSON version, encoded as a string, useful for printing and
* concatenation.
*/
-#define BSON_VERSION_S "1.8.2"
+#define BSON_VERSION_S "1.9.3"
/**
* BSON_VERSION_HEX:
*
* BSON version, encoded as an hexadecimal number, useful for
* integer comparisons.
*/
#define BSON_VERSION_HEX (BSON_MAJOR_VERSION << 24 | \
BSON_MINOR_VERSION << 16 | \
BSON_MICRO_VERSION << 8)
/**
* BSON_CHECK_VERSION:
* @major: required major version
* @minor: required minor version
* @micro: required micro version
*
* Compile-time version checking. Evaluates to %TRUE if the version
* of BSON is greater than the required one.
*/
#define BSON_CHECK_VERSION(major,minor,micro) \
(BSON_MAJOR_VERSION > (major) || \
(BSON_MAJOR_VERSION == (major) && BSON_MINOR_VERSION > (minor)) || \
(BSON_MAJOR_VERSION == (major) && BSON_MINOR_VERSION == (minor) && \
BSON_MICRO_VERSION >= (micro)))
#endif /* BSON_VERSION_H */
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-version.h.in b/mongodb-1.4.2/src/libbson/src/bson/bson-version.h.in
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-version.h.in
rename to mongodb-1.4.2/src/libbson/src/bson/bson-version.h.in
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-writer.c b/mongodb-1.4.2/src/libbson/src/bson/bson-writer.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-writer.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson-writer.c
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson-writer.h b/mongodb-1.4.2/src/libbson/src/bson/bson-writer.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson-writer.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson-writer.h
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson.c b/mongodb-1.4.2/src/libbson/src/bson/bson.c
similarity index 99%
rename from mongodb-1.3.4/src/libbson/src/bson/bson.c
rename to mongodb-1.4.2/src/libbson/src/bson/bson.c
index b23904c9..70f4effd 100644
--- a/mongodb-1.3.4/src/libbson/src/bson/bson.c
+++ b/mongodb-1.4.2/src/libbson/src/bson/bson.c
@@ -1,3477 +1,3477 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bson.h"
#include "bson-config.h"
#include "b64_ntop.h"
#include "bson-private.h"
#include "bson-string.h"
#include "bson-iso8601-private.h"
#include <string.h>
#include <math.h>
#ifndef BSON_MAX_RECURSION
#define BSON_MAX_RECURSION 200
#endif
typedef enum {
BSON_VALIDATE_PHASE_START,
BSON_VALIDATE_PHASE_TOP,
BSON_VALIDATE_PHASE_LF_REF_KEY,
BSON_VALIDATE_PHASE_LF_REF_UTF8,
BSON_VALIDATE_PHASE_LF_ID_KEY,
BSON_VALIDATE_PHASE_LF_DB_KEY,
BSON_VALIDATE_PHASE_LF_DB_UTF8,
BSON_VALIDATE_PHASE_NOT_DBREF,
} bson_validate_phase_t;
typedef enum {
BSON_JSON_MODE_LEGACY,
BSON_JSON_MODE_CANONICAL,
BSON_JSON_MODE_RELAXED,
} bson_json_mode_t;
/*
* Structures.
*/
typedef struct {
bson_validate_flags_t flags;
ssize_t err_offset;
bson_validate_phase_t phase;
bson_error_t error;
} bson_validate_state_t;
typedef struct {
uint32_t count;
bool keys;
ssize_t *err_offset;
uint32_t depth;
bson_string_t *str;
bson_json_mode_t mode;
} bson_json_state_t;
/*
* Forward declarations.
*/
static bool
_bson_as_json_visit_array (const bson_iter_t *iter,
const char *key,
const bson_t *v_array,
void *data);
static bool
_bson_as_json_visit_document (const bson_iter_t *iter,
const char *key,
const bson_t *v_document,
void *data);
static char *
_bson_as_json_visit_all (const bson_t *bson,
size_t *length,
bson_json_mode_t mode);
/*
* Globals.
*/
static const uint8_t gZero;
/*
*--------------------------------------------------------------------------
*
* _bson_impl_inline_grow --
*
* Document growth implementation for documents that currently
* contain stack based buffers. The document may be switched to
* a malloc based buffer.
*
* Returns:
* true if successful; otherwise false indicating INT_MAX overflow.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static bool
_bson_impl_inline_grow (bson_impl_inline_t *impl, /* IN */
size_t size) /* IN */
{
bson_impl_alloc_t *alloc = (bson_impl_alloc_t *) impl;
uint8_t *data;
size_t req;
if (((size_t) impl->len + size) <= sizeof impl->data) {
return true;
}
req = bson_next_power_of_two (impl->len + size);
if (req <= INT32_MAX) {
data = bson_malloc (req);
memcpy (data, impl->data, impl->len);
alloc->flags &= ~BSON_FLAG_INLINE;
alloc->parent = NULL;
alloc->depth = 0;
alloc->buf = &alloc->alloc;
alloc->buflen = &alloc->alloclen;
alloc->offset = 0;
alloc->alloc = data;
alloc->alloclen = req;
alloc->realloc = bson_realloc_ctx;
alloc->realloc_func_ctx = NULL;
return true;
}
return false;
}
/*
*--------------------------------------------------------------------------
*
* _bson_impl_alloc_grow --
*
* Document growth implementation for documents containing malloc
* based buffers.
*
* Returns:
* true if successful; otherwise false indicating INT_MAX overflow.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static bool
_bson_impl_alloc_grow (bson_impl_alloc_t *impl, /* IN */
size_t size) /* IN */
{
size_t req;
/*
* Determine how many bytes we need for this document in the buffer
* including necessary trailing bytes for parent documents.
*/
req = (impl->offset + impl->len + size + impl->depth);
if (req <= *impl->buflen) {
return true;
}
req = bson_next_power_of_two (req);
if ((req <= INT32_MAX) && impl->realloc) {
*impl->buf = impl->realloc (*impl->buf, req, impl->realloc_func_ctx);
*impl->buflen = req;
return true;
}
return false;
}
/*
*--------------------------------------------------------------------------
*
* _bson_grow --
*
* Grows the bson_t structure to be large enough to contain @size
* bytes.
*
* Returns:
* true if successful, false if the size would overflow.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static bool
_bson_grow (bson_t *bson, /* IN */
uint32_t size) /* IN */
{
if ((bson->flags & BSON_FLAG_INLINE)) {
return _bson_impl_inline_grow ((bson_impl_inline_t *) bson, size);
}
return _bson_impl_alloc_grow ((bson_impl_alloc_t *) bson, size);
}
/*
*--------------------------------------------------------------------------
*
* _bson_data --
*
* A helper function to return the contents of the bson document
* taking into account the polymorphic nature of bson_t.
*
* Returns:
* A buffer which should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static BSON_INLINE uint8_t *
_bson_data (const bson_t *bson) /* IN */
{
if ((bson->flags & BSON_FLAG_INLINE)) {
return ((bson_impl_inline_t *) bson)->data;
} else {
bson_impl_alloc_t *impl = (bson_impl_alloc_t *) bson;
return (*impl->buf) + impl->offset;
}
}
/*
*--------------------------------------------------------------------------
*
* _bson_encode_length --
*
* Helper to encode the length of the bson_t in the first 4 bytes
* of the bson document. Little endian format is used as specified
* by bsonspec.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static BSON_INLINE void
_bson_encode_length (bson_t *bson) /* IN */
{
#if BSON_BYTE_ORDER == BSON_LITTLE_ENDIAN
memcpy (_bson_data (bson), &bson->len, sizeof (bson->len));
#else
uint32_t length_le = BSON_UINT32_TO_LE (bson->len);
memcpy (_bson_data (bson), &length_le, sizeof (length_le));
#endif
}
/*
*--------------------------------------------------------------------------
*
* _bson_append_va --
*
* Appends the length,buffer pairs to the bson_t. @n_bytes is an
* optimization to perform one array growth rather than many small
* growths.
*
* @bson: A bson_t
* @n_bytes: The number of bytes to append to the document.
* @n_pairs: The number of length,buffer pairs.
* @first_len: Length of first buffer.
* @first_data: First buffer.
* @args: va_list of additional tuples.
*
* Returns:
* true if the bytes were appended successfully.
* false if it bson would overflow INT_MAX.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static BSON_INLINE bool
_bson_append_va (bson_t *bson, /* IN */
uint32_t n_bytes, /* IN */
uint32_t n_pairs, /* IN */
uint32_t first_len, /* IN */
const uint8_t *first_data, /* IN */
va_list args) /* IN */
{
const uint8_t *data;
uint32_t data_len;
uint8_t *buf;
BSON_ASSERT (!(bson->flags & BSON_FLAG_IN_CHILD));
BSON_ASSERT (!(bson->flags & BSON_FLAG_RDONLY));
if (BSON_UNLIKELY (!_bson_grow (bson, n_bytes))) {
return false;
}
data = first_data;
data_len = first_len;
buf = _bson_data (bson) + bson->len - 1;
do {
n_pairs--;
memcpy (buf, data, data_len);
bson->len += data_len;
buf += data_len;
if (n_pairs) {
data_len = va_arg (args, uint32_t);
data = va_arg (args, const uint8_t *);
}
} while (n_pairs);
_bson_encode_length (bson);
*buf = '\0';
return true;
}
/*
*--------------------------------------------------------------------------
*
* _bson_append --
*
* Variadic function to append length,buffer pairs to a bson_t. If the
* append would cause the bson_t to overflow a 32-bit length, it will
* return false and no append will have occurred.
*
* Parameters:
* @bson: A bson_t.
* @n_pairs: Number of length,buffer pairs.
* @n_bytes: the total number of bytes being appended.
* @first_len: Length of first buffer.
* @first_data: First buffer.
*
* Returns:
* true if successful; otherwise false indicating INT_MAX overflow.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static bool
_bson_append (bson_t *bson, /* IN */
uint32_t n_pairs, /* IN */
uint32_t n_bytes, /* IN */
uint32_t first_len, /* IN */
const uint8_t *first_data, /* IN */
...)
{
va_list args;
bool ok;
BSON_ASSERT (n_pairs);
BSON_ASSERT (first_len);
BSON_ASSERT (first_data);
/*
* Check to see if this append would overflow 32-bit signed integer. I know
* what you're thinking. BSON uses a signed 32-bit length field? Yeah. It
* does.
*/
if (BSON_UNLIKELY (n_bytes > (BSON_MAX_SIZE - bson->len))) {
return false;
}
va_start (args, first_data);
ok = _bson_append_va (bson, n_bytes, n_pairs, first_len, first_data, args);
va_end (args);
return ok;
}
/*
*--------------------------------------------------------------------------
*
* _bson_append_bson_begin --
*
* Begin appending a subdocument or subarray to the document using
* the key provided by @key.
*
* If @key_length is < 0, then strlen() will be called on @key
* to determine the length.
*
* @key_type MUST be either BSON_TYPE_DOCUMENT or BSON_TYPE_ARRAY.
*
* Returns:
* true if successful; otherwise false indicating INT_MAX overflow.
*
* Side effects:
* @child is initialized if true is returned.
*
*--------------------------------------------------------------------------
*/
static bool
_bson_append_bson_begin (bson_t *bson, /* IN */
const char *key, /* IN */
int key_length, /* IN */
bson_type_t child_type, /* IN */
bson_t *child) /* OUT */
{
const uint8_t type = child_type;
const uint8_t empty[5] = {5};
bson_impl_alloc_t *aparent = (bson_impl_alloc_t *) bson;
bson_impl_alloc_t *achild = (bson_impl_alloc_t *) child;
BSON_ASSERT (!(bson->flags & BSON_FLAG_RDONLY));
BSON_ASSERT (!(bson->flags & BSON_FLAG_IN_CHILD));
BSON_ASSERT (key);
BSON_ASSERT ((child_type == BSON_TYPE_DOCUMENT) ||
(child_type == BSON_TYPE_ARRAY));
BSON_ASSERT (child);
if (key_length < 0) {
key_length = (int) strlen (key);
}
/*
* If the parent is an inline bson_t, then we need to convert
* it to a heap allocated buffer. This makes extending buffers
* of child bson documents much simpler logic, as they can just
* realloc the *buf pointer.
*/
if ((bson->flags & BSON_FLAG_INLINE)) {
BSON_ASSERT (bson->len <= 120);
if (!_bson_grow (bson, 128 - bson->len)) {
return false;
}
BSON_ASSERT (!(bson->flags & BSON_FLAG_INLINE));
}
/*
* Append the type and key for the field.
*/
if (!_bson_append (bson,
4,
(1 + key_length + 1 + 5),
1,
&type,
key_length,
key,
1,
&gZero,
5,
empty)) {
return false;
}
/*
* Mark the document as working on a child document so that no
* further modifications can happen until the caller has called
* bson_append_{document,array}_end().
*/
bson->flags |= BSON_FLAG_IN_CHILD;
/*
* Initialize the child bson_t structure and point it at the parents
* buffers. This allows us to realloc directly from the child without
* walking up to the parent bson_t.
*/
achild->flags = (BSON_FLAG_CHILD | BSON_FLAG_NO_FREE | BSON_FLAG_STATIC);
if ((bson->flags & BSON_FLAG_CHILD)) {
achild->depth = ((bson_impl_alloc_t *) bson)->depth + 1;
} else {
achild->depth = 1;
}
achild->parent = bson;
achild->buf = aparent->buf;
achild->buflen = aparent->buflen;
achild->offset = aparent->offset + aparent->len - 1 - 5;
achild->len = 5;
achild->alloc = NULL;
achild->alloclen = 0;
achild->realloc = aparent->realloc;
achild->realloc_func_ctx = aparent->realloc_func_ctx;
return true;
}
/*
*--------------------------------------------------------------------------
*
* _bson_append_bson_end --
*
* Complete a call to _bson_append_bson_begin.
*
* Returns:
* true if successful.
*
* Side effects:
* @child is destroyed and no longer valid after calling this
* function.
*
*--------------------------------------------------------------------------
*/
static bool
_bson_append_bson_end (bson_t *bson, /* IN */
bson_t *child) /* IN */
{
BSON_ASSERT (bson);
BSON_ASSERT ((bson->flags & BSON_FLAG_IN_CHILD));
BSON_ASSERT (!(child->flags & BSON_FLAG_IN_CHILD));
/*
* Unmark the IN_CHILD flag.
*/
bson->flags &= ~BSON_FLAG_IN_CHILD;
/*
* Now that we are done building the sub-document, add the size to the
* parent, not including the default 5 byte empty document already added.
*/
bson->len = (bson->len + child->len - 5);
/*
* Ensure we have a \0 byte at the end and proper length encoded at
* the beginning of the document.
*/
_bson_data (bson)[bson->len - 1] = '\0';
_bson_encode_length (bson);
return true;
}
/*
*--------------------------------------------------------------------------
*
* bson_append_array_begin --
*
* Start appending a new array.
*
* Use @child to append to the data area for the given field.
*
* It is a programming error to call any other bson function on
* @bson until bson_append_array_end() has been called. It is
* valid to call bson_append*() functions on @child.
*
* This function is useful to allow building nested documents using
* a single buffer owned by the top-level bson document.
*
* Returns:
* true if successful; otherwise false and @child is invalid.
*
* Side effects:
* @child is initialized if true is returned.
*
*--------------------------------------------------------------------------
*/
bool
bson_append_array_begin (bson_t *bson, /* IN */
const char *key, /* IN */
int key_length, /* IN */
bson_t *child) /* IN */
{
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (child);
return _bson_append_bson_begin (
bson, key, key_length, BSON_TYPE_ARRAY, child);
}
/*
*--------------------------------------------------------------------------
*
* bson_append_array_end --
*
* Complete a call to bson_append_array_begin().
*
* It is safe to append other fields to @bson after calling this
* function.
*
* Returns:
* true if successful.
*
* Side effects:
* @child is invalid after calling this function.
*
*--------------------------------------------------------------------------
*/
bool
bson_append_array_end (bson_t *bson, /* IN */
bson_t *child) /* IN */
{
BSON_ASSERT (bson);
BSON_ASSERT (child);
return _bson_append_bson_end (bson, child);
}
/*
*--------------------------------------------------------------------------
*
* bson_append_document_begin --
*
* Start appending a new document.
*
* Use @child to append to the data area for the given field.
*
* It is a programming error to call any other bson function on
* @bson until bson_append_document_end() has been called. It is
* valid to call bson_append*() functions on @child.
*
* This function is useful to allow building nested documents using
* a single buffer owned by the top-level bson document.
*
* Returns:
* true if successful; otherwise false and @child is invalid.
*
* Side effects:
* @child is initialized if true is returned.
*
*--------------------------------------------------------------------------
*/
bool
bson_append_document_begin (bson_t *bson, /* IN */
const char *key, /* IN */
int key_length, /* IN */
bson_t *child) /* IN */
{
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (child);
return _bson_append_bson_begin (
bson, key, key_length, BSON_TYPE_DOCUMENT, child);
}
/*
*--------------------------------------------------------------------------
*
* bson_append_document_end --
*
* Complete a call to bson_append_document_begin().
*
* It is safe to append new fields to @bson after calling this
* function, if true is returned.
*
* Returns:
* true if successful; otherwise false indicating INT_MAX overflow.
*
* Side effects:
* @child is destroyed and invalid after calling this function.
*
*--------------------------------------------------------------------------
*/
bool
bson_append_document_end (bson_t *bson, /* IN */
bson_t *child) /* IN */
{
BSON_ASSERT (bson);
BSON_ASSERT (child);
return _bson_append_bson_end (bson, child);
}
/*
*--------------------------------------------------------------------------
*
* bson_append_array --
*
* Append an array to @bson.
*
* Generally, bson_append_array_begin() will result in faster code
* since few buffers need to be malloced.
*
* Returns:
* true if successful; otherwise false indicating INT_MAX overflow.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
bool
bson_append_array (bson_t *bson, /* IN */
const char *key, /* IN */
int key_length, /* IN */
const bson_t *array) /* IN */
{
static const uint8_t type = BSON_TYPE_ARRAY;
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (array);
if (key_length < 0) {
key_length = (int) strlen (key);
}
/*
* Let's be a bit pedantic and ensure the array has properly formatted key
* names. We will verify this simply by checking the first element for "0"
* if the array is non-empty.
*/
if (array && !bson_empty (array)) {
bson_iter_t iter;
if (bson_iter_init (&iter, array) && bson_iter_next (&iter)) {
if (0 != strcmp ("0", bson_iter_key (&iter))) {
fprintf (stderr,
"%s(): invalid array detected. first element of array "
"parameter is not \"0\".\n",
BSON_FUNC);
}
}
}
return _bson_append (bson,
4,
(1 + key_length + 1 + array->len),
1,
&type,
key_length,
key,
1,
&gZero,
array->len,
_bson_data (array));
}
/*
*--------------------------------------------------------------------------
*
* bson_append_binary --
*
* Append binary data to @bson. The field will have the
* BSON_TYPE_BINARY type.
*
* Parameters:
* @subtype: the BSON Binary Subtype. See bsonspec.org for more
* information.
* @binary: a pointer to the raw binary data.
* @length: the size of @binary in bytes.
*
* Returns:
* true if successful; otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
bool
bson_append_binary (bson_t *bson, /* IN */
const char *key, /* IN */
int key_length, /* IN */
bson_subtype_t subtype, /* IN */
const uint8_t *binary, /* IN */
uint32_t length) /* IN */
{
static const uint8_t type = BSON_TYPE_BINARY;
uint32_t length_le;
uint32_t deprecated_length_le;
uint8_t subtype8 = 0;
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (binary);
if (key_length < 0) {
key_length = (int) strlen (key);
}
subtype8 = subtype;
if (subtype == BSON_SUBTYPE_BINARY_DEPRECATED) {
length_le = BSON_UINT32_TO_LE (length + 4);
deprecated_length_le = BSON_UINT32_TO_LE (length);
return _bson_append (bson,
7,
(1 + key_length + 1 + 4 + 1 + 4 + length),
1,
&type,
key_length,
key,
1,
&gZero,
4,
&length_le,
1,
&subtype8,
4,
&deprecated_length_le,
length,
binary);
} else {
length_le = BSON_UINT32_TO_LE (length);
return _bson_append (bson,
6,
(1 + key_length + 1 + 4 + 1 + length),
1,
&type,
key_length,
key,
1,
&gZero,
4,
&length_le,
1,
&subtype8,
length,
binary);
}
}
/*
*--------------------------------------------------------------------------
*
* bson_append_bool --
*
* Append a new field to @bson with the name @key. The value is
* a boolean indicated by @value.
*
* Returns:
* true if succesful; otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
bool
bson_append_bool (bson_t *bson, /* IN */
const char *key, /* IN */
int key_length, /* IN */
bool value) /* IN */
{
static const uint8_t type = BSON_TYPE_BOOL;
uint8_t abyte = !!value;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (key_length < 0) {
key_length = (int) strlen (key);
}
return _bson_append (bson,
4,
(1 + key_length + 1 + 1),
1,
&type,
key_length,
key,
1,
&gZero,
1,
&abyte);
}
/*
*--------------------------------------------------------------------------
*
* bson_append_code --
*
* Append a new field to @bson containing javascript code.
*
* @javascript MUST be a zero terminated UTF-8 string. It MUST NOT
* containing embedded \0 characters.
*
* Returns:
* true if successful; otherwise false.
*
* Side effects:
* None.
*
* See also:
* bson_append_code_with_scope().
*
*--------------------------------------------------------------------------
*/
bool
bson_append_code (bson_t *bson, /* IN */
const char *key, /* IN */
int key_length, /* IN */
const char *javascript) /* IN */
{
static const uint8_t type = BSON_TYPE_CODE;
uint32_t length;
uint32_t length_le;
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (javascript);
if (key_length < 0) {
key_length = (int) strlen (key);
}
length = (int) strlen (javascript) + 1;
length_le = BSON_UINT32_TO_LE (length);
return _bson_append (bson,
5,
(1 + key_length + 1 + 4 + length),
1,
&type,
key_length,
key,
1,
&gZero,
4,
&length_le,
length,
javascript);
}
/*
*--------------------------------------------------------------------------
*
* bson_append_code_with_scope --
*
* Append a new field to @bson containing javascript code with
* supplied scope.
*
* Returns:
* true if successful; otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
bool
bson_append_code_with_scope (bson_t *bson, /* IN */
const char *key, /* IN */
int key_length, /* IN */
const char *javascript, /* IN */
const bson_t *scope) /* IN */
{
static const uint8_t type = BSON_TYPE_CODEWSCOPE;
uint32_t codews_length_le;
uint32_t codews_length;
uint32_t js_length_le;
uint32_t js_length;
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (javascript);
if (scope == NULL) {
return bson_append_code (bson, key, key_length, javascript);
}
if (key_length < 0) {
key_length = (int) strlen (key);
}
js_length = (int) strlen (javascript) + 1;
js_length_le = BSON_UINT32_TO_LE (js_length);
codews_length = 4 + 4 + js_length + scope->len;
codews_length_le = BSON_UINT32_TO_LE (codews_length);
return _bson_append (bson,
7,
(1 + key_length + 1 + 4 + 4 + js_length + scope->len),
1,
&type,
key_length,
key,
1,
&gZero,
4,
&codews_length_le,
4,
&js_length_le,
js_length,
javascript,
scope->len,
_bson_data (scope));
}
/*
*--------------------------------------------------------------------------
*
* bson_append_dbpointer --
*
* This BSON data type is DEPRECATED.
*
* Append a BSON dbpointer field to @bson.
*
* Returns:
* true if successful; otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
bool
bson_append_dbpointer (bson_t *bson, /* IN */
const char *key, /* IN */
int key_length, /* IN */
const char *collection, /* IN */
const bson_oid_t *oid)
{
static const uint8_t type = BSON_TYPE_DBPOINTER;
uint32_t length;
uint32_t length_le;
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (collection);
BSON_ASSERT (oid);
if (key_length < 0) {
key_length = (int) strlen (key);
}
length = (int) strlen (collection) + 1;
length_le = BSON_UINT32_TO_LE (length);
return _bson_append (bson,
6,
(1 + key_length + 1 + 4 + length + 12),
1,
&type,
key_length,
key,
1,
&gZero,
4,
&length_le,
length,
collection,
12,
oid);
}
/*
*--------------------------------------------------------------------------
*
* bson_append_document --
*
* Append a new field to @bson containing a BSON document.
*
* In general, using bson_append_document_begin() results in faster
* code and less memory fragmentation.
*
* Returns:
* true if successful; otherwise false.
*
* Side effects:
* None.
*
* See also:
* bson_append_document_begin().
*
*--------------------------------------------------------------------------
*/
bool
bson_append_document (bson_t *bson, /* IN */
const char *key, /* IN */
int key_length, /* IN */
const bson_t *value) /* IN */
{
static const uint8_t type = BSON_TYPE_DOCUMENT;
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (value);
if (key_length < 0) {
key_length = (int) strlen (key);
}
return _bson_append (bson,
4,
(1 + key_length + 1 + value->len),
1,
&type,
key_length,
key,
1,
&gZero,
value->len,
_bson_data (value));
}
bool
bson_append_double (bson_t *bson, const char *key, int key_length, double value)
{
static const uint8_t type = BSON_TYPE_DOUBLE;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (key_length < 0) {
key_length = (int) strlen (key);
}
#if BSON_BYTE_ORDER == BSON_BIG_ENDIAN
value = BSON_DOUBLE_TO_LE (value);
#endif
return _bson_append (bson,
4,
(1 + key_length + 1 + 8),
1,
&type,
key_length,
key,
1,
&gZero,
8,
&value);
}
bool
bson_append_int32 (bson_t *bson, const char *key, int key_length, int32_t value)
{
static const uint8_t type = BSON_TYPE_INT32;
uint32_t value_le;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (key_length < 0) {
key_length = (int) strlen (key);
}
value_le = BSON_UINT32_TO_LE (value);
return _bson_append (bson,
4,
(1 + key_length + 1 + 4),
1,
&type,
key_length,
key,
1,
&gZero,
4,
&value_le);
}
bool
bson_append_int64 (bson_t *bson, const char *key, int key_length, int64_t value)
{
static const uint8_t type = BSON_TYPE_INT64;
uint64_t value_le;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (key_length < 0) {
key_length = (int) strlen (key);
}
value_le = BSON_UINT64_TO_LE (value);
return _bson_append (bson,
4,
(1 + key_length + 1 + 8),
1,
&type,
key_length,
key,
1,
&gZero,
8,
&value_le);
}
bool
bson_append_decimal128 (bson_t *bson,
const char *key,
int key_length,
const bson_decimal128_t *value)
{
static const uint8_t type = BSON_TYPE_DECIMAL128;
uint64_t value_le[2];
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (value);
if (key_length < 0) {
key_length = (int) strlen (key);
}
value_le[0] = BSON_UINT64_TO_LE (value->low);
value_le[1] = BSON_UINT64_TO_LE (value->high);
return _bson_append (bson,
4,
(1 + key_length + 1 + 16),
1,
&type,
key_length,
key,
1,
&gZero,
16,
value_le);
}
bool
bson_append_iter (bson_t *bson,
const char *key,
int key_length,
const bson_iter_t *iter)
{
bool ret = false;
BSON_ASSERT (bson);
BSON_ASSERT (iter);
if (!key) {
key = bson_iter_key (iter);
key_length = -1;
}
switch (bson_iter_type_unsafe (iter)) {
case BSON_TYPE_EOD:
return false;
case BSON_TYPE_DOUBLE:
ret = bson_append_double (bson, key, key_length, bson_iter_double (iter));
break;
case BSON_TYPE_UTF8: {
uint32_t len = 0;
const char *str;
str = bson_iter_utf8 (iter, &len);
ret = bson_append_utf8 (bson, key, key_length, str, len);
} break;
case BSON_TYPE_DOCUMENT: {
const uint8_t *buf = NULL;
uint32_t len = 0;
bson_t doc;
bson_iter_document (iter, &len, &buf);
if (bson_init_static (&doc, buf, len)) {
ret = bson_append_document (bson, key, key_length, &doc);
bson_destroy (&doc);
}
} break;
case BSON_TYPE_ARRAY: {
const uint8_t *buf = NULL;
uint32_t len = 0;
bson_t doc;
bson_iter_array (iter, &len, &buf);
if (bson_init_static (&doc, buf, len)) {
ret = bson_append_array (bson, key, key_length, &doc);
bson_destroy (&doc);
}
} break;
case BSON_TYPE_BINARY: {
const uint8_t *binary = NULL;
bson_subtype_t subtype = BSON_SUBTYPE_BINARY;
uint32_t len = 0;
bson_iter_binary (iter, &subtype, &len, &binary);
ret = bson_append_binary (bson, key, key_length, subtype, binary, len);
} break;
case BSON_TYPE_UNDEFINED:
ret = bson_append_undefined (bson, key, key_length);
break;
case BSON_TYPE_OID:
ret = bson_append_oid (bson, key, key_length, bson_iter_oid (iter));
break;
case BSON_TYPE_BOOL:
ret = bson_append_bool (bson, key, key_length, bson_iter_bool (iter));
break;
case BSON_TYPE_DATE_TIME:
ret = bson_append_date_time (
bson, key, key_length, bson_iter_date_time (iter));
break;
case BSON_TYPE_NULL:
ret = bson_append_null (bson, key, key_length);
break;
case BSON_TYPE_REGEX: {
const char *regex;
const char *options;
regex = bson_iter_regex (iter, &options);
ret = bson_append_regex (bson, key, key_length, regex, options);
} break;
case BSON_TYPE_DBPOINTER: {
const bson_oid_t *oid;
uint32_t len;
const char *collection;
bson_iter_dbpointer (iter, &len, &collection, &oid);
ret = bson_append_dbpointer (bson, key, key_length, collection, oid);
} break;
case BSON_TYPE_CODE: {
uint32_t len;
const char *code;
code = bson_iter_code (iter, &len);
ret = bson_append_code (bson, key, key_length, code);
} break;
case BSON_TYPE_SYMBOL: {
uint32_t len;
const char *symbol;
symbol = bson_iter_symbol (iter, &len);
ret = bson_append_symbol (bson, key, key_length, symbol, len);
} break;
case BSON_TYPE_CODEWSCOPE: {
const uint8_t *scope = NULL;
uint32_t scope_len = 0;
uint32_t len = 0;
const char *javascript = NULL;
bson_t doc;
javascript = bson_iter_codewscope (iter, &len, &scope_len, &scope);
if (bson_init_static (&doc, scope, scope_len)) {
ret = bson_append_code_with_scope (
bson, key, key_length, javascript, &doc);
bson_destroy (&doc);
}
} break;
case BSON_TYPE_INT32:
ret = bson_append_int32 (bson, key, key_length, bson_iter_int32 (iter));
break;
case BSON_TYPE_TIMESTAMP: {
uint32_t ts;
uint32_t inc;
bson_iter_timestamp (iter, &ts, &inc);
ret = bson_append_timestamp (bson, key, key_length, ts, inc);
} break;
case BSON_TYPE_INT64:
ret = bson_append_int64 (bson, key, key_length, bson_iter_int64 (iter));
break;
case BSON_TYPE_DECIMAL128: {
bson_decimal128_t dec;
if (!bson_iter_decimal128 (iter, &dec)) {
return false;
}
ret = bson_append_decimal128 (bson, key, key_length, &dec);
} break;
case BSON_TYPE_MAXKEY:
ret = bson_append_maxkey (bson, key, key_length);
break;
case BSON_TYPE_MINKEY:
ret = bson_append_minkey (bson, key, key_length);
break;
default:
break;
}
return ret;
}
bool
bson_append_maxkey (bson_t *bson, const char *key, int key_length)
{
static const uint8_t type = BSON_TYPE_MAXKEY;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (key_length < 0) {
key_length = (int) strlen (key);
}
return _bson_append (
bson, 3, (1 + key_length + 1), 1, &type, key_length, key, 1, &gZero);
}
bool
bson_append_minkey (bson_t *bson, const char *key, int key_length)
{
static const uint8_t type = BSON_TYPE_MINKEY;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (key_length < 0) {
key_length = (int) strlen (key);
}
return _bson_append (
bson, 3, (1 + key_length + 1), 1, &type, key_length, key, 1, &gZero);
}
bool
bson_append_null (bson_t *bson, const char *key, int key_length)
{
static const uint8_t type = BSON_TYPE_NULL;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (key_length < 0) {
key_length = (int) strlen (key);
}
return _bson_append (
bson, 3, (1 + key_length + 1), 1, &type, key_length, key, 1, &gZero);
}
bool
bson_append_oid (bson_t *bson,
const char *key,
int key_length,
const bson_oid_t *value)
{
static const uint8_t type = BSON_TYPE_OID;
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (value);
if (key_length < 0) {
key_length = (int) strlen (key);
}
return _bson_append (bson,
4,
(1 + key_length + 1 + 12),
1,
&type,
key_length,
key,
1,
&gZero,
12,
value);
}
/*
 *--------------------------------------------------------------------------
 *
 * _bson_append_regex_options_sorted --
 *
 *       Helper to append regex options to a buffer in a sorted order.
 *       Any duplicate or unsupported options will be ignored.
 *
 * Parameters:
 *       @buffer: Buffer to which sorted options will be appended
 *       @options: Regex options
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
static BSON_INLINE void
_bson_append_regex_options_sorted (bson_string_t *buffer, /* IN */
const char *options) /* IN */
{
const char *c;
/* Walk the canonical sorted option list; emit each option at most once,
 * and only if it appears in @options.  Unknown characters are dropped. */
for (c = BSON_REGEX_OPTIONS_SORTED; *c; c++) {
if (strchr (options, *c)) {
bson_string_append_c (buffer, *c);
}
}
}
/*
 * bson_append_regex --
 *
 *    Append a BSON regex element (type 0x0B) under @key.  A NULL @regex or
 *    @options is treated as the empty string.  Options are de-duplicated
 *    and written in sorted order, as required for canonical BSON.
 *
 * Returns true on success, false if appending would overflow the document.
 */
bool
bson_append_regex (bson_t *bson,
                   const char *key,
                   int key_length,
                   const char *regex,
                   const char *options)
{
   static const uint8_t type = BSON_TYPE_REGEX;
   uint32_t regex_len;
   bson_string_t *options_sorted;
   bool r;

   BSON_ASSERT (bson);
   BSON_ASSERT (key);

   if (key_length < 0) {
      key_length = (int) strlen (key);
   }

   if (!regex) {
      regex = "";
   }

   if (!options) {
      options = "";
   }

   /* regex is written as a cstring: include its NUL terminator. */
   regex_len = (uint32_t) strlen (regex) + 1;
   options_sorted = bson_string_new (NULL);
   _bson_append_regex_options_sorted (options_sorted, options);

   /* Total length must count the options cstring's NUL terminator too
    * (the trailing "+ 1"); the data segments below write exactly that
    * many bytes: type + key + NUL + regex cstring + options cstring. */
   r = _bson_append (bson,
                     5,
                     (1 + key_length + 1 + regex_len + options_sorted->len + 1),
                     1,
                     &type,
                     key_length,
                     key,
                     1,
                     &gZero,
                     regex_len,
                     regex,
                     options_sorted->len + 1,
                     options_sorted->str);

   bson_string_free (options_sorted, true);

   return r;
}
/* Append a UTF-8 string element (type 0x02) under @key.  A NULL @value is
 * stored as BSON null.  Negative @key_length / @length mean "NUL-terminated,
 * measure with strlen". */
bool
bson_append_utf8 (
bson_t *bson, const char *key, int key_length, const char *value, int length)
{
static const uint8_t type = BSON_TYPE_UTF8;
uint32_t length_le;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (BSON_UNLIKELY (!value)) {
return bson_append_null (bson, key, key_length);
}
if (BSON_UNLIKELY (key_length < 0)) {
key_length = (int) strlen (key);
}
if (BSON_UNLIKELY (length < 0)) {
length = (int) strlen (value);
}
/* The on-wire string length prefix counts the trailing NUL. */
length_le = BSON_UINT32_TO_LE (length + 1);
return _bson_append (bson,
6,
(1 + key_length + 1 + 4 + length + 1),
1,
&type,
key_length,
key,
1,
&gZero,
4,
&length_le,
length,
value,
1,
&gZero);
}
/* Append a deprecated BSON symbol element (type 0x0E); same wire layout as
 * a UTF-8 string.  NULL @value degrades to BSON null. */
bool
bson_append_symbol (
bson_t *bson, const char *key, int key_length, const char *value, int length)
{
static const uint8_t type = BSON_TYPE_SYMBOL;
uint32_t length_le;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (!value) {
return bson_append_null (bson, key, key_length);
}
if (key_length < 0) {
key_length = (int) strlen (key);
}
if (length < 0) {
length = (int) strlen (value);
}
length_le = BSON_UINT32_TO_LE (length + 1);
return _bson_append (bson,
6,
(1 + key_length + 1 + 4 + length + 1),
1,
&type,
key_length,
key,
1,
&gZero,
4,
&length_le,
length,
value,
1,
&gZero);
}
/* Append @value (seconds since the epoch) as a BSON date-time element,
 * delegating to bson_append_timeval. */
bool
bson_append_time_t (bson_t *bson, const char *key, int key_length, time_t value)
{
#ifdef BSON_OS_WIN32
/* On Windows, timeval.tv_sec is a long; time_t may be 64-bit. */
struct timeval tv = {(long) value, 0};
#else
struct timeval tv = {value, 0};
#endif
BSON_ASSERT (bson);
BSON_ASSERT (key);
return bson_append_timeval (bson, key, key_length, &tv);
}
/* Append a BSON timestamp element (type 0x11): an internal MongoDB type
 * packing @timestamp into the high 32 bits and @increment into the low. */
bool
bson_append_timestamp (bson_t *bson,
const char *key,
int key_length,
uint32_t timestamp,
uint32_t increment)
{
static const uint8_t type = BSON_TYPE_TIMESTAMP;
uint64_t value;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (key_length < 0) {
key_length = (int) strlen (key);
}
value = ((((uint64_t) timestamp) << 32) | ((uint64_t) increment));
value = BSON_UINT64_TO_LE (value);
return _bson_append (bson,
4,
(1 + key_length + 1 + 8),
1,
&type,
key_length,
key,
1,
&gZero,
8,
&value);
}
/* Append the current UTC time as a BSON date-time element under @key. */
bool
bson_append_now_utc (bson_t *bson, const char *key, int key_length)
{
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (key_length >= -1);
return bson_append_time_t (bson, key, key_length, time (NULL));
}
/* Append a BSON date-time element (type 0x09): @value is milliseconds since
 * the UNIX epoch, stored little-endian. */
bool
bson_append_date_time (bson_t *bson,
const char *key,
int key_length,
int64_t value)
{
static const uint8_t type = BSON_TYPE_DATE_TIME;
uint64_t value_le;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (key_length < 0) {
key_length = (int) strlen (key);
}
value_le = BSON_UINT64_TO_LE (value);
return _bson_append (bson,
4,
(1 + key_length + 1 + 8),
1,
&type,
key_length,
key,
1,
&gZero,
8,
&value_le);
}
/* Convert @value to milliseconds since the epoch and append as date-time.
 * NOTE(review): unix_msec is unsigned here but bson_append_date_time takes
 * int64_t — pre-epoch timevals presumably round-trip via wraparound; confirm. */
bool
bson_append_timeval (bson_t *bson,
const char *key,
int key_length,
struct timeval *value)
{
uint64_t unix_msec;
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (value);
unix_msec =
(((uint64_t) value->tv_sec) * 1000UL) + (value->tv_usec / 1000UL);
return bson_append_date_time (bson, key, key_length, unix_msec);
}
/* Append a deprecated BSON undefined element (type 0x06, no payload). */
bool
bson_append_undefined (bson_t *bson, const char *key, int key_length)
{
static const uint8_t type = BSON_TYPE_UNDEFINED;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (key_length < 0) {
key_length = (int) strlen (key);
}
return _bson_append (
bson, 3, (1 + key_length + 1), 1, &type, key_length, key, 1, &gZero);
}
/* Append a boxed bson_value_t under @key, dispatching on value_type to the
 * matching typed append function.  Returns false for BSON_TYPE_EOD, unknown
 * types, or when the underlying append fails (e.g. document overflow or an
 * embedded document that fails bson_init_static validation). */
bool
bson_append_value (bson_t *bson,
const char *key,
int key_length,
const bson_value_t *value)
{
bson_t local;
bool ret = false;
BSON_ASSERT (bson);
BSON_ASSERT (key);
BSON_ASSERT (value);
switch (value->value_type) {
case BSON_TYPE_DOUBLE:
ret = bson_append_double (bson, key, key_length, value->value.v_double);
break;
case BSON_TYPE_UTF8:
ret = bson_append_utf8 (bson,
key,
key_length,
value->value.v_utf8.str,
value->value.v_utf8.len);
break;
case BSON_TYPE_DOCUMENT:
/* Wrap the raw bytes in a read-only bson_t; skipped if the header
 * fails validation, leaving ret false. */
if (bson_init_static (
&local, value->value.v_doc.data, value->value.v_doc.data_len)) {
ret = bson_append_document (bson, key, key_length, &local);
bson_destroy (&local);
}
break;
case BSON_TYPE_ARRAY:
if (bson_init_static (
&local, value->value.v_doc.data, value->value.v_doc.data_len)) {
ret = bson_append_array (bson, key, key_length, &local);
bson_destroy (&local);
}
break;
case BSON_TYPE_BINARY:
ret = bson_append_binary (bson,
key,
key_length,
value->value.v_binary.subtype,
value->value.v_binary.data,
value->value.v_binary.data_len);
break;
case BSON_TYPE_UNDEFINED:
ret = bson_append_undefined (bson, key, key_length);
break;
case BSON_TYPE_OID:
ret = bson_append_oid (bson, key, key_length, &value->value.v_oid);
break;
case BSON_TYPE_BOOL:
ret = bson_append_bool (bson, key, key_length, value->value.v_bool);
break;
case BSON_TYPE_DATE_TIME:
ret =
bson_append_date_time (bson, key, key_length, value->value.v_datetime);
break;
case BSON_TYPE_NULL:
ret = bson_append_null (bson, key, key_length);
break;
case BSON_TYPE_REGEX:
ret = bson_append_regex (bson,
key,
key_length,
value->value.v_regex.regex,
value->value.v_regex.options);
break;
case BSON_TYPE_DBPOINTER:
ret = bson_append_dbpointer (bson,
key,
key_length,
value->value.v_dbpointer.collection,
&value->value.v_dbpointer.oid);
break;
case BSON_TYPE_CODE:
ret = bson_append_code (bson, key, key_length, value->value.v_code.code);
break;
case BSON_TYPE_SYMBOL:
ret = bson_append_symbol (bson,
key,
key_length,
value->value.v_symbol.symbol,
value->value.v_symbol.len);
break;
case BSON_TYPE_CODEWSCOPE:
if (bson_init_static (&local,
value->value.v_codewscope.scope_data,
value->value.v_codewscope.scope_len)) {
ret = bson_append_code_with_scope (
bson, key, key_length, value->value.v_codewscope.code, &local);
bson_destroy (&local);
}
break;
case BSON_TYPE_INT32:
ret = bson_append_int32 (bson, key, key_length, value->value.v_int32);
break;
case BSON_TYPE_TIMESTAMP:
ret = bson_append_timestamp (bson,
key,
key_length,
value->value.v_timestamp.timestamp,
value->value.v_timestamp.increment);
break;
case BSON_TYPE_INT64:
ret = bson_append_int64 (bson, key, key_length, value->value.v_int64);
break;
case BSON_TYPE_DECIMAL128:
ret = bson_append_decimal128 (
bson, key, key_length, &(value->value.v_decimal128));
break;
case BSON_TYPE_MAXKEY:
ret = bson_append_maxkey (bson, key, key_length);
break;
case BSON_TYPE_MINKEY:
ret = bson_append_minkey (bson, key, key_length);
break;
case BSON_TYPE_EOD:
default:
break;
}
return ret;
}
/* Initialize a caller-owned bson_t in place as an empty document using the
 * inline (stack) buffer.  The 5-byte payload is the minimal BSON document:
 * little-endian length 5 followed by the terminating NUL. */
void
bson_init (bson_t *bson)
{
bson_impl_inline_t *impl = (bson_impl_inline_t *) bson;
BSON_ASSERT (bson);
/* STATIC: caller owns the bson_t; bson_destroy must not free it. */
impl->flags = BSON_FLAG_INLINE | BSON_FLAG_STATIC;
impl->len = 5;
impl->data[0] = 5;
impl->data[1] = 0;
impl->data[2] = 0;
impl->data[3] = 0;
impl->data[4] = 0;
}
/* Reset an initialized bson_t to an empty document, keeping whatever buffer
 * (inline or allocated) it already has. */
void
bson_reinit (bson_t *bson)
{
uint8_t *data;
BSON_ASSERT (bson);
data = _bson_data (bson);
bson->len = 5;
data[0] = 5;
data[1] = 0;
data[2] = 0;
data[3] = 0;
data[4] = 0;
}
/* Initialize @bson as a read-only view over caller-owned @data without
 * copying.  Validates only the outer framing: length in range, embedded
 * little-endian length equal to @length, and trailing NUL.  Returns false
 * on any mismatch, leaving @bson uninitialized. */
bool
bson_init_static (bson_t *bson, const uint8_t *data, size_t length)
{
bson_impl_alloc_t *impl = (bson_impl_alloc_t *) bson;
uint32_t len_le;
BSON_ASSERT (bson);
BSON_ASSERT (data);
if ((length < 5) || (length > INT_MAX)) {
return false;
}
/* memcpy avoids a potentially misaligned 32-bit load from data. */
memcpy (&len_le, data, sizeof (len_le));
if ((size_t) BSON_UINT32_FROM_LE (len_le) != length) {
return false;
}
if (data[length - 1]) {
return false;
}
impl->flags = BSON_FLAG_STATIC | BSON_FLAG_RDONLY;
impl->len = (uint32_t) length;
impl->parent = NULL;
impl->depth = 0;
impl->buf = &impl->alloc;
impl->buflen = &impl->alloclen;
impl->offset = 0;
/* Cast away const: RDONLY flag prevents mutation/free of the buffer. */
impl->alloc = (uint8_t *) data;
impl->alloclen = length;
impl->realloc = NULL;
impl->realloc_func_ctx = NULL;
return true;
}
/* Heap-allocate a new empty document using the inline buffer.  The caller
 * must release it with bson_destroy (no STATIC flag: destroy frees it). */
bson_t *
bson_new (void)
{
bson_impl_inline_t *impl;
bson_t *bson;
bson = bson_malloc (sizeof *bson);
impl = (bson_impl_inline_t *) bson;
impl->flags = BSON_FLAG_INLINE;
impl->len = 5;
impl->data[0] = 5;
impl->data[1] = 0;
impl->data[2] = 0;
impl->data[3] = 0;
impl->data[4] = 0;
return bson;
}
/* Heap-allocate an empty document pre-sized for @size bytes of payload.
 * Small sizes use the inline buffer; larger ones get a heap data buffer
 * up front to avoid growth reallocations. */
bson_t *
bson_sized_new (size_t size)
{
bson_impl_alloc_t *impl_a;
bson_t *b;
BSON_ASSERT (size <= INT32_MAX);
b = bson_malloc (sizeof *b);
impl_a = (bson_impl_alloc_t *) b;
if (size <= BSON_INLINE_DATA_SIZE) {
bson_init (b);
/* bson_init marks STATIC; clear it since this bson_t is heap-owned. */
b->flags &= ~BSON_FLAG_STATIC;
} else {
impl_a->flags = BSON_FLAG_NONE;
impl_a->len = 5;
impl_a->parent = NULL;
impl_a->depth = 0;
impl_a->buf = &impl_a->alloc;
impl_a->buflen = &impl_a->alloclen;
impl_a->offset = 0;
impl_a->alloclen = BSON_MAX (5, size);
impl_a->alloc = bson_malloc (impl_a->alloclen);
impl_a->alloc[0] = 5;
impl_a->alloc[1] = 0;
impl_a->alloc[2] = 0;
impl_a->alloc[3] = 0;
impl_a->alloc[4] = 0;
impl_a->realloc = bson_realloc_ctx;
impl_a->realloc_func_ctx = NULL;
}
return b;
}
/* Allocate a new document initialized with a COPY of @data.  Validates the
 * outer framing (length bounds, trailing NUL, embedded length == @length)
 * and returns NULL on mismatch. */
bson_t *
bson_new_from_data (const uint8_t *data, size_t length)
{
uint32_t len_le;
bson_t *bson;
BSON_ASSERT (data);
if ((length < 5) || (length > INT_MAX) || data[length - 1]) {
return NULL;
}
memcpy (&len_le, data, sizeof (len_le));
if (length != (size_t) BSON_UINT32_FROM_LE (len_le)) {
return NULL;
}
bson = bson_sized_new (length);
memcpy (_bson_data (bson), data, length);
bson->len = (uint32_t) length;
return bson;
}
/*
 * bson_new_from_buffer --
 *
 *    Create a bson_t that takes over (but does not copy) the caller's
 *    buffer.  If *buf is NULL, a minimal empty document is allocated via
 *    @realloc_func.  Otherwise the buffer must hold a framed BSON document
 *    whose embedded length fits within *buf_len.
 *
 * Returns a new bson_t (free with bson_destroy; NO_FREE leaves the buffer
 * to the caller), or NULL if the buffer fails validation.
 */
bson_t *
bson_new_from_buffer (uint8_t **buf,
                      size_t *buf_len,
                      bson_realloc_func realloc_func,
                      void *realloc_func_ctx)
{
   bson_impl_alloc_t *impl;
   uint32_t len_le;
   uint32_t length;
   bson_t *bson;

   BSON_ASSERT (buf);
   BSON_ASSERT (buf_len);

   if (!realloc_func) {
      realloc_func = bson_realloc_ctx;
   }

   bson = bson_malloc0 (sizeof *bson);
   impl = (bson_impl_alloc_t *) bson;

   if (!*buf) {
      /* Allocate and write the minimal empty document: LE length 5 + NUL. */
      length = 5;
      len_le = BSON_UINT32_TO_LE (length);
      *buf_len = 5;
      *buf = realloc_func (*buf, *buf_len, realloc_func_ctx);
      memcpy (*buf, &len_le, sizeof (len_le));
      (*buf)[4] = '\0';
   } else {
      if ((*buf_len < 5) || (*buf_len > INT_MAX)) {
         bson_free (bson);
         return NULL;
      }
      /* memcpy avoids a misaligned 32-bit load from the raw buffer. */
      memcpy (&len_le, *buf, sizeof (len_le));
      length = BSON_UINT32_FROM_LE (len_le);
      /* Reject a document whose claimed length does not fit in the
       * caller's buffer; without this the trailing-NUL check below
       * would read out of bounds. */
      if ((length < 5) || ((size_t) length > *buf_len)) {
         bson_free (bson);
         return NULL;
      }
   }

   if ((*buf)[length - 1]) {
      bson_free (bson);
      return NULL;
   }

   impl->flags = BSON_FLAG_NO_FREE;
   impl->len = length;
   impl->buf = buf;
   impl->buflen = buf_len;
   impl->realloc = realloc_func;
   impl->realloc_func_ctx = realloc_func_ctx;

   return bson;
}
/* Return a newly allocated deep copy of @bson (caller frees with
 * bson_destroy). */
bson_t *
bson_copy (const bson_t *bson)
{
const uint8_t *data;
BSON_ASSERT (bson);
data = _bson_data (bson);
return bson_new_from_data (data, bson->len);
}
/* Deep-copy @src into caller-owned @dst (no allocation of dst itself).
 * Inline sources are copied by struct assignment; allocated sources get a
 * fresh power-of-two-sized heap buffer. */
void
bson_copy_to (const bson_t *src, bson_t *dst)
{
const uint8_t *data;
bson_impl_alloc_t *adst;
size_t len;
BSON_ASSERT (src);
BSON_ASSERT (dst);
if ((src->flags & BSON_FLAG_INLINE)) {
memcpy (dst, src, sizeof *dst);
/* dst is caller-owned: mark STATIC so bson_destroy won't free it. */
dst->flags = (BSON_FLAG_STATIC | BSON_FLAG_INLINE);
return;
}
data = _bson_data (src);
/* Round the buffer up so subsequent appends don't realloc immediately. */
len = bson_next_power_of_two ((size_t) src->len);
adst = (bson_impl_alloc_t *) dst;
adst->flags = BSON_FLAG_STATIC;
adst->len = src->len;
adst->parent = NULL;
adst->depth = 0;
adst->buf = &adst->alloc;
adst->buflen = &adst->alloclen;
adst->offset = 0;
adst->alloc = bson_malloc (len);
adst->alloclen = len;
adst->realloc = bson_realloc_ctx;
adst->realloc_func_ctx = NULL;
memcpy (adst->alloc, data, src->len);
}
/* Return true if @name matches @first_exclude or any name in the
 * NULL-terminated varargs list.  The va_list is copied so the caller can
 * rescan it for every key. */
static bool
should_ignore (const char *first_exclude, va_list args, const char *name)
{
bool ret = false;
const char *exclude = first_exclude;
va_list args_copy;
va_copy (args_copy, args);
do {
if (!strcmp (name, exclude)) {
ret = true;
break;
}
} while ((exclude = va_arg (args_copy, const char *)));
va_end (args_copy);
return ret;
}
/* Copy every element of @src into @dst except those whose key appears in
 * the exclusion list.  @dst must already be initialized. */
static void
_bson_copy_to_excluding_va (const bson_t *src,
bson_t *dst,
const char *first_exclude,
va_list args)
{
bson_iter_t iter;
if (bson_iter_init (&iter, src)) {
while (bson_iter_next (&iter)) {
if (!should_ignore (first_exclude, args, bson_iter_key (&iter))) {
if (!bson_append_iter (dst, NULL, 0, &iter)) {
/*
 * This should not be able to happen since we are copying
 * from within a valid bson_t.
 */
BSON_ASSERT (false);
return;
}
}
}
}
}
/* Public wrapper: initializes @dst then copies @src minus the excluded keys.
 * The exclusion list must be NULL-terminated. */
void
bson_copy_to_excluding (const bson_t *src,
bson_t *dst,
const char *first_exclude,
...)
{
va_list args;
BSON_ASSERT (src);
BSON_ASSERT (dst);
BSON_ASSERT (first_exclude);
bson_init (dst);
va_start (args, first_exclude);
_bson_copy_to_excluding_va (src, dst, first_exclude, args);
va_end (args);
}
/* Same as bson_copy_to_excluding but appends to an already-initialized
 * @dst instead of reinitializing it. */
void
bson_copy_to_excluding_noinit (const bson_t *src,
bson_t *dst,
const char *first_exclude,
...)
{
va_list args;
BSON_ASSERT (src);
BSON_ASSERT (dst);
BSON_ASSERT (first_exclude);
va_start (args, first_exclude);
_bson_copy_to_excluding_va (src, dst, first_exclude, args);
va_end (args);
}
/* Release @bson's resources.  Frees the data buffer unless it is read-only,
 * inline, or caller-owned (NO_FREE); frees the bson_t itself unless STATIC
 * (i.e. caller-allocated, e.g. via bson_init). */
void
bson_destroy (bson_t *bson)
{
BSON_ASSERT (bson);
if (!(bson->flags &
(BSON_FLAG_RDONLY | BSON_FLAG_INLINE | BSON_FLAG_NO_FREE))) {
bson_free (*((bson_impl_alloc_t *) bson)->buf);
}
if (!(bson->flags & BSON_FLAG_STATIC)) {
bson_free (bson);
}
}
/* Grow @bson's buffer to at least @size bytes and set its length to @size,
 * returning a writable pointer to the data, or NULL if @bson is a child,
 * mid-child, or read-only document or growth fails.  NOTE(review): this
 * sets len without writing a valid document framing — the caller is
 * expected to fill the buffer with valid BSON. */
uint8_t *
bson_reserve_buffer (bson_t *bson, uint32_t size)
{
if (bson->flags &
(BSON_FLAG_CHILD | BSON_FLAG_IN_CHILD | BSON_FLAG_RDONLY)) {
return NULL;
}
if (!_bson_grow (bson, size)) {
return NULL;
}
if (bson->flags & BSON_FLAG_INLINE) {
/* bson_grow didn't spill over */
((bson_impl_inline_t *) bson)->len = size;
} else {
((bson_impl_alloc_t *) bson)->len = size;
}
return _bson_data (bson);
}
/* Move the contents of @src into caller-owned @dst, invalidating @src.
 * Fails (returns false) for child, mid-child, or read-only documents.
 * Heap-allocated @src is freed; static @src is merely zeroed. */
bool
bson_steal (bson_t *dst, bson_t *src)
{
bson_impl_inline_t *src_inline;
bson_impl_inline_t *dst_inline;
bson_impl_alloc_t *alloc;
BSON_ASSERT (dst);
BSON_ASSERT (src);
bson_init (dst);
if (src->flags & (BSON_FLAG_CHILD | BSON_FLAG_IN_CHILD | BSON_FLAG_RDONLY)) {
return false;
}
if (src->flags & BSON_FLAG_INLINE) {
src_inline = (bson_impl_inline_t *) src;
dst_inline = (bson_impl_inline_t *) dst;
dst_inline->len = src_inline->len;
memcpy (dst_inline->data, src_inline->data, sizeof src_inline->data);
/* for consistency, src is always invalid after steal, even if inline */
src->len = 0;
} else {
memcpy (dst, src, sizeof (bson_t));
/* dst is caller-owned, and its buf/buflen must point at its OWN
 * alloc fields, not src's. */
alloc = (bson_impl_alloc_t *) dst;
alloc->flags |= BSON_FLAG_STATIC;
alloc->buf = &alloc->alloc;
alloc->buflen = &alloc->alloclen;
}
if (!(src->flags & BSON_FLAG_STATIC)) {
bson_free (src);
} else {
/* src is invalid after steal */
src->len = 0;
}
return true;
}
/* Destroy @bson; if @steal, return its malloc'ed data buffer (caller frees)
 * instead of freeing it.  Inline documents are copied to a fresh buffer.
 * Returns NULL when not stealing or when the buffer cannot be detached
 * (child / mid-child / read-only). */
uint8_t *
bson_destroy_with_steal (bson_t *bson, bool steal, uint32_t *length)
{
uint8_t *ret = NULL;
BSON_ASSERT (bson);
if (length) {
*length = bson->len;
}
if (!steal) {
bson_destroy (bson);
return NULL;
}
if ((bson->flags &
(BSON_FLAG_CHILD | BSON_FLAG_IN_CHILD | BSON_FLAG_RDONLY))) {
/* Do nothing */
} else if ((bson->flags & BSON_FLAG_INLINE)) {
bson_impl_inline_t *inl;
inl = (bson_impl_inline_t *) bson;
ret = bson_malloc (bson->len);
memcpy (ret, inl->data, bson->len);
} else {
bson_impl_alloc_t *alloc;
alloc = (bson_impl_alloc_t *) bson;
ret = *alloc->buf;
/* Detach so bson_destroy below doesn't free the stolen buffer. */
*alloc->buf = NULL;
}
bson_destroy (bson);
return ret;
}
/* Return a read-only pointer to @bson's raw data (bson->len bytes). */
const uint8_t *
bson_get_data (const bson_t *bson)
{
BSON_ASSERT (bson);
return _bson_data (bson);
}
/* Count the top-level elements of @bson by iterating them. O(n). */
uint32_t
bson_count_keys (const bson_t *bson)
{
uint32_t count = 0;
bson_iter_t iter;
BSON_ASSERT (bson);
if (bson_iter_init (&iter, bson)) {
while (bson_iter_next (&iter)) {
count++;
}
}
return count;
}
/* Test whether @key exists in @bson.  A dotted key ("a.b.c") is resolved
 * through nested documents via bson_iter_find_descendant. */
bool
bson_has_field (const bson_t *bson, const char *key)
{
bson_iter_t iter;
bson_iter_t child;
BSON_ASSERT (bson);
BSON_ASSERT (key);
if (NULL != strchr (key, '.')) {
return (bson_iter_init (&iter, bson) &&
bson_iter_find_descendant (&iter, key, &child));
}
return bson_iter_init_find (&iter, bson, key);
}
/* Lexicographically compare the raw bytes of two documents, skipping the
 * 4-byte length headers.  Returns <0, 0, or >0 like memcmp; when one is a
 * prefix of the other, the shorter document sorts first. */
int
bson_compare (const bson_t *bson, const bson_t *other)
{
const uint8_t *data1;
const uint8_t *data2;
size_t len1;
size_t len2;
int64_t ret;
data1 = _bson_data (bson) + 4;
len1 = bson->len - 4;
data2 = _bson_data (other) + 4;
len2 = other->len - 4;
if (len1 == len2) {
return memcmp (data1, data2, len1);
}
ret = memcmp (data1, data2, BSON_MIN (len1, len2));
if (ret == 0) {
ret = (int64_t) (len1 - len2);
}
/* Collapse to -1/0/1 so the int64 difference can't overflow int. */
return (ret < 0) ? -1 : (ret > 0);
}
/* Byte-wise equality of two documents. */
bool
bson_equal (const bson_t *bson, const bson_t *other)
{
return !bson_compare (bson, other);
}
/* JSON visitor: emit a UTF-8 string as a quoted, escaped JSON string.
 * Returns true (abort iteration) if the string cannot be escaped, e.g. on
 * invalid UTF-8. */
static bool
_bson_as_json_visit_utf8 (const bson_iter_t *iter,
const char *key,
size_t v_utf8_len,
const char *v_utf8,
void *data)
{
bson_json_state_t *state = data;
char *escaped;
escaped = bson_utf8_escape_for_json (v_utf8, v_utf8_len);
if (escaped) {
bson_string_append (state->str, "\"");
bson_string_append (state->str, escaped);
bson_string_append (state->str, "\"");
bson_free (escaped);
return false;
}
return true;
}
/* JSON visitor: int32 — wrapped as {"$numberInt": "..."} in canonical mode,
 * bare number otherwise. */
static bool
_bson_as_json_visit_int32 (const bson_iter_t *iter,
const char *key,
int32_t v_int32,
void *data)
{
bson_json_state_t *state = data;
if (state->mode == BSON_JSON_MODE_CANONICAL) {
bson_string_append_printf (
state->str, "{ \"$numberInt\" : \"%" PRId32 "\" }", v_int32);
} else {
bson_string_append_printf (state->str, "%" PRId32, v_int32);
}
return false;
}
/* JSON visitor: int64 — wrapped as {"$numberLong": "..."} in canonical mode,
 * bare number otherwise. */
static bool
_bson_as_json_visit_int64 (const bson_iter_t *iter,
const char *key,
int64_t v_int64,
void *data)
{
bson_json_state_t *state = data;
if (state->mode == BSON_JSON_MODE_CANONICAL) {
bson_string_append_printf (
state->str, "{ \"$numberLong\" : \"%" PRId64 "\"}", v_int64);
} else {
bson_string_append_printf (state->str, "%" PRId64, v_int64);
}
return false;
}
/* JSON visitor: decimal128 — always emitted as {"$numberDecimal": "..."}. */
static bool
_bson_as_json_visit_decimal128 (const bson_iter_t *iter,
const char *key,
const bson_decimal128_t *value,
void *data)
{
bson_json_state_t *state = data;
char decimal128_string[BSON_DECIMAL128_STRING];
bson_decimal128_to_string (value, decimal128_string);
bson_string_append (state->str, "{ \"$numberDecimal\" : \"");
bson_string_append (state->str, decimal128_string);
bson_string_append (state->str, "\" }");
return false;
}
/* JSON visitor: double.  Legacy mode and finite relaxed-mode values are
 * written bare; otherwise wrapped as {"$numberDouble": "..."} with NaN /
 * Infinity spelled out.  A trailing ".0" is forced onto integral values so
 * they re-parse as doubles. */
static bool
_bson_as_json_visit_double (const bson_iter_t *iter,
const char *key,
double v_double,
void *data)
{
bson_json_state_t *state = data;
bson_string_t *str = state->str;
uint32_t start_len;
bool legacy;
/* Determine if legacy (i.e. unwrapped) output should be used. Relaxed mode
 * will use this for nan and inf values, which we check manually since old
 * platforms may not have isinf or isnan. */
legacy = state->mode == BSON_JSON_MODE_LEGACY ||
(state->mode == BSON_JSON_MODE_RELAXED &&
!(v_double != v_double || v_double * 0 != 0));
if (!legacy) {
bson_string_append (state->str, "{ \"$numberDouble\" : \"");
}
/* v_double != v_double  <=>  NaN;  v_double * 0 != 0  <=>  +/-Inf. */
if (!legacy && v_double != v_double) {
bson_string_append (str, "NaN");
} else if (!legacy && v_double * 0 != 0) {
if (v_double > 0) {
bson_string_append (str, "Infinity");
} else {
bson_string_append (str, "-Infinity");
}
} else {
start_len = str->len;
bson_string_append_printf (str, "%.20g", v_double);
/* ensure trailing ".0" to distinguish "3" from "3.0" */
if (strspn (&str->str[start_len], "0123456789-") ==
str->len - start_len) {
bson_string_append (str, ".0");
}
}
if (!legacy) {
bson_string_append (state->str, "\" }");
}
return false;
}
/* JSON visitor: undefined — always {"$undefined": true}. */
static bool
_bson_as_json_visit_undefined (const bson_iter_t *iter,
const char *key,
void *data)
{
bson_json_state_t *state = data;
bson_string_append (state->str, "{ \"$undefined\" : true }");
return false;
}
/* JSON visitor: null — the JSON literal null. */
static bool
_bson_as_json_visit_null (const bson_iter_t *iter, const char *key, void *data)
{
bson_json_state_t *state = data;
bson_string_append (state->str, "null");
return false;
}
/* JSON visitor: ObjectId — {"$oid": "<24 hex chars>"}. */
static bool
_bson_as_json_visit_oid (const bson_iter_t *iter,
const char *key,
const bson_oid_t *oid,
void *data)
{
bson_json_state_t *state = data;
char str[25];
bson_oid_to_string (oid, str);
bson_string_append (state->str, "{ \"$oid\" : \"");
bson_string_append (state->str, str);
bson_string_append (state->str, "\" }");
return false;
}
/* JSON visitor: binary.  Base64-encodes the payload; extended-JSON modes
 * use the nested {"$binary": {"base64": ..., "subType": ...}} form, legacy
 * uses the flat {"$binary": ..., "$type": ...} form. */
static bool
_bson_as_json_visit_binary (const bson_iter_t *iter,
const char *key,
bson_subtype_t v_subtype,
size_t v_binary_len,
const uint8_t *v_binary,
void *data)
{
bson_json_state_t *state = data;
size_t b64_len;
char *b64;
/* Worst-case base64 expansion: 4 output bytes per 3 input, plus NUL. */
b64_len = (v_binary_len / 3 + 1) * 4 + 1;
b64 = bson_malloc0 (b64_len);
b64_ntop (v_binary, v_binary_len, b64, b64_len);
if (state->mode == BSON_JSON_MODE_CANONICAL ||
state->mode == BSON_JSON_MODE_RELAXED) {
bson_string_append (state->str, "{ \"$binary\" : { \"base64\": \"");
bson_string_append (state->str, b64);
bson_string_append (state->str, "\", \"subType\" : \"");
bson_string_append_printf (state->str, "%02x", v_subtype);
bson_string_append (state->str, "\" } }");
} else {
bson_string_append (state->str, "{ \"$binary\" : \"");
bson_string_append (state->str, b64);
bson_string_append (state->str, "\", \"$type\" : \"");
bson_string_append_printf (state->str, "%02x", v_subtype);
bson_string_append (state->str, "\" }");
}
bson_free (b64);
return false;
}
/* JSON visitor: bool — the JSON literals true/false. */
static bool
_bson_as_json_visit_bool (const bson_iter_t *iter,
const char *key,
bool v_bool,
void *data)
{
bson_json_state_t *state = data;
bson_string_append (state->str, v_bool ? "true" : "false");
return false;
}
/* JSON visitor: date-time.  Canonical mode (and relaxed mode for pre-epoch
 * dates) wraps the millisecond count in {"$date": {"$numberLong": ...}};
 * relaxed mode otherwise formats ISO-8601; legacy emits a bare number. */
static bool
_bson_as_json_visit_date_time (const bson_iter_t *iter,
const char *key,
int64_t msec_since_epoch,
void *data)
{
bson_json_state_t *state = data;
if (state->mode == BSON_JSON_MODE_CANONICAL ||
(state->mode == BSON_JSON_MODE_RELAXED && msec_since_epoch < 0)) {
bson_string_append (state->str, "{ \"$date\" : { \"$numberLong\" : \"");
bson_string_append_printf (state->str, "%" PRId64, msec_since_epoch);
bson_string_append (state->str, "\" } }");
} else if (state->mode == BSON_JSON_MODE_RELAXED) {
bson_string_append (state->str, "{ \"$date\" : \"");
_bson_iso8601_date_format (msec_since_epoch, state->str);
bson_string_append (state->str, "\" }");
} else {
bson_string_append (state->str, "{ \"$date\" : ");
bson_string_append_printf (state->str, "%" PRId64, msec_since_epoch);
bson_string_append (state->str, " }");
}
return false;
}
/* JSON visitor: regex.  Extended-JSON modes use $regularExpression; legacy
 * uses $regex/$options.  Options are re-sorted for canonical output.
 * Returns true (abort) if the pattern cannot be escaped. */
static bool
_bson_as_json_visit_regex (const bson_iter_t *iter,
const char *key,
const char *v_regex,
const char *v_options,
void *data)
{
bson_json_state_t *state = data;
char *escaped;
escaped = bson_utf8_escape_for_json (v_regex, -1);
if (!escaped) {
return true;
}
if (state->mode == BSON_JSON_MODE_CANONICAL ||
state->mode == BSON_JSON_MODE_RELAXED) {
bson_string_append (state->str,
"{ \"$regularExpression\" : { \"pattern\" : \"");
bson_string_append (state->str, escaped);
bson_string_append (state->str, "\", \"options\" : \"");
_bson_append_regex_options_sorted (state->str, v_options);
bson_string_append (state->str, "\" } }");
} else {
bson_string_append (state->str, "{ \"$regex\" : \"");
bson_string_append (state->str, escaped);
bson_string_append (state->str, "\", \"$options\" : \"");
_bson_append_regex_options_sorted (state->str, v_options);
bson_string_append (state->str, "\" }");
}
bson_free (escaped);
return false;
}
/* JSON visitor: timestamp — {"$timestamp": {"t": ..., "i": ...}}. */
static bool
_bson_as_json_visit_timestamp (const bson_iter_t *iter,
const char *key,
uint32_t v_timestamp,
uint32_t v_increment,
void *data)
{
bson_json_state_t *state = data;
bson_string_append (state->str, "{ \"$timestamp\" : { \"t\" : ");
bson_string_append_printf (state->str, "%u", v_timestamp);
bson_string_append (state->str, ", \"i\" : ");
bson_string_append_printf (state->str, "%u", v_increment);
bson_string_append (state->str, " } }");
return false;
}
/* JSON visitor: DBPointer.  Extended-JSON modes use the $dbPointer wrapper;
 * legacy uses $ref/$id.  Returns true (abort) if the collection name cannot
 * be escaped. */
static bool
_bson_as_json_visit_dbpointer (const bson_iter_t *iter,
const char *key,
size_t v_collection_len,
const char *v_collection,
const bson_oid_t *v_oid,
void *data)
{
bson_json_state_t *state = data;
char *escaped;
char str[25];
escaped = bson_utf8_escape_for_json (v_collection, -1);
if (!escaped) {
return true;
}
if (state->mode == BSON_JSON_MODE_CANONICAL ||
state->mode == BSON_JSON_MODE_RELAXED) {
bson_string_append (state->str, "{ \"$dbPointer\" : { \"$ref\" : \"");
bson_string_append (state->str, escaped);
bson_string_append (state->str, "\"");
if (v_oid) {
bson_oid_to_string (v_oid, str);
bson_string_append (state->str, ", \"$id\" : { \"$oid\" : \"");
bson_string_append (state->str, str);
bson_string_append (state->str, "\" }");
}
bson_string_append (state->str, " } }");
} else {
bson_string_append (state->str, "{ \"$ref\" : \"");
bson_string_append (state->str, escaped);
bson_string_append (state->str, "\"");
if (v_oid) {
bson_oid_to_string (v_oid, str);
bson_string_append (state->str, ", \"$id\" : \"");
bson_string_append (state->str, str);
bson_string_append (state->str, "\"");
}
bson_string_append (state->str, " }");
}
bson_free (escaped);
return false;
}
/* JSON visitor: MinKey — always {"$minKey": 1}. */
static bool
_bson_as_json_visit_minkey (const bson_iter_t *iter,
const char *key,
void *data)
{
bson_json_state_t *state = data;
bson_string_append (state->str, "{ \"$minKey\" : 1 }");
return false;
}
/* JSON visitor: MaxKey — always {"$maxKey": 1}. */
static bool
_bson_as_json_visit_maxkey (const bson_iter_t *iter,
const char *key,
void *data)
{
bson_json_state_t *state = data;
bson_string_append (state->str, "{ \"$maxKey\" : 1 }");
return false;
}
/* JSON visitor: called before each element.  Emits the separating comma
 * and, when producing an object (state->keys), the escaped key.  Returns
 * true (abort) if the key cannot be escaped. */
static bool
_bson_as_json_visit_before (const bson_iter_t *iter,
const char *key,
void *data)
{
bson_json_state_t *state = data;
char *escaped;
if (state->count) {
bson_string_append (state->str, ", ");
}
if (state->keys) {
escaped = bson_utf8_escape_for_json (key, -1);
if (escaped) {
bson_string_append (state->str, "\"");
bson_string_append (state->str, escaped);
bson_string_append (state->str, "\" : ");
bson_free (escaped);
} else {
return true;
}
}
state->count++;
return false;
}
/* JSON visitor: corruption callback — record the iterator offset where the
 * corrupt data was found so the caller can report/abort. */
static void
_bson_as_json_visit_corrupt (const bson_iter_t *iter, void *data)
{
*(((bson_json_state_t *) data)->err_offset) = iter->off;
}
/* JSON visitor: JavaScript code without scope — {"$code": "..."}.  Returns
 * true (abort) if the code cannot be escaped. */
static bool
_bson_as_json_visit_code (const bson_iter_t *iter,
const char *key,
size_t v_code_len,
const char *v_code,
void *data)
{
bson_json_state_t *state = data;
char *escaped;
escaped = bson_utf8_escape_for_json (v_code, v_code_len);
if (!escaped) {
return true;
}
bson_string_append (state->str, "{ \"$code\" : \"");
bson_string_append (state->str, escaped);
bson_string_append (state->str, "\" }");
bson_free (escaped);
return false;
}
/* JSON visitor: symbol — {"$symbol": "..."} in extended-JSON modes, plain
 * quoted string in legacy mode. */
static bool
_bson_as_json_visit_symbol (const bson_iter_t *iter,
const char *key,
size_t v_symbol_len,
const char *v_symbol,
void *data)
{
bson_json_state_t *state = data;
char *escaped;
escaped = bson_utf8_escape_for_json (v_symbol, v_symbol_len);
if (!escaped) {
return true;
}
if (state->mode == BSON_JSON_MODE_CANONICAL ||
state->mode == BSON_JSON_MODE_RELAXED) {
bson_string_append (state->str, "{ \"$symbol\" : \"");
bson_string_append (state->str, escaped);
bson_string_append (state->str, "\" }");
} else {
bson_string_append (state->str, "\"");
bson_string_append (state->str, escaped);
bson_string_append (state->str, "\"");
}
bson_free (escaped);
return false;
}
/* JSON visitor: code-with-scope — {"$code": ..., "$scope": {...}}, where
 * the scope document is recursively rendered in the same JSON mode. */
static bool
_bson_as_json_visit_codewscope (const bson_iter_t *iter,
const char *key,
size_t v_code_len,
const char *v_code,
const bson_t *v_scope,
void *data)
{
bson_json_state_t *state = data;
char *code_escaped;
char *scope;
code_escaped = bson_utf8_escape_for_json (v_code, v_code_len);
if (!code_escaped) {
return true;
}
/* Encode scope with the same mode */
scope = _bson_as_json_visit_all (v_scope, NULL, state->mode);
if (!scope) {
bson_free (code_escaped);
return true;
}
bson_string_append (state->str, "{ \"$code\" : \"");
bson_string_append (state->str, code_escaped);
bson_string_append (state->str, "\", \"$scope\" : ");
bson_string_append (state->str, scope);
bson_string_append (state->str, " }");
bson_free (code_escaped);
bson_free (scope);
return false;
}
/* Visitor table mapping each BSON type to its JSON emitter.  Entry order
 * must match the bson_visitor_t struct layout. */
static const bson_visitor_t bson_as_json_visitors = {
_bson_as_json_visit_before,
NULL, /* visit_after */
_bson_as_json_visit_corrupt,
_bson_as_json_visit_double,
_bson_as_json_visit_utf8,
_bson_as_json_visit_document,
_bson_as_json_visit_array,
_bson_as_json_visit_binary,
_bson_as_json_visit_undefined,
_bson_as_json_visit_oid,
_bson_as_json_visit_bool,
_bson_as_json_visit_date_time,
_bson_as_json_visit_null,
_bson_as_json_visit_regex,
_bson_as_json_visit_dbpointer,
_bson_as_json_visit_code,
_bson_as_json_visit_symbol,
_bson_as_json_visit_codewscope,
_bson_as_json_visit_int32,
_bson_as_json_visit_timestamp,
_bson_as_json_visit_int64,
_bson_as_json_visit_maxkey,
_bson_as_json_visit_minkey,
NULL, /* visit_unsupported_type */
_bson_as_json_visit_decimal128,
};
/* JSON visitor: embedded document.  Recursively renders @v_document between
 * "{ " and " }" in the same JSON mode.  Emits "{ ... }" once the recursion
 * depth limit is reached.  Returns true (abort) on a failed child visit. */
static bool
_bson_as_json_visit_document (const bson_iter_t *iter,
                              const char *key,
                              const bson_t *v_document,
                              void *data)
{
   bson_json_state_t *state = data;
   bson_json_state_t child_state = {0, true, state->err_offset};
   bson_iter_t child;

   if (state->depth >= BSON_MAX_RECURSION) {
      bson_string_append (state->str, "{ ... }");
      return false;
   }

   if (bson_iter_init (&child, v_document)) {
      child_state.str = bson_string_new ("{ ");
      child_state.depth = state->depth + 1;
      child_state.mode = state->mode;
      if (bson_iter_visit_all (&child, &bson_as_json_visitors, &child_state)) {
         /* Release the partial buffer before aborting; returning without
          * freeing it leaks on corrupt/unrepresentable input. */
         bson_string_free (child_state.str, true);
         return true;
      }
      bson_string_append (child_state.str, " }");
      bson_string_append (state->str, child_state.str->str);
      bson_string_free (child_state.str, true);
   }

   return false;
}
/* JSON visitor: array.  Recursively renders @v_array between "[ " and " ]"
 * (keys suppressed) in the same JSON mode.  Emits "{ ... }" once the
 * recursion depth limit is reached.  Returns true (abort) on a failed
 * child visit. */
static bool
_bson_as_json_visit_array (const bson_iter_t *iter,
                           const char *key,
                           const bson_t *v_array,
                           void *data)
{
   bson_json_state_t *state = data;
   bson_json_state_t child_state = {0, false, state->err_offset};
   bson_iter_t child;

   if (state->depth >= BSON_MAX_RECURSION) {
      bson_string_append (state->str, "{ ... }");
      return false;
   }

   if (bson_iter_init (&child, v_array)) {
      child_state.str = bson_string_new ("[ ");
      child_state.depth = state->depth + 1;
      child_state.mode = state->mode;
      if (bson_iter_visit_all (&child, &bson_as_json_visitors, &child_state)) {
         /* Release the partial buffer before aborting; returning without
          * freeing it leaks on corrupt/unrepresentable input. */
         bson_string_free (child_state.str, true);
         return true;
      }
      bson_string_append (child_state.str, " ]");
      bson_string_append (state->str, child_state.str->str);
      bson_string_free (child_state.str, true);
   }

   return false;
}
/* Render @bson as a JSON object string in the given @mode.  Returns a
 * bson_malloc'ed NUL-terminated string (caller frees with bson_free), or
 * NULL on corrupt input or a failed visitor.  If @length is non-NULL it
 * receives the string length (0 on failure). */
static char *
_bson_as_json_visit_all (const bson_t *bson,
size_t *length,
bson_json_mode_t mode)
{
bson_json_state_t state;
bson_iter_t iter;
ssize_t err_offset = -1;
BSON_ASSERT (bson);
if (length) {
*length = 0;
}
if (bson_empty0 (bson)) {
if (length) {
*length = 3;
}
return bson_strdup ("{ }");
}
if (!bson_iter_init (&iter, bson)) {
return NULL;
}
state.count = 0;
state.keys = true;
state.str = bson_string_new ("{ ");
state.depth = 0;
state.err_offset = &err_offset;
state.mode = mode;
if (bson_iter_visit_all (&iter, &bson_as_json_visitors, &state) ||
err_offset != -1) {
/*
 * We were prematurely exited due to corruption or failed visitor.
 */
bson_string_free (state.str, true);
if (length) {
*length = 0;
}
return NULL;
}
bson_string_append (state.str, " }");
if (length) {
*length = state.str->len;
}
/* false: keep the buffer and hand ownership to the caller. */
return bson_string_free (state.str, false);
}
/* Public wrapper: canonical extended JSON (fully type-wrapped). */
char *
bson_as_canonical_extended_json (const bson_t *bson, size_t *length)
{
return _bson_as_json_visit_all (bson, length, BSON_JSON_MODE_CANONICAL);
}
/* Public wrapper: legacy libbson JSON. */
char *
bson_as_json (const bson_t *bson, size_t *length)
{
return _bson_as_json_visit_all (bson, length, BSON_JSON_MODE_LEGACY);
}
/* Public wrapper: relaxed extended JSON (human-readable numbers/dates). */
char *
bson_as_relaxed_extended_json (const bson_t *bson, size_t *length)
{
return _bson_as_json_visit_all (bson, length, BSON_JSON_MODE_RELAXED);
}
/*
 * bson_array_as_json --
 *
 *    Render @bson (a document whose keys are array indices) as a legacy
 *    JSON array string, e.g. "[ 1, 2 ]".  Keys are suppressed.
 *
 * Returns a bson_malloc'ed NUL-terminated string (caller frees with
 * bson_free), or NULL on corrupt input or a failed visitor.  If @length is
 * non-NULL it receives the string length (0 on failure).
 */
char *
bson_array_as_json (const bson_t *bson, size_t *length)
{
   bson_json_state_t state;
   bson_iter_t iter;
   ssize_t err_offset = -1;

   BSON_ASSERT (bson);

   if (length) {
      *length = 0;
   }

   if (bson_empty0 (bson)) {
      if (length) {
         *length = 3;
      }
      return bson_strdup ("[ ]");
   }

   if (!bson_iter_init (&iter, bson)) {
      return NULL;
   }

   state.count = 0;
   state.keys = false;
   state.str = bson_string_new ("[ ");
   state.depth = 0;
   state.err_offset = &err_offset;
   state.mode = BSON_JSON_MODE_LEGACY;
   /* Visit exactly once: a duplicated bson_iter_visit_all call here would
    * emit every element twice. */
   if (bson_iter_visit_all (&iter, &bson_as_json_visitors, &state) ||
       err_offset != -1) {
      /*
       * We were prematurely exited due to corruption or failed visitor.
       */
      bson_string_free (state.str, true);
      if (length) {
         *length = 0;
      }
      return NULL;
   }

   bson_string_append (state.str, " ]");

   if (length) {
      *length = state.str->len;
   }

   return bson_string_free (state.str, false);
}
/* Record a validation failure in state->error.  NOTE: expands against a
 * local named `state` (a bson_validate_state_t *), which every caller
 * below is required to have in scope. */
#define VALIDATION_ERR(_flag, _msg, ...) \
   bson_set_error (&state->error, BSON_ERROR_INVALID, _flag, _msg, __VA_ARGS__)
/*
 * Visitor for UTF-8 string elements during bson_validate().
 *
 * When BSON_VALIDATE_UTF8 is requested, rejects malformed UTF-8 (with
 * embedded NULs allowed only under BSON_VALIDATE_UTF8_ALLOW_NULL).
 * When dollar-key validation is on, advances the DBRef state machine:
 * the string value after "$ref" means "$id" is expected next; the
 * string value after "$db" completes the DBRef.
 *
 * Returns true to abort iteration (validation failed), false otherwise.
 */
static bool
_bson_iter_validate_utf8 (const bson_iter_t *iter,
                          const char *key,
                          size_t v_utf8_len,
                          const char *v_utf8,
                          void *data)
{
   bson_validate_state_t *state = data; /* name required by VALIDATION_ERR */

   if (state->flags & BSON_VALIDATE_UTF8) {
      const bool null_ok =
         (state->flags & BSON_VALIDATE_UTF8_ALLOW_NULL) != 0;

      if (!bson_utf8_validate (v_utf8, v_utf8_len, null_ok)) {
         state->err_offset = iter->off;
         VALIDATION_ERR (
            BSON_VALIDATE_UTF8, "invalid utf8 string for key \"%s\"", key);
         return true; /* abort */
      }
   }

   if (state->flags & BSON_VALIDATE_DOLLAR_KEYS) {
      switch (state->phase) {
      case BSON_VALIDATE_PHASE_LF_REF_UTF8:
         /* consumed {"$ref": "<utf8>"}; "$id" must follow */
         state->phase = BSON_VALIDATE_PHASE_LF_ID_KEY;
         break;
      case BSON_VALIDATE_PHASE_LF_DB_UTF8:
         /* consumed {"$db": "<utf8>"}; DBRef is complete */
         state->phase = BSON_VALIDATE_PHASE_NOT_DBREF;
         break;
      default:
         break;
      }
   }

   return false;
}
/*
 * Visitor invoked when bson_iter_visit_all() hits structurally corrupt
 * BSON.  Records the failing byte offset and a generic error message;
 * the callers detect failure via state->err_offset != -1.
 */
static void
_bson_iter_validate_corrupt (const bson_iter_t *iter, void *data)
{
   bson_validate_state_t *state = data;
   state->err_offset = iter->err_off;
   VALIDATION_ERR (BSON_VALIDATE_NONE, "%s", "corrupt BSON");
}
/*
 * Visitor called with each element's key before the element is visited.
 *
 * Enforces BSON_VALIDATE_EMPTY_KEYS, BSON_VALIDATE_DOLLAR_KEYS, and
 * BSON_VALIDATE_DOT_KEYS.  Dollar keys are permitted only as part of a
 * well-formed DBRef subdocument, tracked by state->phase: "$ref" first,
 * then "$id", then optionally "$db".
 *
 * Returns true to abort iteration (validation failed), false otherwise.
 */
static bool
_bson_iter_validate_before (const bson_iter_t *iter,
                            const char *key,
                            void *data)
{
   bson_validate_state_t *state = data;

   if ((state->flags & BSON_VALIDATE_EMPTY_KEYS)) {
      if (key[0] == '\0') {
         state->err_offset = iter->off;
         VALIDATION_ERR (BSON_VALIDATE_EMPTY_KEYS, "%s", "empty key");
         return true;
      }
   }

   if ((state->flags & BSON_VALIDATE_DOLLAR_KEYS)) {
      if (key[0] == '$') {
         /* '$'-prefixed keys are only legal in DBRef order. */
         if (state->phase == BSON_VALIDATE_PHASE_LF_REF_KEY &&
             strcmp (key, "$ref") == 0) {
            state->phase = BSON_VALIDATE_PHASE_LF_REF_UTF8;
         } else if (state->phase == BSON_VALIDATE_PHASE_LF_ID_KEY &&
                    strcmp (key, "$id") == 0) {
            state->phase = BSON_VALIDATE_PHASE_LF_DB_KEY;
         } else if (state->phase == BSON_VALIDATE_PHASE_LF_DB_KEY &&
                    strcmp (key, "$db") == 0) {
            state->phase = BSON_VALIDATE_PHASE_LF_DB_UTF8;
         } else {
            state->err_offset = iter->off;
            VALIDATION_ERR (BSON_VALIDATE_DOLLAR_KEYS,
                            "keys cannot begin with \"$\": \"%s\"",
                            key);
            return true;
         }
      } else if (state->phase == BSON_VALIDATE_PHASE_LF_ID_KEY ||
                 state->phase == BSON_VALIDATE_PHASE_LF_REF_UTF8 ||
                 state->phase == BSON_VALIDATE_PHASE_LF_DB_UTF8) {
         /* Non-dollar key arrived mid-DBRef: malformed DBRef. */
         state->err_offset = iter->off;
         VALIDATION_ERR (BSON_VALIDATE_DOLLAR_KEYS,
                         "invalid key within DBRef subdocument: \"%s\"",
                         key);
         return true;
      } else {
         /* First key was not "$ref": this subdocument is not a DBRef. */
         state->phase = BSON_VALIDATE_PHASE_NOT_DBREF;
      }
   }

   if ((state->flags & BSON_VALIDATE_DOT_KEYS)) {
      if (strstr (key, ".")) {
         state->err_offset = iter->off;
         VALIDATION_ERR (
            BSON_VALIDATE_DOT_KEYS, "keys cannot contain \".\": \"%s\"", key);
         return true;
      }
   }

   return false;
}
/*
 * Visitor for code-with-scope elements: recursively validates the scope
 * document with the same flags.
 *
 * Fix: the return values were inverted relative to the visitor
 * convention used by every other visitor in this file (true aborts
 * iteration — see _bson_iter_validate_before / _bson_iter_validate_utf8).
 * Returning true on a *valid* scope cancelled iteration, so any element
 * following a valid code-with-scope was silently left unvalidated, and
 * returning false on corruption kept iterating needlessly.  Now: abort
 * (true) on corruption, continue (false) on success.
 */
static bool
_bson_iter_validate_codewscope (const bson_iter_t *iter,
                                const char *key,
                                size_t v_code_len,
                                const char *v_code,
                                const bson_t *v_scope,
                                void *data)
{
   bson_validate_state_t *state = data;
   size_t offset = 0;

   if (!bson_validate (v_scope, state->flags, &offset)) {
      state->err_offset = iter->off + offset;
      VALIDATION_ERR (BSON_VALIDATE_NONE, "%s", "corrupt code-with-scope");
      return true; /* abort: scope is corrupt */
   }

   return false; /* continue validating remaining elements */
}
/* Forward declaration: embedded documents and arrays are validated by
 * recursing through this visitor (see definition below the table). */
static bool
_bson_iter_validate_document (const bson_iter_t *iter,
                              const char *key,
                              const bson_t *v_document,
                              void *data);
/* Visitor table driving bson_validate(); NULL slots are no-ops.  Only
 * keys (visit_before), UTF-8 strings, subdocuments/arrays, and
 * code-with-scope require inspection. */
static const bson_visitor_t bson_validate_funcs = {
   _bson_iter_validate_before,   /* visit_before: key checks */
   NULL,                         /* visit_after */
   _bson_iter_validate_corrupt,  /* visit_corrupt */
   NULL,                         /* visit_double */
   _bson_iter_validate_utf8,     /* visit_utf8 */
   _bson_iter_validate_document, /* visit_document */
   _bson_iter_validate_document, /* visit_array */
   NULL,                         /* visit_binary */
   NULL,                         /* visit_undefined */
   NULL,                         /* visit_oid */
   NULL,                         /* visit_bool */
   NULL,                         /* visit_date_time */
   NULL,                         /* visit_null */
   NULL,                         /* visit_regex */
   NULL,                         /* visit_dbpoint */
   NULL,                         /* visit_code */
   NULL,                         /* visit_symbol */
   _bson_iter_validate_codewscope,
};
/*
 * Visitor for embedded documents and arrays: recursively walks the
 * child with the same visitor table.  The caller's DBRef phase is
 * saved and restored around the recursion; on entry a child document
 * may begin a DBRef ("$ref" expected as its first dollar key).
 *
 * Returns true to abort iteration of the parent (child failed to
 * initialize, or the child ended in the middle of a DBRef).
 */
static bool
_bson_iter_validate_document (const bson_iter_t *iter,
                              const char *key,
                              const bson_t *v_document,
                              void *data)
{
   bson_validate_state_t *state = data;
   bson_iter_t child;
   bson_validate_phase_t phase = state->phase; /* restored on success */

   if (!bson_iter_init (&child, v_document)) {
      state->err_offset = iter->off;
      return true;
   }

   if (state->phase == BSON_VALIDATE_PHASE_START) {
      /* top-level document: never a DBRef */
      state->phase = BSON_VALIDATE_PHASE_TOP;
   } else {
      /* any subdocument may start a DBRef */
      state->phase = BSON_VALIDATE_PHASE_LF_REF_KEY;
   }

   bson_iter_visit_all (&child, &bson_validate_funcs, state);

   if (state->phase == BSON_VALIDATE_PHASE_LF_ID_KEY ||
       state->phase == BSON_VALIDATE_PHASE_LF_REF_UTF8 ||
       state->phase == BSON_VALIDATE_PHASE_LF_DB_UTF8) {
      /* child ended mid-DBRef, e.g. {"$ref": "x"} without "$id" */
      if (state->err_offset <= 0) {
         state->err_offset = iter->off;
      }
      return true;
   }

   state->phase = phase;
   return false;
}
/*
 * Shared implementation for bson_validate() and
 * bson_validate_with_error(): resets @state, then walks @bson with the
 * validation visitor table.  On return, state->err_offset is -1 on
 * success, otherwise the byte offset of the first problem, with
 * state->error describing it.
 */
static void
_bson_validate_internal (const bson_t *bson, bson_validate_state_t *state)
{
   bson_iter_t iter;

   state->err_offset = -1;
   state->phase = BSON_VALIDATE_PHASE_START;
   memset (&state->error, 0, sizeof state->error);

   if (!bson_iter_init (&iter, bson)) {
      state->err_offset = 0;
      VALIDATION_ERR (BSON_VALIDATE_NONE, "%s", "corrupt BSON");
      return;
   }

   _bson_iter_validate_document (&iter, NULL, bson, state);
}
/*
 * bson_validate --
 *
 *    Validates @bson according to @flags.  Returns true when all
 *    requested checks pass.  On failure, if @offset is non-NULL and a
 *    positive error offset is known, it receives the byte offset of the
 *    first invalid element (left untouched when the offset is 0).
 */
bool
bson_validate (const bson_t *bson, bson_validate_flags_t flags, size_t *offset)
{
   bson_validate_state_t state;

   state.flags = flags;
   _bson_validate_internal (bson, &state);

   if (offset && state.err_offset > 0) {
      *offset = (size_t) state.err_offset;
   }

   return state.err_offset < 0;
}
/*
 * bson_validate_with_error --
 *
 *    Like bson_validate(), but on failure fills @error (if non-NULL)
 *    with the details recorded during validation instead of reporting
 *    a byte offset.  Returns true when all requested checks pass.
 */
bool
bson_validate_with_error (const bson_t *bson,
                          bson_validate_flags_t flags,
                          bson_error_t *error)
{
   bson_validate_state_t state;

   state.flags = flags;
   _bson_validate_internal (bson, &state);

   if (error && state.err_offset > 0) {
      *error = state.error; /* struct copy, same as memcpy of *error */
   }

   return state.err_offset < 0;
}
/*
 * bson_concat --
 *
 *    Appends every element of @src to @dst.  Returns true on success
 *    (including when @src is empty, a no-op); false when the append
 *    would exceed capacity limits.
 */
bool
bson_concat (bson_t *dst, const bson_t *src)
{
   BSON_ASSERT (dst);
   BSON_ASSERT (src);

   if (bson_empty (src)) {
      return true; /* nothing to copy */
   }

   /* Skip src's 4-byte length header; drop header + trailing NUL
    * (5 bytes) from the copied length. */
   return _bson_append (
      dst, 1, src->len - 5, src->len - 5, _bson_data (src) + 4);
}
diff --git a/mongodb-1.3.4/src/libbson/src/bson/bson.h b/mongodb-1.4.2/src/libbson/src/bson/bson.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/bson/bson.h
rename to mongodb-1.4.2/src/libbson/src/bson/bson.h
diff --git a/mongodb-1.3.4/src/libbson/src/jsonsl/jsonsl.c b/mongodb-1.4.2/src/libbson/src/jsonsl/jsonsl.c
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/jsonsl/jsonsl.c
rename to mongodb-1.4.2/src/libbson/src/jsonsl/jsonsl.c
diff --git a/mongodb-1.3.4/src/libbson/src/jsonsl/jsonsl.h b/mongodb-1.4.2/src/libbson/src/jsonsl/jsonsl.h
similarity index 100%
rename from mongodb-1.3.4/src/libbson/src/jsonsl/jsonsl.h
rename to mongodb-1.4.2/src/libbson/src/jsonsl/jsonsl.h
diff --git a/mongodb-1.4.2/src/libmongoc/VERSION_CURRENT b/mongodb-1.4.2/src/libmongoc/VERSION_CURRENT
new file mode 100644
index 00000000..77fee73a
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/VERSION_CURRENT
@@ -0,0 +1 @@
+1.9.3
diff --git a/mongodb-1.4.2/src/libmongoc/VERSION_RELEASED b/mongodb-1.4.2/src/libmongoc/VERSION_RELEASED
new file mode 100644
index 00000000..77fee73a
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/VERSION_RELEASED
@@ -0,0 +1 @@
+1.9.3
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/AutomaticInitAndCleanup.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/AutomaticInitAndCleanup.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/AutomaticInitAndCleanup.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/AutomaticInitAndCleanup.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckCompiler.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/CheckCompiler.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/CheckCompiler.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/CheckCompiler.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckHost.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/CheckHost.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/CheckHost.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/CheckHost.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckProgs.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/CheckProgs.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/CheckProgs.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/CheckProgs.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckSSL.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/CheckSSL.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/CheckSSL.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/CheckSSL.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckSasl.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/CheckSasl.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/CheckSasl.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/CheckSasl.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckSnappy.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/CheckSnappy.m4
similarity index 96%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/CheckSnappy.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/CheckSnappy.m4
index 49a73efb..e2586b24 100644
--- a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckSnappy.m4
+++ b/mongodb-1.4.2/src/libmongoc/build/autotools/CheckSnappy.m4
@@ -1,36 +1,36 @@
# If --with-snappy=auto, determine if there is a system installed snappy
# greater than our required version.
found_snappy=no
AS_IF([test "x${with_snappy}" = xauto -o "x${with_snappy}" = xsystem], [
PKG_CHECK_MODULES(SNAPPY, [snappy], [
found_snappy=yes
], [
# If we didn't find snappy with pkgconfig, search manually. If that
# fails and with-snappy=system, fail.
AC_CHECK_LIB([snappy], [snappy_uncompress], [
AC_CHECK_HEADER([snappy-c.h], [
found_snappy=yes
+ SNAPPY_LIBS=-lsnappy
])
])
])
])
AS_IF([test "x${found_snappy}" = xyes], [
with_snappy=system
- SNAPPY_LIBS=-lsnappy
], [
# snappy not found
AS_IF([test "x${with_snappy}" = xsystem], [
AC_MSG_ERROR([Cannot find system installed snappy. try --with-snappy=no])
])
with_snappy=no
])
if test "x${with_snappy}" != "xno"; then
AC_SUBST(MONGOC_ENABLE_COMPRESSION_SNAPPY, 1)
else
AC_SUBST(MONGOC_ENABLE_COMPRESSION_SNAPPY, 0)
fi
AC_SUBST(SNAPPY_LIBS)
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckTarget.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/CheckTarget.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/CheckTarget.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/CheckTarget.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckZlib.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/CheckZlib.m4
similarity index 95%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/CheckZlib.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/CheckZlib.m4
index c52f221b..c7cb1654 100644
--- a/mongodb-1.3.4/src/libmongoc/build/autotools/CheckZlib.m4
+++ b/mongodb-1.4.2/src/libmongoc/build/autotools/CheckZlib.m4
@@ -1,55 +1,55 @@
# If --with-zlib=auto, determine if there is a system installed zlib
# greater than our required version.
found_zlib=no
AS_IF([test "x${with_zlib}" = xauto -o "x${with_zlib}" = xsystem], [
- PKG_CHECK_MODULES(zlib, [zlib], [
+ PKG_CHECK_MODULES(ZLIB, [zlib], [
found_zlib=yes
], [
# If we didn't find zlib with pkgconfig, search manually. If that
# fails and with-zlib=system, fail, or if with-zlib=auto, use
# bundled.
AC_CHECK_LIB([zlib], [compress2], [
AC_CHECK_HEADER([zlib.h], [
found_zlib=yes
+ ZLIB_LIBS=-lz
])
])
])
], [
AS_IF([test "x${with_zlib}" != xbundled -a "x${with_zlib}" != xno], [
AC_MSG_ERROR([Invalid --with-zlib option: must be system, bundled, auto, or no.])
])
])
AS_IF([test "x${found_zlib}" = "xyes"], [
with_zlib=system
- ZLIB_LIBS=-lz
], [
# zlib not found
AS_IF([test "x${with_zlib}" = xauto -o "x${with_zlib}" = xbundled], [
with_zlib=bundled
], [
AS_IF([test "x${with_zlib}" = xno], [], [
# zlib not found, with-zlib=system
AC_MSG_ERROR([Cannot find system installed zlib. try --with-zlib=bundled])
])
])
])
# If we are using the bundled zlib, recurse into its configure.
AS_IF([test "x${with_zlib}" = xbundled],[
AC_MSG_CHECKING(whether to enable bundled zlib)
AC_MSG_RESULT(yes)
ZLIB_LIBS=
ZLIB_CFLAGS="-Isrc/zlib-1.2.11"
])
if test "x${with_zlib}" != "xno"; then
AC_SUBST(MONGOC_ENABLE_COMPRESSION_ZLIB, 1)
else
AC_SUBST(MONGOC_ENABLE_COMPRESSION_ZLIB, 0)
fi
AC_SUBST(ZLIB_LIBS)
AC_SUBST(ZLIB_CFLAGS)
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/Coverage.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/Coverage.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/Coverage.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/Coverage.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/FindDependencies.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/FindDependencies.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/FindDependencies.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/FindDependencies.m4
diff --git a/mongodb-1.4.2/src/libmongoc/build/autotools/FindResSearch.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/FindResSearch.m4
new file mode 100644
index 00000000..c2cb90a1
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/build/autotools/FindResSearch.m4
@@ -0,0 +1,104 @@
+# Windows only.
+AC_SUBST(MONGOC_HAVE_DNSAPI, 0)
+
+AS_IF([test "x$enable_srv" = "xyes" -o "x$enable_srv" = "xauto"], [
+ # Thread-safe DNS query function for _mongoc_client_get_srv.
+ # Could be a macro, not a function, so check with AC_TRY_LINK.
+ AC_MSG_CHECKING([for res_nsearch])
+ save_LIBS="$LIBS"
+ LIBS="$LIBS -lresolv"
+ AC_TRY_LINK([
+ #include <sys/types.h>
+ #include <netinet/in.h>
+ #include <arpa/nameser.h>
+ #include <resolv.h>
+ ],[
+ int len;
+ unsigned char reply[1024];
+ res_state statep;
+ len = res_nsearch(
+ statep, "example.com", ns_c_in, ns_t_srv, reply, sizeof(reply));
+ ],[
+ AC_MSG_RESULT([yes])
+ AC_SUBST(MONGOC_HAVE_RES_SEARCH, 0)
+ AC_SUBST(MONGOC_HAVE_RES_NSEARCH, 1)
+ AC_SUBST(RESOLV_LIBS, -lresolv)
+ enable_srv=yes
+
+ # We have res_nsearch. Call res_ndestroy (BSD/Mac) or res_nclose (Linux)?
+ AC_MSG_CHECKING([for res_ndestroy])
+ AC_TRY_LINK([
+ #include <sys/types.h>
+ #include <netinet/in.h>
+ #include <arpa/nameser.h>
+ #include <resolv.h>
+ ],[
+ res_state statep;
+ res_ndestroy(statep);
+ ], [
+ AC_MSG_RESULT([yes])
+ AC_SUBST(MONGOC_HAVE_RES_NDESTROY, 1)
+ AC_SUBST(MONGOC_HAVE_RES_NCLOSE, 0)
+ ], [
+ AC_MSG_RESULT([no])
+ AC_SUBST(MONGOC_HAVE_RES_NDESTROY, 0)
+
+ AC_MSG_CHECKING([for res_nclose])
+ AC_TRY_LINK([
+ #include <sys/types.h>
+ #include <netinet/in.h>
+ #include <arpa/nameser.h>
+ #include <resolv.h>
+ ],[
+ res_state statep;
+ res_nclose(statep);
+ ], [
+ AC_MSG_RESULT([yes])
+ AC_SUBST(MONGOC_HAVE_RES_NCLOSE, 1)
+ ], [
+ AC_MSG_RESULT([no])
+ AC_SUBST(MONGOC_HAVE_RES_NCLOSE, 0)
+ ])
+ ])
+ ],[
+ AC_SUBST(MONGOC_HAVE_RES_NSEARCH, 0)
+ AC_SUBST(MONGOC_HAVE_RES_NDESTROY, 0)
+ AC_SUBST(MONGOC_HAVE_RES_NCLOSE, 0)
+
+ AC_MSG_RESULT([no])
+ AC_MSG_CHECKING([for res_search])
+
+ # Thread-unsafe function.
+ AC_TRY_LINK([
+ #include <sys/types.h>
+ #include <netinet/in.h>
+ #include <arpa/nameser.h>
+ #include <resolv.h>
+ ],[
+ int len;
+ unsigned char reply[1024];
+ len = res_search("example.com", ns_c_in, ns_t_srv, reply, sizeof(reply));
+ ], [
+ AC_MSG_RESULT([yes])
+ AC_SUBST(MONGOC_HAVE_RES_SEARCH, 1)
+ AC_SUBST(RESOLV_LIBS, -lresolv)
+ enable_srv=yes
+ ], [
+ AC_MSG_RESULT([no])
+ AC_SUBST(MONGOC_HAVE_RES_SEARCH, 0)
+ ])
+ ])
+
+ LIBS="$save_LIBS"
+
+], [
+ # enable_srv = "no"
+
+ AC_SUBST(MONGOC_HAVE_RES_NSEARCH, 0)
+ AC_SUBST(MONGOC_HAVE_RES_NDESTROY, 0)
+ AC_SUBST(MONGOC_HAVE_RES_NCLOSE, 0)
+ AC_SUBST(MONGOC_HAVE_RES_SEARCH, 0)
+])
+
+AS_IF([test "x${RESOLV_LIBS}" = "x" -a "x$enable_srv" = "xyes"],
+ [AC_MSG_ERROR([Cannot find libresolv. Try --disable-srv])])
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/Libbson.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/Libbson.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/Libbson.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/Libbson.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/MaintainerFlags.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/MaintainerFlags.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/MaintainerFlags.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/MaintainerFlags.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/Optimizations.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/Optimizations.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/Optimizations.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/Optimizations.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/PlatformFlags.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/PlatformFlags.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/PlatformFlags.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/PlatformFlags.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/PrintBuildConfiguration.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/PrintBuildConfiguration.m4
similarity index 96%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/PrintBuildConfiguration.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/PrintBuildConfiguration.m4
index ba3d7e06..ba79b12a 100644
--- a/mongodb-1.3.4/src/libmongoc/build/autotools/PrintBuildConfiguration.m4
+++ b/mongodb-1.4.2/src/libmongoc/build/autotools/PrintBuildConfiguration.m4
@@ -1,56 +1,57 @@
AC_OUTPUT
if test -n "$MONGOC_PRERELEASE_VERSION"; then
cat << EOF
*** IMPORTANT ***
This is an unstable version of libmongoc.
It is for test purposes only.
Please, DO NOT use it in a production environment.
It will probably crash and you will lose your data.
Additionally, the API/ABI may change during the course
of development.
Thanks,
The libmongoc team.
*** END OF WARNING ***
EOF
fi
if test x"${enable_automatic_init_and_cleanup}" != x"no"; then
automatic_init_deprecated="
DEPRECATED: use --disable-automatic-init-and-cleanup"
fi
if test "x$MONGOC_36_EXPERIMENT" = "xyes"; then
experimental_features="
Feature #1 : ${enable_feature_1}"
fi
echo "
libmongoc $MONGOC_VERSION was configured with the following options:
Build configuration:
Enable debugging (slow) : ${enable_debug}
Compile with debug symbols (slow) : ${enable_debug_symbols}
Enable GCC build optimization : ${enable_optimizations}
Enable automatic init and cleanup : ${enable_automatic_init_and_cleanup}${automatic_init_deprecated}
+ Enable mongodb+srv URIs : ${enable_srv}
Enable maintainer flags : ${enable_maintainer_flags}
Code coverage support : ${enable_coverage}
Cross Compiling : ${enable_crosscompile}
Fast counters : ${enable_rdtscp}
Shared memory performance counters : ${enable_shm_counters}
SASL : ${sasl_mode}
SSL : ${enable_ssl}
Snappy Compression : ${with_snappy}
Zlib Compression : ${with_zlib}
Libbson : ${with_libbson}
${experimental_features}
Documentation:
man : ${enable_man_pages}
HTML : ${enable_html_docs}
"
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/ReadCommandLineArguments.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/ReadCommandLineArguments.m4
similarity index 95%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/ReadCommandLineArguments.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/ReadCommandLineArguments.m4
index cb14afb4..0c61137d 100644
--- a/mongodb-1.3.4/src/libmongoc/build/autotools/ReadCommandLineArguments.m4
+++ b/mongodb-1.4.2/src/libmongoc/build/autotools/ReadCommandLineArguments.m4
@@ -1,130 +1,139 @@
AC_MSG_CHECKING([whether to do a debug build])
AC_ARG_ENABLE(debug,
AC_HELP_STRING([--enable-debug], [disable optimizations @<:@default=no@:>@]),
[],[enable_debug="no"])
AC_MSG_RESULT([$enable_debug])
AC_MSG_CHECKING([whether to enable tracing])
AC_ARG_ENABLE(tracing,
AC_HELP_STRING([--enable-tracing], [very verbose debug output @<:@default=no@:>@]),
[],[enable_tracing="no"])
AC_MSG_RESULT([$enable_tracing])
AC_MSG_CHECKING([whether to automatic init and cleanup])
AC_ARG_ENABLE(automatic-init-and-cleanup,
AC_HELP_STRING([--enable-automatic-init-and-cleanup],
[call mongoc_init() and mongoc_cleanup() automatically - DEPRECATED @<:@default=yes@:>@]),
[],[enable_automatic_init_and_cleanup="yes"])
AC_MSG_RESULT([$enable_automatic_init_and_cleanup])
AC_MSG_CHECKING([whether to enable optimized builds])
AC_ARG_ENABLE(optimizations,
AC_HELP_STRING([--enable-optimizations], [turn on build-time optimizations @<:@default=yes@:>@]),
[enable_optimizations=$enableval],
[
if test "$enable_debug" = "yes"; then
enable_optimizations="no";
else
enable_optimizations="yes";
fi
])
AC_MSG_RESULT([$enable_optimizations])
AC_MSG_CHECKING([whether to enable shared memory performance counters])
AC_ARG_ENABLE(shm_counters,
AC_HELP_STRING([--enable-shm-counters], [turn on shared memory performance counters @<:@default=yes@:>@]),
[],[enable_shm_counters="yes"])
AC_MSG_RESULT([$enable_shm_counters])
AC_MSG_CHECKING([whether to enable code coverage support])
AC_ARG_ENABLE(coverage,
AC_HELP_STRING([--enable-coverage], [enable code coverage support @<:@default=no@:>@]),
[],
[enable_coverage="no"])
AC_MSG_RESULT([$enable_coverage])
AC_MSG_CHECKING([whether to enable debug symbols])
AC_ARG_ENABLE(debug_symbols,
AC_HELP_STRING([--enable-debug-symbols=yes|no|min],
[enable debug symbols @<:@default=yes for debug builds, otherwise no@:>@]),
[
case "$enable_debug_symbols" in
yes) enable_debug_symbols="full" ;;
no|min|full) ;;
*) AC_MSG_ERROR([Invalid debug symbols option: must be yes, no, or min.]) ;;
esac
],
[
if test "$enable_debug" = "yes"; then
enable_debug_symbols="yes";
else
enable_debug_symbols="no";
fi
])
AC_MSG_RESULT([$enable_debug_symbols])
AC_ARG_ENABLE([rdtscp],
[AS_HELP_STRING([--enable-rdtscp=@<:@no/yes@:>@],
[fast performance counters on Intel using the RDTSCP instruction @<:@default=no@:>@])],
[],
[enable_rdtscp=no])
+AC_ARG_ENABLE(srv,
+ AC_HELP_STRING([--enable-srv=@<:@auto/yes/no@:>@],
+ [support mongodb+srv URIs. default=auto]),
+ [],
+ [enable_srv=auto])
+
+AS_IF([test "x$enable_srv" != "xyes" -a "x$enable_srv" != "xno"],
+ [enable_srv=auto])
+
# use strict compiler flags only on development releases
AS_IF([test "x$MONGOC_PRERELEASE_VERSION" != "x"],
[maintainer_flags_default=yes],
[maintainer_flags_default=no])
AC_ARG_ENABLE([maintainer-flags],
[AS_HELP_STRING([--enable-maintainer-flags=@<:@no/yes@:>@],
[use strict compiler checks @<:@default=no for release builds, yes for prereleases@:>@])],
[],
enable_maintainer_flags=$maintainer_flags_default)
# Check if we should use the bundled (git submodule) libbson
AC_ARG_WITH(libbson,
AC_HELP_STRING([--with-libbson=@<:@auto/system/bundled@:>@],
[use system installed libbson or bundled libbson. default=auto]),
[],
[with_libbson=auto])
AS_IF([test "x$with_libbson" != xbundled -a "x$with_libbson" != xsystem -a "x$with_libbson" != xauto],
[AC_MSG_ERROR([Invalid --with-libbson option: must be system, bundled, or auto])])
AC_ARG_WITH(snappy,
AC_HELP_STRING([--with-snappy=@<:@auto/yes/no@:>@],
[use system installed snappy. default=auto]),
[],
[with_snappy=auto])
AS_IF([test "x$with_snappy" != xyes -a "x$with_snappy" != xsystem -a "x$with_snappy" != xauto -a "x$with_snappy" != xno],
[AC_MSG_ERROR([Invalid --with-snappy option: must be auto, yes, or no])])
AC_ARG_WITH(zlib,
AC_HELP_STRING([--with-zlib=@<:@auto/system/bundled/no@:>@],
[use system installed zlib or bundled zlib. default=auto]),
[],
[with_zlib=auto])
AS_IF([test "x$with_zlib" != xbundled -a "x$with_zlib" != xsystem -a "x$with_zlib" != xauto -a "x$with_zlib" != xno],
[AC_MSG_ERROR([Invalid --with-zlib option: must be system, bundled, auto, no])])
AC_ARG_ENABLE([html-docs],
[AS_HELP_STRING([--enable-html-docs=@<:@yes/no@:>@],
[build HTML documentation @<:@default=no@:>@])],
[],
[enable_html_docs=no])
AC_ARG_ENABLE([man-pages],
[AS_HELP_STRING([--enable-man-pages=@<:@yes/no@:>@],
[build and install man pages @<:@default=no@:>@])],
[],
[enable_man_pages=no])
AC_ARG_ENABLE([examples],
[AS_HELP_STRING([--enable-examples=@<:@yes/no@:>@],
[build MongoDB C Driver example programs])],
[],
[enable_examples=yes])
AC_ARG_ENABLE([tests],
[AS_HELP_STRING([--enable-tests=@<:@yes/no@:>@],
[build MongoDB C Driver tests])],
[],
[enable_tests=yes])
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/SetupAutomake.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/SetupAutomake.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/SetupAutomake.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/SetupAutomake.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/SetupLibtool.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/SetupLibtool.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/SetupLibtool.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/SetupLibtool.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/Versions.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/Versions.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/Versions.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/Versions.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/WeakSymbols.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/WeakSymbols.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/WeakSymbols.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/WeakSymbols.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/ac_check_typedef.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/m4/ac_check_typedef.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/m4/ac_check_typedef.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/m4/ac_check_typedef.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/m4/ac_compile_check_sizeof.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/m4/ac_compile_check_sizeof.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/m4/ac_compile_check_sizeof.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/m4/ac_compile_check_sizeof.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/ac_create_stdint_h.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/m4/ac_create_stdint_h.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/m4/ac_create_stdint_h.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/m4/ac_create_stdint_h.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/as-compiler-flag.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/m4/as-compiler-flag.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/m4/as-compiler-flag.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/m4/as-compiler-flag.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/ax_check_compile_flag.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/m4/ax_check_compile_flag.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/m4/ax_check_compile_flag.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/m4/ax_check_compile_flag.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/ax_check_link_flag.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/m4/ax_check_link_flag.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/m4/ax_check_link_flag.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/m4/ax_check_link_flag.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/ax_prototype.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/m4/ax_prototype.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/m4/ax_prototype.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/m4/ax_prototype.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/ax_pthread.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/m4/ax_pthread.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/m4/ax_pthread.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/m4/ax_pthread.m4
diff --git a/mongodb-1.3.4/src/libbson/build/autotools/m4/pkg.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/m4/pkg.m4
similarity index 100%
rename from mongodb-1.3.4/src/libbson/build/autotools/m4/pkg.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/m4/pkg.m4
diff --git a/mongodb-1.3.4/src/libmongoc/build/autotools/m4/silent.m4 b/mongodb-1.4.2/src/libmongoc/build/autotools/m4/silent.m4
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/build/autotools/m4/silent.m4
rename to mongodb-1.4.2/src/libmongoc/build/autotools/m4/silent.m4
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-apm-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-apm-private.h
similarity index 94%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-apm-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-apm-private.h
index c406e945..5652eb97 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-apm-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-apm-private.h
@@ -1,186 +1,195 @@
/*
* Copyright 2016 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_APM_PRIVATE_H
#define MONGOC_APM_PRIVATE_H
#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-apm.h"
BSON_BEGIN_DECLS
+/* forward decl */
+struct _mongoc_cmd_t;
+
struct _mongoc_apm_callbacks_t {
mongoc_apm_command_started_cb_t started;
mongoc_apm_command_succeeded_cb_t succeeded;
mongoc_apm_command_failed_cb_t failed;
mongoc_apm_server_changed_cb_t server_changed;
mongoc_apm_server_opening_cb_t server_opening;
mongoc_apm_server_closed_cb_t server_closed;
mongoc_apm_topology_changed_cb_t topology_changed;
mongoc_apm_topology_opening_cb_t topology_opening;
mongoc_apm_topology_closed_cb_t topology_closed;
mongoc_apm_server_heartbeat_started_cb_t server_heartbeat_started;
mongoc_apm_server_heartbeat_succeeded_cb_t server_heartbeat_succeeded;
mongoc_apm_server_heartbeat_failed_cb_t server_heartbeat_failed;
};
/*
 * command monitoring events
 */

/* Data for a "command started" event. All pointer fields are borrowed
 * except "command", which is owned (and must be destroyed in cleanup)
 * when command_owned is true. */
struct _mongoc_apm_command_started_t {
   bson_t *command;           /* the command document about to be sent */
   bool command_owned;        /* true if cleanup must bson_destroy (command) */
   const char *database_name;
   const char *command_name;
   int64_t request_id;
   int64_t operation_id;
   const mongoc_host_list_t *host;
   uint32_t server_id;
   void *context;             /* opaque user context */
};

/* Data for a "command succeeded" event; all fields are borrowed. */
struct _mongoc_apm_command_succeeded_t {
   int64_t duration;          /* round-trip duration */
   const bson_t *reply;
   const char *command_name;
   int64_t request_id;
   int64_t operation_id;
   const mongoc_host_list_t *host;
   uint32_t server_id;
   void *context;
};

/* Data for a "command failed" event; all fields are borrowed. */
struct _mongoc_apm_command_failed_t {
   int64_t duration;
   const char *command_name;
   const bson_error_t *error;
   int64_t request_id;
   int64_t operation_id;
   const mongoc_host_list_t *host;
   uint32_t server_id;
   void *context;
};

/*
 * SDAM monitoring events
 */

/* A server description changed (old and new descriptions are borrowed). */
struct _mongoc_apm_server_changed_t {
   const mongoc_host_list_t *host;
   bson_oid_t topology_id;    /* copied out via the accessor, not aliased */
   const mongoc_server_description_t *previous_description;
   const mongoc_server_description_t *new_description;
   void *context;
};

/* A connection to a server is being opened. */
struct _mongoc_apm_server_opening_t {
   const mongoc_host_list_t *host;
   bson_oid_t topology_id;
   void *context;
};

/* A server was removed from the topology. */
struct _mongoc_apm_server_closed_t {
   const mongoc_host_list_t *host;
   bson_oid_t topology_id;
   void *context;
};

/* The topology description changed. */
struct _mongoc_apm_topology_changed_t {
   bson_oid_t topology_id;
   const mongoc_topology_description_t *previous_description;
   const mongoc_topology_description_t *new_description;
   void *context;
};

/* Topology monitoring is starting. */
struct _mongoc_apm_topology_opening_t {
   bson_oid_t topology_id;
   void *context;
};

/* Topology monitoring has ended. */
struct _mongoc_apm_topology_closed_t {
   bson_oid_t topology_id;
   void *context;
};

/* An ismaster heartbeat was sent to a server. */
struct _mongoc_apm_server_heartbeat_started_t {
   const mongoc_host_list_t *host;
   void *context;
};

/* A heartbeat completed successfully. */
struct _mongoc_apm_server_heartbeat_succeeded_t {
   int64_t duration_usec;
   const bson_t *reply;
   const mongoc_host_list_t *host;
   void *context;
};

/* A heartbeat failed. */
struct _mongoc_apm_server_heartbeat_failed_t {
   int64_t duration_usec;
   const bson_error_t *error;
   const mongoc_host_list_t *host;
   void *context;
};
/* Initialize a command-started event from its individual components. */
void
mongoc_apm_command_started_init (mongoc_apm_command_started_t *event,
                                 const bson_t *command,
                                 const char *database_name,
                                 const char *command_name,
                                 int64_t request_id,
                                 int64_t operation_id,
                                 const mongoc_host_list_t *host,
                                 uint32_t server_id,
                                 void *context);

/* Initialize a command-started event directly from a prepared command
 * (includes any OP_MSG document-sequence payload). */
void
mongoc_apm_command_started_init_with_cmd (mongoc_apm_command_started_t *event,
                                          struct _mongoc_cmd_t *cmd,
                                          int64_t request_id,
                                          void *context);

/* Release any resources owned by the event (the command copy, if any). */
void
mongoc_apm_command_started_cleanup (mongoc_apm_command_started_t *event);
void
mongoc_apm_command_succeeded_init (mongoc_apm_command_succeeded_t *event,
int64_t duration,
const bson_t *reply,
const char *command_name,
int64_t request_id,
int64_t operation_id,
const mongoc_host_list_t *host,
uint32_t server_id,
void *context);
void
mongoc_apm_command_succeeded_cleanup (mongoc_apm_command_succeeded_t *event);
void
mongoc_apm_command_failed_init (mongoc_apm_command_failed_t *event,
int64_t duration,
const char *command_name,
const bson_error_t *error,
int64_t request_id,
int64_t operation_id,
const mongoc_host_list_t *host,
uint32_t server_id,
void *context);
void
mongoc_apm_command_failed_cleanup (mongoc_apm_command_failed_t *event);
BSON_END_DECLS
#endif /* MONGOC_APM_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-apm.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-apm.c
similarity index 87%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-apm.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-apm.c
index 8b73ca7f..7aaab838 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-apm.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-apm.c
@@ -1,708 +1,776 @@
/*
* Copyright 2016 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include "mongoc-util-private.h"
#include "mongoc-apm-private.h"
+#include "mongoc-cmd-private.h"
/*
* An Application Performance Management (APM) implementation, complying with
* MongoDB's Command Monitoring Spec:
*
* https://github.com/mongodb/specifications/tree/master/source/command-monitoring
*/
+static void
+append_documents_from_cmd (const mongoc_cmd_t *cmd,
+ mongoc_apm_command_started_t *event)
+{
+ int32_t doc_len;
+ bson_t doc;
+ const uint8_t *pos;
+ const char *field_name;
+ bson_t bson;
+ char str[16];
+ const char *key;
+ uint32_t i;
+
+ if (!cmd->payload || !cmd->payload_size) {
+ return;
+ }
+
+ if (!event->command_owned) {
+ event->command = bson_copy (event->command);
+ event->command_owned = true;
+ }
+
+ /* make array from outgoing OP_MSG payload type 1 on an "insert",
+ * "update", or "delete" command. */
+ field_name = _mongoc_get_documents_field_name (cmd->command_name);
+ BSON_ASSERT (field_name);
+ BSON_ASSERT (BSON_APPEND_ARRAY_BEGIN (event->command, field_name, &bson));
+
+ pos = cmd->payload;
+ i = 0;
+ while (pos < cmd->payload + cmd->payload_size) {
+ memcpy (&doc_len, pos, sizeof (doc_len));
+ doc_len = BSON_UINT32_FROM_LE (doc_len);
+ BSON_ASSERT (bson_init_static (&doc, pos, (size_t) doc_len));
+ bson_uint32_to_string (i, &key, str, sizeof (str));
+ BSON_APPEND_DOCUMENT (&bson, key, &doc);
+
+ pos += doc_len;
+ i++;
+ }
+
+ bson_append_array_end (event->command, &bson);
+}
+
+
/*
* Private initializer / cleanup functions.
*/
/*
 * mongoc_apm_command_started_init:
 *
 * Fill in a command-started event. All pointer parameters are borrowed,
 * except that @command may be copied (and then owned by the event) when
 * it must be unwrapped from a "$query" envelope.
 */
void
mongoc_apm_command_started_init (mongoc_apm_command_started_t *event,
                                 const bson_t *command,
                                 const char *database_name,
                                 const char *command_name,
                                 int64_t request_id,
                                 int64_t operation_id,
                                 const mongoc_host_list_t *host,
                                 uint32_t server_id,
                                 void *context)
{
   bson_iter_t iter;
   uint32_t len;
   const uint8_t *data;

   /* Command Monitoring Spec:
    *
    * In cases where queries or commands are embedded in a $query parameter
    * when a read preference is provided, they MUST be unwrapped and the value
    * of the $query attribute becomes the filter or the command in the started
    * event. The read preference will subsequently be dropped as it is
    * considered metadata and metadata is not currently provided in the command
    * events.
    */
   if (bson_has_field (command, "$readPreference")) {
      if (bson_iter_init_find (&iter, command, "$query") &&
          BSON_ITER_HOLDS_DOCUMENT (&iter)) {
         /* unwrap: the $query subdocument becomes the event's command */
         bson_iter_document (&iter, &len, &data);
         event->command = bson_new_from_data (data, len);
         event->command_owned = true;
      } else {
         /* Got $readPreference without $query, probably OP_MSG */
         event->command = (bson_t *) command;
         event->command_owned = false;
      }
   } else {
      /* discard "const", we promise not to modify "command" */
      event->command = (bson_t *) command;
      event->command_owned = false;
   }

   event->database_name = database_name;
   event->command_name = command_name;
   event->request_id = request_id;
   event->operation_id = operation_id;
   event->host = host;
   event->server_id = server_id;
   event->context = context;
}
+void
+mongoc_apm_command_started_init_with_cmd (mongoc_apm_command_started_t *event,
+ mongoc_cmd_t *cmd,
+ int64_t request_id,
+ void *context)
+{
+ mongoc_apm_command_started_init (event,
+ cmd->command,
+ cmd->db_name,
+ cmd->command_name,
+ request_id,
+ cmd->operation_id,
+ &cmd->server_stream->sd->host,
+ cmd->server_stream->sd->id,
+ context);
+
+ /* OP_MSG document sequence for insert, update, or delete? */
+ append_documents_from_cmd (cmd, event);
+}
+
+
/* Release the command document, but only when this event owns a copy
 * (it borrows the caller's document otherwise). */
void
mongoc_apm_command_started_cleanup (mongoc_apm_command_started_t *event)
{
   if (!event->command_owned) {
      return;
   }

   bson_destroy (event->command);
}
/*
 * mongoc_apm_command_succeeded_init:
 *
 * Fill in a command-succeeded event. All pointers are borrowed; the
 * event copies and owns nothing, so cleanup is a no-op.
 */
void
mongoc_apm_command_succeeded_init (mongoc_apm_command_succeeded_t *event,
                                   int64_t duration,
                                   const bson_t *reply,
                                   const char *command_name,
                                   int64_t request_id,
                                   int64_t operation_id,
                                   const mongoc_host_list_t *host,
                                   uint32_t server_id,
                                   void *context)
{
   /* a reply document is required for a succeeded event */
   BSON_ASSERT (reply);

   event->duration = duration;
   event->reply = reply;
   event->command_name = command_name;
   event->request_id = request_id;
   event->operation_id = operation_id;
   event->host = host;
   event->server_id = server_id;
   event->context = context;
}

void
mongoc_apm_command_succeeded_cleanup (mongoc_apm_command_succeeded_t *event)
{
   /* no-op: the event borrows all of its fields */
}
/*
 * mongoc_apm_command_failed_init:
 *
 * Fill in a command-failed event. All pointers are borrowed; the event
 * copies and owns nothing, so cleanup is a no-op.
 */
void
mongoc_apm_command_failed_init (mongoc_apm_command_failed_t *event,
                                int64_t duration,
                                const char *command_name,
                                const bson_error_t *error,
                                int64_t request_id,
                                int64_t operation_id,
                                const mongoc_host_list_t *host,
                                uint32_t server_id,
                                void *context)
{
   /* identity of the operation */
   event->command_name = command_name;
   event->request_id = request_id;
   event->operation_id = operation_id;

   /* outcome */
   event->duration = duration;
   event->error = error;

   /* where it ran, and the user's context */
   event->host = host;
   event->server_id = server_id;
   event->context = context;
}

void
mongoc_apm_command_failed_cleanup (mongoc_apm_command_failed_t *event)
{
   /* nothing owned, nothing to free */
}
/*
 * event field accessors
 */

/* command-started event fields: read-only getters; returned pointers are
 * borrowed from the event. */

const bson_t *
mongoc_apm_command_started_get_command (
   const mongoc_apm_command_started_t *event)
{
   return event->command;
}

const char *
mongoc_apm_command_started_get_database_name (
   const mongoc_apm_command_started_t *event)
{
   return event->database_name;
}

const char *
mongoc_apm_command_started_get_command_name (
   const mongoc_apm_command_started_t *event)
{
   return event->command_name;
}

int64_t
mongoc_apm_command_started_get_request_id (
   const mongoc_apm_command_started_t *event)
{
   return event->request_id;
}

int64_t
mongoc_apm_command_started_get_operation_id (
   const mongoc_apm_command_started_t *event)
{
   return event->operation_id;
}

const mongoc_host_list_t *
mongoc_apm_command_started_get_host (const mongoc_apm_command_started_t *event)
{
   return event->host;
}

uint32_t
mongoc_apm_command_started_get_server_id (
   const mongoc_apm_command_started_t *event)
{
   return event->server_id;
}

/* returns the opaque user context supplied when callbacks were set */
void *
mongoc_apm_command_started_get_context (
   const mongoc_apm_command_started_t *event)
{
   return event->context;
}
/* command-succeeded event fields: read-only getters; returned pointers
 * are borrowed from the event. */

int64_t
mongoc_apm_command_succeeded_get_duration (
   const mongoc_apm_command_succeeded_t *event)
{
   return event->duration;
}

const bson_t *
mongoc_apm_command_succeeded_get_reply (
   const mongoc_apm_command_succeeded_t *event)
{
   return event->reply;
}

const char *
mongoc_apm_command_succeeded_get_command_name (
   const mongoc_apm_command_succeeded_t *event)
{
   return event->command_name;
}

int64_t
mongoc_apm_command_succeeded_get_request_id (
   const mongoc_apm_command_succeeded_t *event)
{
   return event->request_id;
}

int64_t
mongoc_apm_command_succeeded_get_operation_id (
   const mongoc_apm_command_succeeded_t *event)
{
   return event->operation_id;
}

const mongoc_host_list_t *
mongoc_apm_command_succeeded_get_host (
   const mongoc_apm_command_succeeded_t *event)
{
   return event->host;
}

uint32_t
mongoc_apm_command_succeeded_get_server_id (
   const mongoc_apm_command_succeeded_t *event)
{
   return event->server_id;
}

void *
mongoc_apm_command_succeeded_get_context (
   const mongoc_apm_command_succeeded_t *event)
{
   return event->context;
}
/* command-failed event fields: read-only getters; returned pointers are
 * borrowed from the event. */

int64_t
mongoc_apm_command_failed_get_duration (
   const mongoc_apm_command_failed_t *event)
{
   return event->duration;
}

const char *
mongoc_apm_command_failed_get_command_name (
   const mongoc_apm_command_failed_t *event)
{
   return event->command_name;
}

/* copies the event's error into caller-provided storage */
void
mongoc_apm_command_failed_get_error (const mongoc_apm_command_failed_t *event,
                                     bson_error_t *error)
{
   memcpy (error, event->error, sizeof *event->error);
}

int64_t
mongoc_apm_command_failed_get_request_id (
   const mongoc_apm_command_failed_t *event)
{
   return event->request_id;
}

int64_t
mongoc_apm_command_failed_get_operation_id (
   const mongoc_apm_command_failed_t *event)
{
   return event->operation_id;
}

const mongoc_host_list_t *
mongoc_apm_command_failed_get_host (const mongoc_apm_command_failed_t *event)
{
   return event->host;
}

uint32_t
mongoc_apm_command_failed_get_server_id (
   const mongoc_apm_command_failed_t *event)
{
   return event->server_id;
}

void *
mongoc_apm_command_failed_get_context (const mongoc_apm_command_failed_t *event)
{
   return event->context;
}
/* server-changed event fields */

const mongoc_host_list_t *
mongoc_apm_server_changed_get_host (const mongoc_apm_server_changed_t *event)
{
   return event->host;
}

/* copies the topology id into caller-provided storage */
void
mongoc_apm_server_changed_get_topology_id (
   const mongoc_apm_server_changed_t *event, bson_oid_t *topology_id)
{
   bson_oid_copy (&event->topology_id, topology_id);
}

const mongoc_server_description_t *
mongoc_apm_server_changed_get_previous_description (
   const mongoc_apm_server_changed_t *event)
{
   return event->previous_description;
}

const mongoc_server_description_t *
mongoc_apm_server_changed_get_new_description (
   const mongoc_apm_server_changed_t *event)
{
   return event->new_description;
}

void *
mongoc_apm_server_changed_get_context (const mongoc_apm_server_changed_t *event)
{
   return event->context;
}

/* server-opening event fields */

const mongoc_host_list_t *
mongoc_apm_server_opening_get_host (const mongoc_apm_server_opening_t *event)
{
   return event->host;
}

void
mongoc_apm_server_opening_get_topology_id (
   const mongoc_apm_server_opening_t *event, bson_oid_t *topology_id)
{
   bson_oid_copy (&event->topology_id, topology_id);
}

void *
mongoc_apm_server_opening_get_context (const mongoc_apm_server_opening_t *event)
{
   return event->context;
}

/* server-closed event fields */

const mongoc_host_list_t *
mongoc_apm_server_closed_get_host (const mongoc_apm_server_closed_t *event)
{
   return event->host;
}

void
mongoc_apm_server_closed_get_topology_id (
   const mongoc_apm_server_closed_t *event, bson_oid_t *topology_id)
{
   bson_oid_copy (&event->topology_id, topology_id);
}

void *
mongoc_apm_server_closed_get_context (const mongoc_apm_server_closed_t *event)
{
   return event->context;
}

/* topology-changed event fields */

void
mongoc_apm_topology_changed_get_topology_id (
   const mongoc_apm_topology_changed_t *event, bson_oid_t *topology_id)
{
   bson_oid_copy (&event->topology_id, topology_id);
}

const mongoc_topology_description_t *
mongoc_apm_topology_changed_get_previous_description (
   const mongoc_apm_topology_changed_t *event)
{
   return event->previous_description;
}

const mongoc_topology_description_t *
mongoc_apm_topology_changed_get_new_description (
   const mongoc_apm_topology_changed_t *event)
{
   return event->new_description;
}

void *
mongoc_apm_topology_changed_get_context (
   const mongoc_apm_topology_changed_t *event)
{
   return event->context;
}

/* topology-opening event field */

void
mongoc_apm_topology_opening_get_topology_id (
   const mongoc_apm_topology_opening_t *event, bson_oid_t *topology_id)
{
   bson_oid_copy (&event->topology_id, topology_id);
}

void *
mongoc_apm_topology_opening_get_context (
   const mongoc_apm_topology_opening_t *event)
{
   return event->context;
}

/* topology-closed event field */

void
mongoc_apm_topology_closed_get_topology_id (
   const mongoc_apm_topology_closed_t *event, bson_oid_t *topology_id)
{
   bson_oid_copy (&event->topology_id, topology_id);
}

void *
mongoc_apm_topology_closed_get_context (
   const mongoc_apm_topology_closed_t *event)
{
   return event->context;
}
/* heartbeat-started event field */

const mongoc_host_list_t *
mongoc_apm_server_heartbeat_started_get_host (
   const mongoc_apm_server_heartbeat_started_t *event)
{
   return event->host;
}

void *
mongoc_apm_server_heartbeat_started_get_context (
   const mongoc_apm_server_heartbeat_started_t *event)
{
   return event->context;
}

/* heartbeat-succeeded event fields */

/* duration is stored in microseconds (duration_usec) */
int64_t
mongoc_apm_server_heartbeat_succeeded_get_duration (
   const mongoc_apm_server_heartbeat_succeeded_t *event)
{
   return event->duration_usec;
}

const bson_t *
mongoc_apm_server_heartbeat_succeeded_get_reply (
   const mongoc_apm_server_heartbeat_succeeded_t *event)
{
   return event->reply;
}

const mongoc_host_list_t *
mongoc_apm_server_heartbeat_succeeded_get_host (
   const mongoc_apm_server_heartbeat_succeeded_t *event)
{
   return event->host;
}

void *
mongoc_apm_server_heartbeat_succeeded_get_context (
   const mongoc_apm_server_heartbeat_succeeded_t *event)
{
   return event->context;
}

/* heartbeat-failed event fields */

int64_t
mongoc_apm_server_heartbeat_failed_get_duration (
   const mongoc_apm_server_heartbeat_failed_t *event)
{
   return event->duration_usec;
}

/* copies the event's error into caller-provided storage */
void
mongoc_apm_server_heartbeat_failed_get_error (
   const mongoc_apm_server_heartbeat_failed_t *event, bson_error_t *error)
{
   memcpy (error, event->error, sizeof *event->error);
}

const mongoc_host_list_t *
mongoc_apm_server_heartbeat_failed_get_host (
   const mongoc_apm_server_heartbeat_failed_t *event)
{
   return event->host;
}

void *
mongoc_apm_server_heartbeat_failed_get_context (
   const mongoc_apm_server_heartbeat_failed_t *event)
{
   return event->context;
}
/*
 * registering callbacks
 */

/* Allocate a zeroed callbacks struct: every callback starts unset. */
mongoc_apm_callbacks_t *
mongoc_apm_callbacks_new (void)
{
   size_t s = sizeof (mongoc_apm_callbacks_t);

   return (mongoc_apm_callbacks_t *) bson_malloc0 (s);
}

void
mongoc_apm_callbacks_destroy (mongoc_apm_callbacks_t *callbacks)
{
   bson_free (callbacks);
}

/* Each setter below installs exactly one callback; assigning NULL
 * clears a previously-set callback. */

void
mongoc_apm_set_command_started_cb (mongoc_apm_callbacks_t *callbacks,
                                   mongoc_apm_command_started_cb_t cb)
{
   callbacks->started = cb;
}

void
mongoc_apm_set_command_succeeded_cb (mongoc_apm_callbacks_t *callbacks,
                                     mongoc_apm_command_succeeded_cb_t cb)
{
   callbacks->succeeded = cb;
}

void
mongoc_apm_set_command_failed_cb (mongoc_apm_callbacks_t *callbacks,
                                  mongoc_apm_command_failed_cb_t cb)
{
   callbacks->failed = cb;
}

void
mongoc_apm_set_server_changed_cb (mongoc_apm_callbacks_t *callbacks,
                                  mongoc_apm_server_changed_cb_t cb)
{
   callbacks->server_changed = cb;
}

void
mongoc_apm_set_server_opening_cb (mongoc_apm_callbacks_t *callbacks,
                                  mongoc_apm_server_opening_cb_t cb)
{
   callbacks->server_opening = cb;
}

void
mongoc_apm_set_server_closed_cb (mongoc_apm_callbacks_t *callbacks,
                                 mongoc_apm_server_closed_cb_t cb)
{
   callbacks->server_closed = cb;
}

void
mongoc_apm_set_topology_changed_cb (mongoc_apm_callbacks_t *callbacks,
                                    mongoc_apm_topology_changed_cb_t cb)
{
   callbacks->topology_changed = cb;
}

void
mongoc_apm_set_topology_opening_cb (mongoc_apm_callbacks_t *callbacks,
                                    mongoc_apm_topology_opening_cb_t cb)
{
   callbacks->topology_opening = cb;
}

void
mongoc_apm_set_topology_closed_cb (mongoc_apm_callbacks_t *callbacks,
                                   mongoc_apm_topology_closed_cb_t cb)
{
   callbacks->topology_closed = cb;
}

void
mongoc_apm_set_server_heartbeat_started_cb (
   mongoc_apm_callbacks_t *callbacks,
   mongoc_apm_server_heartbeat_started_cb_t cb)
{
   callbacks->server_heartbeat_started = cb;
}

void
mongoc_apm_set_server_heartbeat_succeeded_cb (
   mongoc_apm_callbacks_t *callbacks,
   mongoc_apm_server_heartbeat_succeeded_cb_t cb)
{
   callbacks->server_heartbeat_succeeded = cb;
}

void
mongoc_apm_set_server_heartbeat_failed_cb (
   mongoc_apm_callbacks_t *callbacks,
   mongoc_apm_server_heartbeat_failed_cb_t cb)
{
   callbacks->server_heartbeat_failed = cb;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-apm.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-apm.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-apm.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-apm.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-array-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-array-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-array-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-array-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-array.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-array.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-array.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-array.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-async-cmd-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-async-cmd-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-async-cmd-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-async-cmd-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-async-cmd.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-async-cmd.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-async-cmd.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-async-cmd.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-async-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-async-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-async-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-async-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-async.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-async.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-async.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-async.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-b64-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-b64-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-b64-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-b64-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-b64.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-b64.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-b64.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-b64.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-buffer-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-buffer-private.h
similarity index 93%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-buffer-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-buffer-private.h
index 2fcfaad0..674c0ce7 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-buffer-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-buffer-private.h
@@ -1,82 +1,87 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_BUFFER_PRIVATE_H
#define MONGOC_BUFFER_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-stream.h"
BSON_BEGIN_DECLS
typedef struct _mongoc_buffer_t mongoc_buffer_t;
struct _mongoc_buffer_t {
uint8_t *data;
size_t datalen;
off_t off;
size_t len;
bson_realloc_func realloc_func;
void *realloc_data;
};
void
_mongoc_buffer_init (mongoc_buffer_t *buffer,
uint8_t *buf,
size_t buflen,
bson_realloc_func realloc_func,
void *realloc_data);
+bool
+_mongoc_buffer_append (mongoc_buffer_t *buffer,
+ const uint8_t *data,
+ size_t data_size);
+
bool
_mongoc_buffer_append_from_stream (mongoc_buffer_t *buffer,
mongoc_stream_t *stream,
size_t size,
int32_t timeout_msec,
bson_error_t *error);
ssize_t
_mongoc_buffer_try_append_from_stream (mongoc_buffer_t *buffer,
mongoc_stream_t *stream,
size_t size,
int32_t timeout_msec);
ssize_t
_mongoc_buffer_fill (mongoc_buffer_t *buffer,
mongoc_stream_t *stream,
size_t min_bytes,
int32_t timeout_msec,
bson_error_t *error);
void
_mongoc_buffer_destroy (mongoc_buffer_t *buffer);
void
_mongoc_buffer_clear (mongoc_buffer_t *buffer, bool zero);
BSON_END_DECLS
#endif /* MONGOC_BUFFER_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-buffer.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-buffer.c
similarity index 90%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-buffer.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-buffer.c
index c73c5ca1..4e646cfb 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-buffer.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-buffer.c
@@ -1,329 +1,369 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bson.h>
#include <stdarg.h>
#include "mongoc-error.h"
#include "mongoc-buffer-private.h"
#include "mongoc-trace-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "buffer"
#ifndef MONGOC_BUFFER_DEFAULT_SIZE
#define MONGOC_BUFFER_DEFAULT_SIZE 1024
#endif
#define SPACE_FOR(_b, _sz) \
(((ssize_t) (_b)->datalen - (ssize_t) (_b)->off - (ssize_t) (_b)->len) >= \
(ssize_t) (_sz))
/**
* _mongoc_buffer_init:
* @buffer: A mongoc_buffer_t to initialize.
* @buf: A data buffer to attach to @buffer.
* @buflen: The size of @buflen.
* @realloc_func: A function to resize @buf.
*
* Initializes @buffer for use. If additional space is needed by @buffer, then
* @realloc_func will be called to resize @buf.
*
* @buffer takes ownership of @buf and will realloc it to zero bytes when
* cleaning up the data structure.
*/
void
_mongoc_buffer_init (mongoc_buffer_t *buffer,
                     uint8_t *buf,
                     size_t buflen,
                     bson_realloc_func realloc_func,
                     void *realloc_data)
{
   BSON_ASSERT (buffer);
   BSON_ASSERT (buflen || !buf);

   /* Apply defaults: the standard bson allocator, the default capacity,
    * and a fresh allocation when the caller supplied no storage. */
   realloc_func = realloc_func ? realloc_func : bson_realloc_ctx;
   buflen = buflen ? buflen : MONGOC_BUFFER_DEFAULT_SIZE;

   if (!buf) {
      buf = (uint8_t *) realloc_func (NULL, buflen, NULL);
   }

   /* Start from a clean slate, then wire up the storage and allocator.
    * The buffer takes ownership of @buf. */
   memset (buffer, 0, sizeof *buffer);

   buffer->data = buf;
   buffer->datalen = buflen;
   buffer->len = 0;
   buffer->off = 0;
   buffer->realloc_func = realloc_func;
   buffer->realloc_data = realloc_data;
}
/**
* _mongoc_buffer_destroy:
* @buffer: A mongoc_buffer_t.
*
* Cleanup after @buffer and release any allocated resources.
*/
void
_mongoc_buffer_destroy (mongoc_buffer_t *buffer)
{
   BSON_ASSERT (buffer);

   /* Hand the storage back through the realloc function (shrink to zero),
    * then wipe the struct so stale pointers cannot be reused. */
   if (buffer->data != NULL && buffer->realloc_func != NULL) {
      buffer->realloc_func (buffer->data, 0, buffer->realloc_data);
   }

   memset (buffer, 0, sizeof *buffer);
}
/**
* _mongoc_buffer_clear:
* @buffer: A mongoc_buffer_t.
* @zero: If the memory should be zeroed.
*
* Clears a buffers contents and resets it to initial state. You can request
* that the memory is zeroed, which might be useful if you know the contents
* contain security related information.
*/
void
_mongoc_buffer_clear (mongoc_buffer_t *buffer, bool zero)
{
   BSON_ASSERT (buffer);

   /* Optionally scrub the contents (useful when they held sensitive
    * data), then rewind to empty without releasing the storage. */
   if (zero) {
      memset (buffer->data, 0, buffer->datalen);
   }

   buffer->len = 0;
   buffer->off = 0;
}
+bool
+_mongoc_buffer_append (mongoc_buffer_t *buffer,
+ const uint8_t *data,
+ size_t data_size)
+{
+ uint8_t *buf;
+
+ ENTRY;
+
+ BSON_ASSERT (buffer);
+ BSON_ASSERT (data_size);
+
+ BSON_ASSERT (buffer->datalen);
+ BSON_ASSERT ((buffer->datalen + data_size) < INT_MAX);
+
+ if (!SPACE_FOR (buffer, data_size)) {
+ if (buffer->len) {
+ memmove (&buffer->data[0], &buffer->data[buffer->off], buffer->len);
+ }
+ buffer->off = 0;
+ if (!SPACE_FOR (buffer, data_size)) {
+ buffer->datalen =
+ bson_next_power_of_two (data_size + buffer->len + buffer->off);
+ buffer->data = (uint8_t *) buffer->realloc_func (
+ buffer->data, buffer->datalen, NULL);
+ }
+ }
+
+ buf = &buffer->data[buffer->off + buffer->len];
+
+ BSON_ASSERT ((buffer->off + buffer->len + data_size) <= buffer->datalen);
+
+ memcpy (buf, data, data_size);
+
+ buffer->len += data_size;
+
+ RETURN (true);
+}
+
+
/**
 * mongoc_buffer_append_from_stream:
 * @buffer; A mongoc_buffer_t.
 * @stream: The stream to read from.
 * @size: The number of bytes to read.
 * @timeout_msec: The number of milliseconds to wait or -1 for the default
 * @error: A location for a bson_error_t, or NULL.
 *
 * Reads from stream @size bytes and stores them in @buffer. This can be used
 * in conjunction with reading RPCs from a stream. You read from the stream
 * into this buffer and then scatter the buffer into the RPC.
 *
 * Returns: true if successful; otherwise false and @error is set.
 */
bool
_mongoc_buffer_append_from_stream (mongoc_buffer_t *buffer,
                                   mongoc_stream_t *stream,
                                   size_t size,
                                   int32_t timeout_msec,
                                   bson_error_t *error)
{
   uint8_t *buf;
   ssize_t ret;

   ENTRY;

   BSON_ASSERT (buffer);
   BSON_ASSERT (stream);
   BSON_ASSERT (size);

   BSON_ASSERT (buffer->datalen);
   /* guard against size arithmetic overflowing the int-sized wire limits */
   BSON_ASSERT ((buffer->datalen + size) < INT_MAX);

   if (!SPACE_FOR (buffer, size)) {
      /* compact: slide live bytes to the front to reclaim consumed space */
      if (buffer->len) {
         memmove (&buffer->data[0], &buffer->data[buffer->off], buffer->len);
      }
      buffer->off = 0;
      /* still too small after compaction: grow to the next power of two */
      if (!SPACE_FOR (buffer, size)) {
         buffer->datalen =
            bson_next_power_of_two (size + buffer->len + buffer->off);
         buffer->data = (uint8_t *) buffer->realloc_func (
            buffer->data, buffer->datalen, NULL);
      }
   }

   buf = &buffer->data[buffer->off + buffer->len];

   BSON_ASSERT ((buffer->off + buffer->len + size) <= buffer->datalen);

   /* min_bytes == size: anything short of a full read is a failure */
   ret = mongoc_stream_read (stream, buf, size, size, timeout_msec);
   if (ret != size) {
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Failed to read %" PRIu64
                      " bytes: socket error or timeout",
                      (uint64_t) size);
      RETURN (false);
   }

   buffer->len += ret;

   RETURN (true);
}
/**
 * _mongoc_buffer_fill:
 * @buffer: A mongoc_buffer_t.
 * @stream: A stream to read from.
 * @min_bytes: The minumum number of bytes to read.
 * @timeout_msec: The number of milliseconds to wait or -1 for the default.
 * @error: A location for a bson_error_t or NULL.
 *
 * Attempts to fill the entire buffer, or at least @min_bytes.
 *
 * Returns: The number of buffered bytes, or -1 on failure.
 */
ssize_t
_mongoc_buffer_fill (mongoc_buffer_t *buffer,
                     mongoc_stream_t *stream,
                     size_t min_bytes,
                     int32_t timeout_msec,
                     bson_error_t *error)
{
   ssize_t ret;
   size_t avail_bytes;

   ENTRY;

   BSON_ASSERT (buffer);
   BSON_ASSERT (stream);

   BSON_ASSERT (buffer->data);
   BSON_ASSERT (buffer->datalen);

   /* already have enough buffered: nothing to read */
   if (min_bytes <= buffer->len) {
      RETURN (buffer->len);
   }

   /* only the shortfall still needs to be read from the stream */
   min_bytes -= buffer->len;

   /* compact the live region to the front of the storage */
   if (buffer->len) {
      memmove (&buffer->data[0], &buffer->data[buffer->off], buffer->len);
   }

   buffer->off = 0;

   if (!SPACE_FOR (buffer, min_bytes)) {
      buffer->datalen = bson_next_power_of_two (buffer->len + min_bytes);
      buffer->data = (uint8_t *) buffer->realloc_func (
         buffer->data, buffer->datalen, buffer->realloc_data);
   }

   /* read greedily: ask for all remaining capacity, require min_bytes */
   avail_bytes = buffer->datalen - buffer->len;

   ret = mongoc_stream_read (stream,
                             &buffer->data[buffer->off + buffer->len],
                             avail_bytes,
                             min_bytes,
                             timeout_msec);

   if (ret == -1) {
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Failed to buffer %u bytes",
                      (unsigned) min_bytes);
      RETURN (-1);
   }

   buffer->len += ret;

   /* a short read (less than the required minimum) is also a failure */
   if (buffer->len < min_bytes) {
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Could only buffer %u of %u bytes",
                      (unsigned) buffer->len,
                      (unsigned) min_bytes);
      RETURN (-1);
   }

   RETURN (buffer->len);
}
/**
 * mongoc_buffer_try_append_from_stream:
 * @buffer; A mongoc_buffer_t.
 * @stream: The stream to read from.
 * @size: The number of bytes to read.
 * @timeout_msec: The number of milliseconds to wait or -1 for the default
 *
 * Reads from stream @size bytes and stores them in @buffer. This can be used
 * in conjunction with reading RPCs from a stream. You read from the stream
 * into this buffer and then scatter the buffer into the RPC.
 *
 * Returns: bytes read if successful; otherwise 0 or -1.
 */
ssize_t
_mongoc_buffer_try_append_from_stream (mongoc_buffer_t *buffer,
                                       mongoc_stream_t *stream,
                                       size_t size,
                                       int32_t timeout_msec)
{
   uint8_t *buf;
   ssize_t ret;

   ENTRY;

   BSON_ASSERT (buffer);
   BSON_ASSERT (stream);
   BSON_ASSERT (size);

   BSON_ASSERT (buffer->datalen);
   BSON_ASSERT ((buffer->datalen + size) < INT_MAX);

   if (!SPACE_FOR (buffer, size)) {
      /* compact: slide live bytes to the front to reclaim consumed space */
      if (buffer->len) {
         memmove (&buffer->data[0], &buffer->data[buffer->off], buffer->len);
      }
      buffer->off = 0;
      /* still too small after compaction: grow to the next power of two */
      if (!SPACE_FOR (buffer, size)) {
         buffer->datalen =
            bson_next_power_of_two (size + buffer->len + buffer->off);
         buffer->data = (uint8_t *) buffer->realloc_func (
            buffer->data, buffer->datalen, NULL);
      }
   }

   buf = &buffer->data[buffer->off + buffer->len];

   BSON_ASSERT ((buffer->off + buffer->len + size) <= buffer->datalen);

   /* min_bytes == 0: unlike _mongoc_buffer_append_from_stream, a short or
    * empty read is not an error here; the raw count is returned */
   ret = mongoc_stream_read (stream, buf, size, 0, timeout_msec);

   if (ret > 0) {
      buffer->len += ret;
   }

   RETURN (ret);
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-bulk-operation-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-bulk-operation-private.h
similarity index 97%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-bulk-operation-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-bulk-operation-private.h
index 5c53ae3e..cfadb616 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-bulk-operation-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-bulk-operation-private.h
@@ -1,56 +1,57 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_BULK_OPERATION_PRIVATE_H
#define MONGOC_BULK_OPERATION_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include "mongoc-array-private.h"
#include "mongoc-client.h"
#include "mongoc-write-command-private.h"
BSON_BEGIN_DECLS
struct _mongoc_bulk_operation_t {
char *database;
char *collection;
mongoc_client_t *client;
+ mongoc_client_session_t *session;
mongoc_write_concern_t *write_concern;
mongoc_bulk_write_flags_t flags;
uint32_t server_id;
mongoc_array_t commands;
mongoc_write_result_t result;
bool executed;
int64_t operation_id;
};
mongoc_bulk_operation_t *
_mongoc_bulk_operation_new (mongoc_client_t *client,
const char *database,
const char *collection,
mongoc_bulk_write_flags_t flags,
const mongoc_write_concern_t *write_concern);
BSON_END_DECLS
#endif /* MONGOC_BULK_OPERATION_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-bulk-operation.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-bulk-operation.c
similarity index 93%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-bulk-operation.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-bulk-operation.c
index 195f4f2a..56dedb71 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-bulk-operation.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-bulk-operation.c
@@ -1,890 +1,903 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-bulk-operation.h"
#include "mongoc-bulk-operation-private.h"
#include "mongoc-client-private.h"
#include "mongoc-trace-private.h"
#include "mongoc-write-concern-private.h"
#include "mongoc-util-private.h"
/*
* This is the implementation of both write commands and bulk write commands.
* They are all implemented as one contiguous set since we'd like to cut down
* on code duplication here.
*
* This implementation is currently naive.
*
* Some interesting optimizations might be:
*
* - If unordered mode, send operations as we get them instead of waiting
* for execute() to be called. This could save us memcpy()'s too.
* - If there is no acknowledgement desired, keep a count of how many
* replies we need and ask the socket layer to skip that many bytes
* when reading.
* - Try to use iovec to send write commands with subdocuments rather than
* copying them into the write command document.
*/
mongoc_bulk_operation_t *
mongoc_bulk_operation_new (bool ordered)
{
mongoc_bulk_operation_t *bulk;
bulk = (mongoc_bulk_operation_t *) bson_malloc0 (sizeof *bulk);
bulk->flags.bypass_document_validation =
MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT;
bulk->flags.ordered = ordered;
bulk->server_id = 0;
_mongoc_array_init (&bulk->commands, sizeof (mongoc_write_command_t));
_mongoc_write_result_init (&bulk->result);
return bulk;
}
mongoc_bulk_operation_t *
_mongoc_bulk_operation_new (
mongoc_client_t *client, /* IN */
const char *database, /* IN */
const char *collection, /* IN */
mongoc_bulk_write_flags_t flags, /* IN */
const mongoc_write_concern_t *write_concern) /* IN */
{
mongoc_bulk_operation_t *bulk;
BSON_ASSERT (client);
BSON_ASSERT (collection);
bulk = mongoc_bulk_operation_new (flags.ordered);
bulk->client = client;
bulk->database = bson_strdup (database);
bulk->collection = bson_strdup (collection);
bulk->write_concern = mongoc_write_concern_copy (write_concern);
bulk->executed = false;
bulk->flags = flags;
bulk->operation_id = ++client->cluster.operation_id;
return bulk;
}
void
mongoc_bulk_operation_destroy (mongoc_bulk_operation_t *bulk) /* IN */
{
mongoc_write_command_t *command;
int i;
if (bulk) {
for (i = 0; i < bulk->commands.len; i++) {
command =
&_mongoc_array_index (&bulk->commands, mongoc_write_command_t, i);
_mongoc_write_command_destroy (command);
}
bson_free (bulk->database);
bson_free (bulk->collection);
mongoc_write_concern_destroy (bulk->write_concern);
_mongoc_array_destroy (&bulk->commands);
if (bulk->executed) {
_mongoc_write_result_destroy (&bulk->result);
}
bson_free (bulk);
}
}
-/* for speed, pre-split batch every 1000 docs. a future server's
- * maxWriteBatchSize may grow larger than the default, then we'll revise. */
-#define SHOULD_APPEND(_write_cmd, _write_cmd_type) \
- (((_write_cmd->type) == (_write_cmd_type)) && \
- (_write_cmd)->n_documents < MONGOC_DEFAULT_WRITE_BATCH_SIZE)
-
/* already failed, e.g. a bad call to mongoc_bulk_operation_insert? */
#define BULK_EXIT_IF_PRIOR_ERROR \
do { \
if (bulk->result.error.domain) { \
EXIT; \
} \
} while (0)
#define BULK_RETURN_IF_PRIOR_ERROR \
do { \
if (bulk->result.error.domain) { \
if (error != &bulk->result.error) { \
bson_set_error (error, \
MONGOC_ERROR_COMMAND, \
MONGOC_ERROR_COMMAND_INVALID_ARG, \
"Bulk operation is invalid from prior error: %s", \
bulk->result.error.message); \
}; \
return false; \
}; \
} while (0)
bool
_mongoc_bulk_operation_remove_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *opts,
bson_error_t *error) /* OUT */
{
mongoc_write_command_t command = {0};
mongoc_write_command_t *last;
ENTRY;
BSON_ASSERT (bulk);
BSON_ASSERT (selector);
BULK_RETURN_IF_PRIOR_ERROR;
if (bulk->commands.len) {
last = &_mongoc_array_index (
&bulk->commands, mongoc_write_command_t, bulk->commands.len - 1);
- if (SHOULD_APPEND (last, MONGOC_WRITE_COMMAND_DELETE)) {
+ if (last->type == MONGOC_WRITE_COMMAND_DELETE) {
_mongoc_write_command_delete_append (last, selector, opts);
RETURN (true);
}
}
_mongoc_write_command_init_delete (
- &command, selector, opts, bulk->flags, bulk->operation_id);
+ &command, selector, NULL, opts, bulk->flags, bulk->operation_id);
_mongoc_array_append_val (&bulk->commands, command);
RETURN (true);
}
bool
mongoc_bulk_operation_remove_one_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *opts,
bson_error_t *error) /* OUT */
{
bool retval;
bson_t opts_dup;
bson_iter_t iter;
ENTRY;
BULK_RETURN_IF_PRIOR_ERROR;
if (opts && bson_iter_init_find (&iter, opts, "limit")) {
if ((!BSON_ITER_HOLDS_INT (&iter)) || !bson_iter_as_int64 (&iter)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"%s expects the 'limit' option to be 1",
BSON_FUNC);
RETURN (false);
}
return _mongoc_bulk_operation_remove_with_opts (
bulk, selector, opts, error);
}
bson_init (&opts_dup);
BSON_APPEND_INT32 (&opts_dup, "limit", 1);
if (opts) {
bson_concat (&opts_dup, opts);
}
retval = _mongoc_bulk_operation_remove_with_opts (
bulk, selector, &opts_dup, error);
bson_destroy (&opts_dup);
RETURN (retval);
}
bool
mongoc_bulk_operation_remove_many_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *opts,
bson_error_t *error) /* OUT */
{
bool retval;
bson_t opts_dup;
bson_iter_t iter;
ENTRY;
BULK_RETURN_IF_PRIOR_ERROR;
if (opts && bson_iter_init_find (&iter, opts, "limit")) {
if ((!BSON_ITER_HOLDS_INT (&iter)) || bson_iter_as_int64 (&iter)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"%s expects the 'limit' option to be 0",
BSON_FUNC);
RETURN (false);
}
RETURN (
_mongoc_bulk_operation_remove_with_opts (bulk, selector, opts, error));
}
bson_init (&opts_dup);
BSON_APPEND_INT32 (&opts_dup, "limit", 0);
if (opts) {
bson_concat (&opts_dup, opts);
}
retval = _mongoc_bulk_operation_remove_with_opts (
bulk, selector, &opts_dup, error);
bson_destroy (&opts_dup);
RETURN (retval);
}
void
mongoc_bulk_operation_remove (mongoc_bulk_operation_t *bulk, /* IN */
const bson_t *selector) /* IN */
{
bson_t opts;
bson_error_t *error = &bulk->result.error;
ENTRY;
BULK_EXIT_IF_PRIOR_ERROR;
bson_init (&opts);
BSON_APPEND_INT32 (&opts, "limit", 0);
mongoc_bulk_operation_remove_many_with_opts (bulk, selector, &opts, error);
bson_destroy (&opts);
if (error->domain) {
MONGOC_WARNING ("%s", error->message);
}
EXIT;
}
void
mongoc_bulk_operation_remove_one (mongoc_bulk_operation_t *bulk, /* IN */
const bson_t *selector) /* IN */
{
bson_t opts;
bson_error_t *error = &bulk->result.error;
ENTRY;
BULK_EXIT_IF_PRIOR_ERROR;
bson_init (&opts);
BSON_APPEND_INT32 (&opts, "limit", 1);
mongoc_bulk_operation_remove_one_with_opts (bulk, selector, &opts, error);
bson_destroy (&opts);
if (error->domain) {
MONGOC_WARNING ("%s", error->message);
}
EXIT;
}
void
mongoc_bulk_operation_delete (mongoc_bulk_operation_t *bulk,
const bson_t *selector)
{
ENTRY;
mongoc_bulk_operation_remove (bulk, selector);
EXIT;
}
void
mongoc_bulk_operation_delete_one (mongoc_bulk_operation_t *bulk,
const bson_t *selector)
{
ENTRY;
mongoc_bulk_operation_remove_one (bulk, selector);
EXIT;
}
void
mongoc_bulk_operation_insert (mongoc_bulk_operation_t *bulk,
const bson_t *document)
{
ENTRY;
BSON_ASSERT (bulk);
BSON_ASSERT (document);
if (!mongoc_bulk_operation_insert_with_opts (
bulk, document, NULL /* opts */, &bulk->result.error)) {
MONGOC_WARNING ("%s", bulk->result.error.message);
}
EXIT;
}
bool
mongoc_bulk_operation_insert_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *document,
const bson_t *opts,
bson_error_t *error)
{
mongoc_write_command_t command = {0};
mongoc_write_command_t *last;
- bson_iter_t iter;
ENTRY;
BSON_ASSERT (bulk);
BSON_ASSERT (document);
BULK_RETURN_IF_PRIOR_ERROR;
- if (opts && bson_iter_init_find_case (&iter, opts, "legacyIndex") &&
- bson_iter_as_bool (&iter)) {
- if (!_mongoc_validate_legacy_index (document, error)) {
- return false;
- }
- } else if (!_mongoc_validate_new_document (document, error)) {
+ if (!_mongoc_validate_new_document (document, error)) {
return false;
}
if (bulk->commands.len) {
last = &_mongoc_array_index (
&bulk->commands, mongoc_write_command_t, bulk->commands.len - 1);
- if (SHOULD_APPEND (last, MONGOC_WRITE_COMMAND_INSERT)) {
+ if (last->type == MONGOC_WRITE_COMMAND_INSERT) {
_mongoc_write_command_insert_append (last, document);
return true;
}
}
_mongoc_write_command_init_insert (
&command,
document,
+ opts,
bulk->flags,
bulk->operation_id,
!mongoc_write_concern_is_acknowledged (bulk->write_concern));
_mongoc_array_append_val (&bulk->commands, command);
return true;
}
bool
_mongoc_bulk_operation_replace_one_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
const bson_t *opts,
bson_error_t *error) /* OUT */
{
mongoc_write_command_t command = {0};
mongoc_write_command_t *last;
ENTRY;
BULK_RETURN_IF_PRIOR_ERROR;
BSON_ASSERT (bulk);
BSON_ASSERT (selector);
BSON_ASSERT (document);
if (!_mongoc_validate_replace (document, error)) {
RETURN (false);
}
if (bulk->commands.len) {
last = &_mongoc_array_index (
&bulk->commands, mongoc_write_command_t, bulk->commands.len - 1);
- if (SHOULD_APPEND (last, MONGOC_WRITE_COMMAND_UPDATE)) {
+ if (last->type == MONGOC_WRITE_COMMAND_UPDATE) {
_mongoc_write_command_update_append (last, selector, document, opts);
RETURN (true);
}
}
_mongoc_write_command_init_update (
&command, selector, document, opts, bulk->flags, bulk->operation_id);
_mongoc_array_append_val (&bulk->commands, command);
RETURN (true);
}
void
mongoc_bulk_operation_replace_one (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
bool upsert)
{
bson_t opts;
bson_error_t *error = &bulk->result.error;
ENTRY;
bson_init (&opts);
BSON_APPEND_BOOL (&opts, "upsert", upsert);
BSON_APPEND_BOOL (&opts, "multi", false);
_mongoc_bulk_operation_replace_one_with_opts (
bulk, selector, document, &opts, error);
bson_destroy (&opts);
if (error->domain) {
MONGOC_WARNING ("%s", error->message);
}
EXIT;
}
bool
mongoc_bulk_operation_replace_one_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
const bson_t *opts,
bson_error_t *error) /* OUT */
{
bson_iter_t iter;
bson_t opts_dup;
bool retval;
ENTRY;
BSON_ASSERT (bulk);
BSON_ASSERT (selector);
BSON_ASSERT (document);
if (opts && bson_iter_init_find (&iter, opts, "multi")) {
if (!BSON_ITER_HOLDS_BOOL (&iter) || bson_iter_bool (&iter)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"%s expects the 'multi' option to be false",
BSON_FUNC);
RETURN (false);
}
retval = _mongoc_bulk_operation_replace_one_with_opts (
bulk, selector, document, opts, error);
} else {
bson_init (&opts_dup);
BSON_APPEND_BOOL (&opts_dup, "multi", false);
if (opts) {
bson_concat (&opts_dup, opts);
}
retval = _mongoc_bulk_operation_replace_one_with_opts (
bulk, selector, document, &opts_dup, error);
bson_destroy (&opts_dup);
}
RETURN (retval);
}
bool
_mongoc_bulk_operation_update_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
const bson_t *opts,
bson_error_t *error) /* OUT */
{
mongoc_write_command_t command = {0};
mongoc_write_command_t *last;
ENTRY;
BSON_ASSERT (bulk);
BSON_ASSERT (selector);
BSON_ASSERT (document);
BULK_RETURN_IF_PRIOR_ERROR;
if (!_mongoc_validate_update (document, error)) {
RETURN (false);
}
if (bulk->commands.len) {
last = &_mongoc_array_index (
&bulk->commands, mongoc_write_command_t, bulk->commands.len - 1);
- if (SHOULD_APPEND (last, MONGOC_WRITE_COMMAND_UPDATE)) {
+ if (last->type == MONGOC_WRITE_COMMAND_UPDATE) {
_mongoc_write_command_update_append (last, selector, document, opts);
RETURN (true);
}
}
_mongoc_write_command_init_update (
&command, selector, document, opts, bulk->flags, bulk->operation_id);
_mongoc_array_append_val (&bulk->commands, command);
RETURN (true);
}
bool
mongoc_bulk_operation_update_one_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
const bson_t *opts,
bson_error_t *error) /* OUT */
{
bool retval;
bson_t opts_dup;
bson_iter_t iter;
ENTRY;
if (opts && bson_iter_init_find (&iter, opts, "multi")) {
if (!BSON_ITER_HOLDS_BOOL (&iter) || bson_iter_bool (&iter)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"%s expects the 'multi' option to be false",
BSON_FUNC);
RETURN (false);
}
RETURN (_mongoc_bulk_operation_update_with_opts (
bulk, selector, document, opts, error));
}
bson_init (&opts_dup);
BSON_APPEND_BOOL (&opts_dup, "multi", false);
if (opts) {
bson_concat (&opts_dup, opts);
}
retval = _mongoc_bulk_operation_update_with_opts (
bulk, selector, document, &opts_dup, error);
bson_destroy (&opts_dup);
RETURN (retval);
}
bool
mongoc_bulk_operation_update_many_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
const bson_t *opts,
bson_error_t *error) /* OUT */
{
bool retval;
bson_t opts_dup;
bson_iter_t iter;
ENTRY;
if (opts && bson_iter_init_find (&iter, opts, "multi")) {
if (!BSON_ITER_HOLDS_BOOL (&iter) || !bson_iter_bool (&iter)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"%s expects the 'multi' option to be true",
BSON_FUNC);
RETURN (false);
}
return _mongoc_bulk_operation_update_with_opts (
bulk, selector, document, opts, error);
}
bson_init (&opts_dup);
BSON_APPEND_BOOL (&opts_dup, "multi", true);
if (opts) {
bson_concat (&opts_dup, opts);
}
retval = _mongoc_bulk_operation_update_with_opts (
bulk, selector, document, &opts_dup, error);
bson_destroy (&opts_dup);
RETURN (retval);
}
void
mongoc_bulk_operation_update (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
bool upsert)
{
bson_t opts;
bson_error_t *error = &bulk->result.error;
ENTRY;
BULK_EXIT_IF_PRIOR_ERROR;
bson_init (&opts);
BSON_APPEND_BOOL (&opts, "upsert", upsert);
BSON_APPEND_BOOL (&opts, "multi", true);
_mongoc_bulk_operation_update_with_opts (
bulk, selector, document, &opts, error);
bson_destroy (&opts);
if (error->domain) {
MONGOC_WARNING ("%s", error->message);
}
EXIT;
}
void
mongoc_bulk_operation_update_one (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
bool upsert)
{
bson_t opts;
bson_error_t *error = &bulk->result.error;
ENTRY;
BULK_EXIT_IF_PRIOR_ERROR;
bson_init (&opts);
BSON_APPEND_BOOL (&opts, "upsert", upsert);
BSON_APPEND_BOOL (&opts, "multi", false);
_mongoc_bulk_operation_update_with_opts (
bulk, selector, document, &opts, error);
bson_destroy (&opts);
if (error->domain) {
MONGOC_WARNING ("%s", error->message);
}
EXIT;
}
uint32_t
mongoc_bulk_operation_execute (mongoc_bulk_operation_t *bulk, /* IN */
bson_t *reply, /* OUT */
bson_error_t *error) /* OUT */
{
mongoc_cluster_t *cluster;
mongoc_write_command_t *command;
mongoc_server_stream_t *server_stream;
bool ret;
uint32_t offset = 0;
int i;
ENTRY;
BSON_ASSERT (bulk);
+ if (reply) {
+ bson_init (reply);
+ }
+
if (!bulk->client) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"mongoc_bulk_operation_execute() requires a client "
"and one has not been set.");
RETURN (false);
}
cluster = &bulk->client->cluster;
if (bulk->executed) {
_mongoc_write_result_destroy (&bulk->result);
}
bulk->executed = true;
- if (reply) {
- bson_init (reply);
- }
-
if (!bulk->database) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"mongoc_bulk_operation_execute() requires a database "
"and one has not been set.");
RETURN (false);
} else if (!bulk->collection) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"mongoc_bulk_operation_execute() requires a collection "
"and one has not been set.");
RETURN (false);
}
/* error stored by functions like mongoc_bulk_operation_insert that
* can't report errors immediately */
if (bulk->result.error.domain) {
if (error) {
memcpy (error, &bulk->result.error, sizeof (bson_error_t));
}
RETURN (false);
}
if (!bulk->commands.len) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"Cannot do an empty bulk write");
RETURN (false);
}
- if (bulk->server_id) {
- server_stream = mongoc_cluster_stream_for_server (
- cluster, bulk->server_id, true /* reconnect_ok */, error);
- } else {
- server_stream = mongoc_cluster_stream_for_writes (cluster, error);
- }
+ for (i = 0; i < bulk->commands.len; i++) {
+ if (bulk->server_id) {
+ server_stream = mongoc_cluster_stream_for_server (
+ cluster, bulk->server_id, true /* reconnect_ok */, error);
+ } else {
+ server_stream = mongoc_cluster_stream_for_writes (cluster, error);
+ }
- if (!server_stream) {
- RETURN (false);
- }
+ if (!server_stream) {
+ RETURN (false);
+ }
- for (i = 0; i < bulk->commands.len; i++) {
command =
&_mongoc_array_index (&bulk->commands, mongoc_write_command_t, i);
_mongoc_write_command_execute (command,
bulk->client,
server_stream,
bulk->database,
bulk->collection,
bulk->write_concern,
offset,
+ bulk->session,
&bulk->result);
bulk->server_id = server_stream->sd->id;
if (bulk->result.failed &&
(bulk->flags.ordered || bulk->result.must_stop)) {
+ mongoc_server_stream_cleanup (server_stream);
GOTO (cleanup);
}
offset += command->n_documents;
+ mongoc_server_stream_cleanup (server_stream);
}
cleanup:
- ret = _mongoc_write_result_complete (&bulk->result,
- bulk->client->error_api_version,
- bulk->write_concern,
- MONGOC_ERROR_COMMAND /* err domain */,
- reply,
- error);
- mongoc_server_stream_cleanup (server_stream);
+ ret = MONGOC_WRITE_RESULT_COMPLETE (&bulk->result,
+ bulk->client->error_api_version,
+ bulk->write_concern,
+ MONGOC_ERROR_COMMAND /* err domain */,
+ reply,
+ error);
RETURN (ret ? bulk->server_id : 0);
}
void
mongoc_bulk_operation_set_write_concern (
mongoc_bulk_operation_t *bulk, const mongoc_write_concern_t *write_concern)
{
BSON_ASSERT (bulk);
if (bulk->write_concern) {
mongoc_write_concern_destroy (bulk->write_concern);
}
if (write_concern) {
bulk->write_concern = mongoc_write_concern_copy (write_concern);
} else {
bulk->write_concern = mongoc_write_concern_new ();
}
}
const mongoc_write_concern_t *
mongoc_bulk_operation_get_write_concern (const mongoc_bulk_operation_t *bulk)
{
BSON_ASSERT (bulk);
return bulk->write_concern;
}
void
mongoc_bulk_operation_set_database (mongoc_bulk_operation_t *bulk,
const char *database)
{
BSON_ASSERT (bulk);
if (bulk->database) {
bson_free (bulk->database);
}
bulk->database = bson_strdup (database);
}
void
mongoc_bulk_operation_set_collection (mongoc_bulk_operation_t *bulk,
const char *collection)
{
BSON_ASSERT (bulk);
if (bulk->collection) {
bson_free (bulk->collection);
}
bulk->collection = bson_strdup (collection);
}
void
mongoc_bulk_operation_set_client (mongoc_bulk_operation_t *bulk, void *client)
{
BSON_ASSERT (bulk);
+ BSON_ASSERT (client);
+
+ if (bulk->session) {
+ BSON_ASSERT (bulk->session->client == client);
+ }
bulk->client = (mongoc_client_t *) client;
/* if you call set_client, bulk was likely made by mongoc_bulk_operation_new,
- * not mongoc_collection_create_bulk_operation(), so operation_id is 0. */
+ * not mongoc_collection_create_bulk_operation_with_opts(), so operation_id
+ * is 0. */
if (!bulk->operation_id) {
bulk->operation_id = ++bulk->client->cluster.operation_id;
}
}
+void
+mongoc_bulk_operation_set_client_session (
+ mongoc_bulk_operation_t *bulk,
+ struct _mongoc_client_session_t *client_session)
+{
+ BSON_ASSERT (bulk);
+ BSON_ASSERT (client_session);
+
+ if (bulk->client) {
+ BSON_ASSERT (bulk->client == client_session->client);
+ }
+
+ bulk->session = client_session;
+}
+
+
uint32_t
mongoc_bulk_operation_get_hint (const mongoc_bulk_operation_t *bulk)
{
BSON_ASSERT (bulk);
return bulk->server_id;
}
void
mongoc_bulk_operation_set_hint (mongoc_bulk_operation_t *bulk,
uint32_t server_id)
{
BSON_ASSERT (bulk);
bulk->server_id = server_id;
}
void
mongoc_bulk_operation_set_bypass_document_validation (
mongoc_bulk_operation_t *bulk, bool bypass)
{
BSON_ASSERT (bulk);
bulk->flags.bypass_document_validation =
bypass ? MONGOC_BYPASS_DOCUMENT_VALIDATION_TRUE
: MONGOC_BYPASS_DOCUMENT_VALIDATION_FALSE;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-bulk-operation.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-bulk-operation.h
similarity index 96%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-bulk-operation.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-bulk-operation.h
index 66d3945a..a2a302e6 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-bulk-operation.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-bulk-operation.h
@@ -1,145 +1,151 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_BULK_OPERATION_H
#define MONGOC_BULK_OPERATION_H
#include <bson.h>
#include "mongoc-macros.h"
#include "mongoc-write-concern.h"
#define MONGOC_BULK_WRITE_FLAGS_INIT \
{ \
true, MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT, 0 \
}
BSON_BEGIN_DECLS
+/* forward decl */
+struct _mongoc_client_session_t;
typedef struct _mongoc_bulk_operation_t mongoc_bulk_operation_t;
typedef struct _mongoc_bulk_write_flags_t mongoc_bulk_write_flags_t;
MONGOC_EXPORT (void)
mongoc_bulk_operation_destroy (mongoc_bulk_operation_t *bulk);
MONGOC_EXPORT (uint32_t)
mongoc_bulk_operation_execute (mongoc_bulk_operation_t *bulk,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (void)
mongoc_bulk_operation_delete (mongoc_bulk_operation_t *bulk,
const bson_t *selector)
BSON_GNUC_DEPRECATED_FOR (mongoc_bulk_operation_remove);
MONGOC_EXPORT (void)
mongoc_bulk_operation_delete_one (mongoc_bulk_operation_t *bulk,
const bson_t *selector)
BSON_GNUC_DEPRECATED_FOR (mongoc_bulk_operation_remove_one);
MONGOC_EXPORT (void)
mongoc_bulk_operation_insert (mongoc_bulk_operation_t *bulk,
const bson_t *document);
MONGOC_EXPORT (bool)
mongoc_bulk_operation_insert_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *document,
const bson_t *opts,
bson_error_t *error); /* OUT */
MONGOC_EXPORT (void)
mongoc_bulk_operation_remove (mongoc_bulk_operation_t *bulk,
const bson_t *selector);
MONGOC_EXPORT (bool)
mongoc_bulk_operation_remove_many_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *opts,
bson_error_t *error); /* OUT */
MONGOC_EXPORT (void)
mongoc_bulk_operation_remove_one (mongoc_bulk_operation_t *bulk,
const bson_t *selector);
MONGOC_EXPORT (bool)
mongoc_bulk_operation_remove_one_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *opts,
bson_error_t *error); /* OUT */
MONGOC_EXPORT (void)
mongoc_bulk_operation_replace_one (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
bool upsert);
MONGOC_EXPORT (bool)
mongoc_bulk_operation_replace_one_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
const bson_t *opts,
bson_error_t *error); /* OUT */
MONGOC_EXPORT (void)
mongoc_bulk_operation_update (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
bool upsert);
MONGOC_EXPORT (bool)
mongoc_bulk_operation_update_many_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
const bson_t *opts,
bson_error_t *error); /* OUT */
MONGOC_EXPORT (void)
mongoc_bulk_operation_update_one (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
bool upsert);
MONGOC_EXPORT (bool)
mongoc_bulk_operation_update_one_with_opts (mongoc_bulk_operation_t *bulk,
const bson_t *selector,
const bson_t *document,
const bson_t *opts,
bson_error_t *error); /* OUT */
MONGOC_EXPORT (void)
mongoc_bulk_operation_set_bypass_document_validation (
mongoc_bulk_operation_t *bulk, bool bypass);
/*
* The following functions are really only useful by language bindings and
* those wanting to replay a bulk operation to a number of clients or
* collections.
*/
MONGOC_EXPORT (mongoc_bulk_operation_t *)
mongoc_bulk_operation_new (bool ordered);
MONGOC_EXPORT (void)
mongoc_bulk_operation_set_write_concern (
mongoc_bulk_operation_t *bulk, const mongoc_write_concern_t *write_concern);
MONGOC_EXPORT (void)
mongoc_bulk_operation_set_database (mongoc_bulk_operation_t *bulk,
const char *database);
MONGOC_EXPORT (void)
mongoc_bulk_operation_set_collection (mongoc_bulk_operation_t *bulk,
const char *collection);
MONGOC_EXPORT (void)
mongoc_bulk_operation_set_client (mongoc_bulk_operation_t *bulk, void *client);
+MONGOC_EXPORT (void)
+mongoc_bulk_operation_set_client_session (
+ mongoc_bulk_operation_t *bulk,
+ struct _mongoc_client_session_t *client_session);
/* These names include the term "hint" for backward compatibility, should be
* mongoc_bulk_operation_get_server_id, mongoc_bulk_operation_set_server_id. */
MONGOC_EXPORT (void)
mongoc_bulk_operation_set_hint (mongoc_bulk_operation_t *bulk,
uint32_t server_id);
MONGOC_EXPORT (uint32_t)
mongoc_bulk_operation_get_hint (const mongoc_bulk_operation_t *bulk);
MONGOC_EXPORT (const mongoc_write_concern_t *)
mongoc_bulk_operation_get_write_concern (const mongoc_bulk_operation_t *bulk);
BSON_END_DECLS
#endif /* MONGOC_BULK_OPERATION_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-change-stream-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-change-stream-private.h
new file mode 100644
index 00000000..cdbec081
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-change-stream-private.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MONGOC_CHANGE_STREAM_PRIVATE_H
+#define MONGOC_CHANGE_STREAM_PRIVATE_H
+
+#include "mongoc-change-stream.h"
+#include "mongoc-collection.h"
+#include "mongoc-cursor.h"
+
+struct _mongoc_change_stream_t {
+ bson_t pipeline_to_append;
+ bson_t full_document;
+ bson_t opts;
+ bson_t resume_token; /* empty, or has resumeAfter: doc */
+
+ bson_error_t err;
+ bson_t err_doc;
+
+ mongoc_cursor_t *cursor;
+ mongoc_collection_t *coll;
+ int64_t max_await_time_ms;
+ int32_t batch_size;
+};
+
+mongoc_change_stream_t *
+_mongoc_change_stream_new (const mongoc_collection_t *coll,
+ const bson_t *pipeline,
+ const bson_t *opts);
+
+#endif /* MONGOC_CHANGE_STREAM_PRIVATE_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-change-stream.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-change-stream.c
new file mode 100644
index 00000000..790f4f51
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-change-stream.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright 2017-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <bson.h>
+#include "mongoc-change-stream-private.h"
+#include "mongoc-error.h"
+#include "mongoc-cursor-private.h"
+#include "mongoc-collection-private.h"
+#include "mongoc-client-session-private.h"
+
+#define CHANGE_STREAM_ERR(_str) \
+ bson_set_error (&stream->err, \
+ MONGOC_ERROR_CURSOR, \
+ MONGOC_ERROR_BSON, \
+ "Could not set " _str);
+
+
+#define SET_BSON_OR_ERR(_dst, _str) \
+ do { \
+ if (!BSON_APPEND_VALUE (_dst, _str, bson_iter_value (&iter))) { \
+ CHANGE_STREAM_ERR (_str); \
+ } \
+ } while (0);
+
+static void
+_mongoc_change_stream_make_cursor (mongoc_change_stream_t *stream)
+{
+ mongoc_client_session_t *cs = NULL;
+ bson_t change_stream_stage; /* { $changeStream: <change_stream_doc> } */
+ bson_t change_stream_doc;
+ bson_t pipeline;
+ bson_t cursor_doc;
+ bson_t command_opts;
+ bson_t command; /* { aggregate: "coll", pipeline: [], ... } */
+ bson_t reply;
+ bson_iter_t iter;
+ bson_error_t err = {0};
+ mongoc_server_description_t *sd;
+ uint32_t server_id;
+
+ BSON_ASSERT (stream);
+
+ /* Construct the aggregate command */
+ /* { aggregate: collname, pipeline: [], cursor: { batchSize: x } } */
+ bson_init (&command);
+ bson_append_utf8 (&command,
+ "aggregate",
+ 9,
+ stream->coll->collection,
+ stream->coll->collectionlen);
+ bson_append_array_begin (&command, "pipeline", 8, &pipeline);
+
+ /* Append the $changeStream stage */
+ bson_append_document_begin (&pipeline, "0", 1, &change_stream_stage);
+ bson_append_document_begin (
+ &change_stream_stage, "$changeStream", 13, &change_stream_doc);
+ bson_concat (&change_stream_doc, &stream->full_document);
+ if (!bson_empty (&stream->resume_token)) {
+ bson_concat (&change_stream_doc, &stream->resume_token);
+ }
+ bson_append_document_end (&change_stream_stage, &change_stream_doc);
+ bson_append_document_end (&pipeline, &change_stream_stage);
+
+ /* Append user pipeline if it exists */
+ if (bson_iter_init_find (&iter, &stream->pipeline_to_append, "pipeline") &&
+ BSON_ITER_HOLDS_ARRAY (&iter)) {
+ bson_iter_t child_iter;
+ uint32_t key_int = 1;
+ char buf[16];
+ const char *key_str;
+
+ bson_iter_recurse (&iter, &child_iter);
+ while (bson_iter_next (&child_iter)) {
+ if (BSON_ITER_HOLDS_DOCUMENT (&child_iter)) {
+ size_t keyLen =
+ bson_uint32_to_string (key_int, &key_str, buf, sizeof (buf));
+ bson_append_value (
+ &pipeline, key_str, keyLen, bson_iter_value (&child_iter));
+ ++key_int;
+ }
+ }
+ }
+
+ bson_append_array_end (&command, &pipeline);
+
+ /* Add batch size if needed */
+ bson_append_document_begin (&command, "cursor", 6, &cursor_doc);
+ if (stream->batch_size > 0) {
+ bson_append_int32 (&cursor_doc, "batchSize", 9, stream->batch_size);
+ }
+ bson_append_document_end (&command, &cursor_doc);
+
+ bson_copy_to (&stream->opts, &command_opts);
+
+ sd = mongoc_client_select_server (stream->coll->client,
+ false /* for_writes */,
+ stream->coll->read_prefs,
+ &err);
+
+ if (!sd) {
+ stream->err = err;
+ goto cleanup;
+ }
+
+ if (bson_iter_init_find (&iter, &command_opts, "sessionId")) {
+ if (!_mongoc_client_session_from_iter (
+ stream->coll->client, &iter, &cs, &stream->err)) {
+ goto cleanup;
+ }
+ }
+
+ server_id = mongoc_server_description_id (sd);
+ bson_append_int32 (&command_opts, "serverId", 8, server_id);
+
+ /* use inherited read preference and read concern of the collection */
+ if (!mongoc_collection_read_command_with_opts (
+ stream->coll, &command, NULL, &command_opts, &reply, &err)) {
+ bson_destroy (&stream->err_doc);
+ bson_copy_to (&reply, &stream->err_doc);
+ bson_destroy (&reply);
+ stream->err = err;
+ goto cleanup;
+ }
+
+ stream->cursor = mongoc_cursor_new_from_command_reply (
+ stream->coll->client, &reply, server_id); /* steals reply */
+
+ if (cs) {
+ stream->cursor->client_session = cs;
+ stream->cursor->explicit_session = 1;
+ }
+
+ /* maxTimeMS is only appended to getMores if these are set in cursor opts */
+ bson_append_bool (&stream->cursor->opts,
+ MONGOC_CURSOR_TAILABLE,
+ MONGOC_CURSOR_TAILABLE_LEN,
+ true);
+ bson_append_bool (&stream->cursor->opts,
+ MONGOC_CURSOR_AWAIT_DATA,
+ MONGOC_CURSOR_AWAIT_DATA_LEN,
+ true);
+
+ if (stream->max_await_time_ms > 0) {
+ BSON_ASSERT (
+ _mongoc_cursor_set_opt_int64 (stream->cursor,
+ MONGOC_CURSOR_MAX_AWAIT_TIME_MS,
+ stream->max_await_time_ms));
+ }
+
+ if (stream->batch_size > 0) {
+ mongoc_cursor_set_batch_size (stream->cursor, stream->batch_size);
+ }
+
+cleanup:
+ bson_destroy (&command);
+ bson_destroy (&command_opts);
+ mongoc_server_description_destroy (sd);
+}
+
+mongoc_change_stream_t *
+_mongoc_change_stream_new (const mongoc_collection_t *coll,
+ const bson_t *pipeline,
+ const bson_t *opts)
+{
+ bool full_doc_set = false;
+ mongoc_change_stream_t *stream =
+ (mongoc_change_stream_t *) bson_malloc (sizeof (mongoc_change_stream_t));
+
+ BSON_ASSERT (coll);
+ BSON_ASSERT (pipeline);
+
+ stream->max_await_time_ms = -1;
+ stream->batch_size = -1;
+ stream->coll = mongoc_collection_copy ((mongoc_collection_t *) coll);
+ bson_init (&stream->pipeline_to_append);
+ bson_init (&stream->full_document);
+ bson_init (&stream->opts);
+ bson_init (&stream->resume_token);
+ bson_init (&stream->err_doc);
+ memset (&stream->err, 0, sizeof (bson_error_t));
+ stream->cursor = NULL;
+
+ /*
+ * The passed options may consist of:
+ * fullDocument: 'default'|'updateLookup', passed to $changeStream stage
+ * resumeAfter: optional<Doc>, passed to $changeStream stage
+ * maxAwaitTimeMS: Optional<Int64>, set on the cursor
+ * batchSize: Optional<Int32>, passed as agg option, {cursor: { batchSize: }}
+ * standard command options like "sessionId", "maxTimeMS", or "collation"
+ */
+
+ if (opts) {
+ bson_iter_t iter;
+
+ if (bson_iter_init_find (&iter, opts, "fullDocument")) {
+ SET_BSON_OR_ERR (&stream->full_document, "fullDocument");
+ full_doc_set = true;
+ }
+
+ if (bson_iter_init_find (&iter, opts, "resumeAfter")) {
+ SET_BSON_OR_ERR (&stream->resume_token, "resumeAfter");
+ }
+
+ if (bson_iter_init_find (&iter, opts, "batchSize")) {
+ if (BSON_ITER_HOLDS_INT32 (&iter)) {
+ stream->batch_size = bson_iter_int32 (&iter);
+ }
+ }
+
+ if (bson_iter_init_find (&iter, opts, "maxAwaitTimeMS") &&
+ BSON_ITER_HOLDS_INT (&iter)) {
+ stream->max_await_time_ms = bson_iter_as_int64 (&iter);
+ }
+
+ /* save the remaining opts for mongoc_collection_read_command_with_opts */
+ bson_copy_to_excluding_noinit (opts,
+ &stream->opts,
+ "fullDocument",
+ "resumeAfter",
+ "batchSize",
+ "maxAwaitTimeMS",
+ NULL);
+ }
+
+ if (!full_doc_set) {
+ if (!BSON_APPEND_UTF8 (
+ &stream->full_document, "fullDocument", "default")) {
+ CHANGE_STREAM_ERR ("fullDocument");
+ }
+ }
+
+ if (!bson_empty (pipeline)) {
+ bson_iter_t iter;
+ if (bson_iter_init_find (&iter, pipeline, "pipeline")) {
+ SET_BSON_OR_ERR (&stream->pipeline_to_append, "pipeline");
+ }
+ }
+
+ if (stream->err.code == 0) {
+ _mongoc_change_stream_make_cursor (stream);
+ }
+
+ return stream;
+}
+
+bool
+mongoc_change_stream_next (mongoc_change_stream_t *stream, const bson_t **bson)
+{
+ bson_iter_t iter;
+
+ BSON_ASSERT (stream);
+ BSON_ASSERT (bson);
+
+ if (stream->err.code != 0) {
+ return false;
+ }
+
+ if (!mongoc_cursor_next (stream->cursor, bson)) {
+ const bson_t *err_doc;
+ bson_error_t err;
+ bool resumable = false;
+
+ if (!mongoc_cursor_error_document (stream->cursor, &err, &err_doc)) {
+ /* No error occurred, just no documents left */
+ return false;
+ }
+
+ /* Change Streams Spec: An error is resumable if it is not a server error,
+ * or if it has error code 43 (cursor not found) or is "not master" */
+ if (!bson_empty (err_doc)) {
+ /* This is a server error */
+ bson_iter_t err_iter;
+ if (bson_iter_init_find (&err_iter, err_doc, "errmsg") &&
+ BSON_ITER_HOLDS_UTF8 (&err_iter)) {
+ uint32_t len;
+ const char *errmsg = bson_iter_utf8 (&err_iter, &len);
+ if (strncmp (errmsg, "not master", len) == 0) {
+ resumable = true;
+ }
+ }
+
+ if (bson_iter_init_find (&err_iter, err_doc, "code") &&
+ BSON_ITER_HOLDS_INT (&err_iter)) {
+ if (bson_iter_as_int64 (&err_iter) == 43) {
+ resumable = true;
+ }
+ }
+ } else {
+ /* This is a client error */
+ resumable = true;
+ }
+
+ if (resumable) {
+ mongoc_cursor_destroy (stream->cursor);
+ _mongoc_change_stream_make_cursor (stream);
+ if (!mongoc_cursor_next (stream->cursor, bson)) {
+ resumable =
+ !mongoc_cursor_error_document (stream->cursor, &err, &err_doc);
+ if (resumable) {
+ /* Empty batch. */
+ return false;
+ }
+ }
+ }
+
+ if (!resumable) {
+ stream->err = err;
+ bson_destroy (&stream->err_doc);
+ bson_copy_to (err_doc, &stream->err_doc);
+ return false;
+ }
+ }
+
+ /* We have received documents, either from the first call to next
+ * or after a resume. */
+ if (!bson_iter_init_find (&iter, *bson, "_id")) {
+ bson_set_error (&stream->err,
+ MONGOC_ERROR_CURSOR,
+ MONGOC_ERROR_CHANGE_STREAM_NO_RESUME_TOKEN,
+ "Cannot provide resume functionality when the resume "
+ "token is missing");
+ return false;
+ }
+
+ /* Copy the resume token */
+ bson_reinit (&stream->resume_token);
+ BSON_APPEND_VALUE (
+ &stream->resume_token, "resumeAfter", bson_iter_value (&iter));
+ return true;
+}
+
+bool
+mongoc_change_stream_error_document (const mongoc_change_stream_t *stream,
+ bson_error_t *err,
+ const bson_t **bson)
+{
+ BSON_ASSERT (stream);
+
+ if (stream->err.code != 0) {
+ if (err) {
+ *err = stream->err;
+ }
+ if (bson) {
+ *bson = &stream->err_doc;
+ }
+ return true;
+ }
+ return false;
+}
+
+void
+mongoc_change_stream_destroy (mongoc_change_stream_t *stream)
+{
+ BSON_ASSERT (stream);
+ bson_destroy (&stream->pipeline_to_append);
+ bson_destroy (&stream->full_document);
+ bson_destroy (&stream->opts);
+ bson_destroy (&stream->resume_token);
+ bson_destroy (&stream->err_doc);
+ if (stream->cursor) {
+ mongoc_cursor_destroy (stream->cursor);
+ }
+ mongoc_collection_destroy (stream->coll);
+ bson_free (stream);
+}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-change-stream.h
similarity index 53%
copy from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand.h
copy to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-change-stream.h
index 81f2867a..5e91a7e1 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-change-stream.h
@@ -1,43 +1,37 @@
/*
- * Copyright 2014 MongoDB, Inc.
+ * Copyright 2017-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
-#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
-#error "Only <mongoc.h> can be included directly."
-#endif
-
-
-#ifndef MONGOC_RAND_H
-#define MONGOC_RAND_H
-
+#ifndef MONGOC_CHANGE_STREAM_H
+#define MONGOC_CHANGE_STREAM_H
#include <bson.h>
#include "mongoc-macros.h"
-BSON_BEGIN_DECLS
+typedef struct _mongoc_change_stream_t mongoc_change_stream_t;
MONGOC_EXPORT (void)
-mongoc_rand_seed (const void *buf, int num);
-MONGOC_EXPORT (void)
-mongoc_rand_add (const void *buf, int num, double entropy);
-MONGOC_EXPORT (int)
-mongoc_rand_status (void);
+mongoc_change_stream_destroy (mongoc_change_stream_t *);
-BSON_END_DECLS
+MONGOC_EXPORT (bool)
+mongoc_change_stream_next (mongoc_change_stream_t *, const bson_t **);
+MONGOC_EXPORT (bool)
+mongoc_change_stream_error_document (const mongoc_change_stream_t *,
+ bson_error_t *,
+ const bson_t **);
-#endif /* MONGOC_RAND_H */
+#endif /* MONGOC_CHANGE_STREAM_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-pool-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-pool-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-pool-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-pool-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-pool.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-pool.c
similarity index 93%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-pool.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-pool.c
index 08b9bfc7..614602fb 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-pool.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-pool.c
@@ -1,436 +1,451 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc.h"
#include "mongoc-apm-private.h"
#include "mongoc-counters-private.h"
#include "mongoc-client-pool-private.h"
#include "mongoc-client-pool.h"
#include "mongoc-client-private.h"
#include "mongoc-queue-private.h"
#include "mongoc-thread-private.h"
#include "mongoc-topology-private.h"
#include "mongoc-trace-private.h"
#ifdef MONGOC_ENABLE_SSL
#include "mongoc-ssl-private.h"
#endif
struct _mongoc_client_pool_t {
mongoc_mutex_t mutex;
mongoc_cond_t cond;
mongoc_queue_t queue;
mongoc_topology_t *topology;
mongoc_uri_t *uri;
uint32_t min_pool_size;
uint32_t max_pool_size;
uint32_t size;
#ifdef MONGOC_ENABLE_SSL
bool ssl_opts_set;
mongoc_ssl_opt_t ssl_opts;
#endif
bool apm_callbacks_set;
mongoc_apm_callbacks_t apm_callbacks;
void *apm_context;
int32_t error_api_version;
bool error_api_set;
};
#ifdef MONGOC_ENABLE_SSL
void
mongoc_client_pool_set_ssl_opts (mongoc_client_pool_t *pool,
const mongoc_ssl_opt_t *opts)
{
BSON_ASSERT (pool);
mongoc_mutex_lock (&pool->mutex);
_mongoc_ssl_opts_cleanup (&pool->ssl_opts);
memset (&pool->ssl_opts, 0, sizeof pool->ssl_opts);
pool->ssl_opts_set = false;
if (opts) {
_mongoc_ssl_opts_copy_to (opts, &pool->ssl_opts);
pool->ssl_opts_set = true;
}
mongoc_topology_scanner_set_ssl_opts (pool->topology->scanner,
&pool->ssl_opts);
mongoc_mutex_unlock (&pool->mutex);
}
#endif
mongoc_client_pool_t *
mongoc_client_pool_new (const mongoc_uri_t *uri)
{
mongoc_topology_t *topology;
mongoc_client_pool_t *pool;
const bson_t *b;
bson_iter_t iter;
const char *appname;
ENTRY;
BSON_ASSERT (uri);
#ifndef MONGOC_ENABLE_SSL
if (mongoc_uri_get_ssl (uri)) {
MONGOC_ERROR ("Can't create SSL client pool,"
" SSL not enabled in this build.");
return NULL;
}
#endif
pool = (mongoc_client_pool_t *) bson_malloc0 (sizeof *pool);
mongoc_mutex_init (&pool->mutex);
_mongoc_queue_init (&pool->queue);
pool->uri = mongoc_uri_copy (uri);
pool->min_pool_size = 0;
pool->max_pool_size = 100;
pool->size = 0;
topology = mongoc_topology_new (uri, false);
pool->topology = topology;
pool->error_api_version = MONGOC_ERROR_API_VERSION_LEGACY;
b = mongoc_uri_get_options (pool->uri);
if (bson_iter_init_find_case (&iter, b, MONGOC_URI_MINPOOLSIZE)) {
+ MONGOC_WARNING (
+ MONGOC_URI_MINPOOLSIZE
+ " is deprecated; its behavior does not match its name, and its actual"
+ " behavior will likely hurt performance.");
+
if (BSON_ITER_HOLDS_INT32 (&iter)) {
pool->min_pool_size = BSON_MAX (0, bson_iter_int32 (&iter));
}
}
if (bson_iter_init_find_case (&iter, b, MONGOC_URI_MAXPOOLSIZE)) {
if (BSON_ITER_HOLDS_INT32 (&iter)) {
pool->max_pool_size = BSON_MAX (1, bson_iter_int32 (&iter));
}
}
appname =
mongoc_uri_get_option_as_utf8 (pool->uri, MONGOC_URI_APPNAME, NULL);
if (appname) {
/* the appname should have already been validated */
BSON_ASSERT (mongoc_client_pool_set_appname (pool, appname));
}
#ifdef MONGOC_ENABLE_SSL
if (mongoc_uri_get_ssl (pool->uri)) {
mongoc_ssl_opt_t ssl_opt = {0};
_mongoc_ssl_opts_from_uri (&ssl_opt, pool->uri);
/* sets use_ssl = true */
mongoc_client_pool_set_ssl_opts (pool, &ssl_opt);
}
#endif
mongoc_counter_client_pools_active_inc ();
RETURN (pool);
}
void
mongoc_client_pool_destroy (mongoc_client_pool_t *pool)
{
mongoc_client_t *client;
ENTRY;
BSON_ASSERT (pool);
+ if (pool->topology->session_pool) {
+ client = mongoc_client_pool_pop (pool);
+ _mongoc_client_end_sessions (client);
+ mongoc_client_pool_push (pool, client);
+ }
+
while (
(client = (mongoc_client_t *) _mongoc_queue_pop_head (&pool->queue))) {
mongoc_client_destroy (client);
}
mongoc_topology_destroy (pool->topology);
mongoc_uri_destroy (pool->uri);
mongoc_mutex_destroy (&pool->mutex);
mongoc_cond_destroy (&pool->cond);
#ifdef MONGOC_ENABLE_SSL
_mongoc_ssl_opts_cleanup (&pool->ssl_opts);
#endif
bson_free (pool);
mongoc_counter_client_pools_active_dec ();
mongoc_counter_client_pools_disposed_inc ();
EXIT;
}
/*
* Start the background topology scanner.
*
* This function assumes the pool's mutex is locked
*/
static void
_start_scanner_if_needed (mongoc_client_pool_t *pool)
{
if (!_mongoc_topology_start_background_scanner (pool->topology)) {
MONGOC_ERROR ("Background scanner did not start!");
abort ();
}
}
mongoc_client_t *
mongoc_client_pool_pop (mongoc_client_pool_t *pool)
{
mongoc_client_t *client;
ENTRY;
BSON_ASSERT (pool);
mongoc_mutex_lock (&pool->mutex);
again:
if (!(client = (mongoc_client_t *) _mongoc_queue_pop_head (&pool->queue))) {
if (pool->size < pool->max_pool_size) {
- client = _mongoc_client_new_from_uri (pool->uri, pool->topology);
+ client = _mongoc_client_new_from_uri (pool->topology);
/* for tests */
mongoc_client_set_stream_initiator (
client,
pool->topology->scanner->initiator,
pool->topology->scanner->initiator_context);
client->error_api_version = pool->error_api_version;
_mongoc_client_set_apm_callbacks_private (
client, &pool->apm_callbacks, pool->apm_context);
#ifdef MONGOC_ENABLE_SSL
if (pool->ssl_opts_set) {
mongoc_client_set_ssl_opts (client, &pool->ssl_opts);
}
#endif
pool->size++;
} else {
mongoc_cond_wait (&pool->cond, &pool->mutex);
GOTO (again);
}
}
_start_scanner_if_needed (pool);
mongoc_mutex_unlock (&pool->mutex);
RETURN (client);
}
mongoc_client_t *
mongoc_client_pool_try_pop (mongoc_client_pool_t *pool)
{
mongoc_client_t *client;
ENTRY;
BSON_ASSERT (pool);
mongoc_mutex_lock (&pool->mutex);
if (!(client = (mongoc_client_t *) _mongoc_queue_pop_head (&pool->queue))) {
if (pool->size < pool->max_pool_size) {
- client = _mongoc_client_new_from_uri (pool->uri, pool->topology);
+ client = _mongoc_client_new_from_uri (pool->topology);
#ifdef MONGOC_ENABLE_SSL
if (pool->ssl_opts_set) {
mongoc_client_set_ssl_opts (client, &pool->ssl_opts);
}
#endif
pool->size++;
}
}
if (client) {
_start_scanner_if_needed (pool);
}
mongoc_mutex_unlock (&pool->mutex);
RETURN (client);
}
void
mongoc_client_pool_push (mongoc_client_pool_t *pool, mongoc_client_t *client)
{
ENTRY;
BSON_ASSERT (pool);
BSON_ASSERT (client);
mongoc_mutex_lock (&pool->mutex);
_mongoc_queue_push_head (&pool->queue, client);
if (pool->min_pool_size &&
_mongoc_queue_get_length (&pool->queue) > pool->min_pool_size) {
mongoc_client_t *old_client;
old_client = (mongoc_client_t *) _mongoc_queue_pop_tail (&pool->queue);
if (old_client) {
mongoc_client_destroy (old_client);
pool->size--;
}
}
mongoc_cond_signal (&pool->cond);
mongoc_mutex_unlock (&pool->mutex);
EXIT;
}
/* for tests */
void
_mongoc_client_pool_set_stream_initiator (mongoc_client_pool_t *pool,
mongoc_stream_initiator_t si,
void *context)
{
mongoc_topology_scanner_set_stream_initiator (
pool->topology->scanner, si, context);
}
/* for tests */
size_t
mongoc_client_pool_get_size (mongoc_client_pool_t *pool)
{
size_t size = 0;
ENTRY;
mongoc_mutex_lock (&pool->mutex);
size = pool->size;
mongoc_mutex_unlock (&pool->mutex);
RETURN (size);
}
size_t
mongoc_client_pool_num_pushed (mongoc_client_pool_t *pool)
{
size_t num_pushed = 0;
ENTRY;
mongoc_mutex_lock (&pool->mutex);
num_pushed = pool->queue.length;
mongoc_mutex_unlock (&pool->mutex);
RETURN (num_pushed);
}
mongoc_topology_t *
_mongoc_client_pool_get_topology (mongoc_client_pool_t *pool)
{
return pool->topology;
}
void
mongoc_client_pool_max_size (mongoc_client_pool_t *pool, uint32_t max_pool_size)
{
ENTRY;
mongoc_mutex_lock (&pool->mutex);
pool->max_pool_size = max_pool_size;
mongoc_mutex_unlock (&pool->mutex);
EXIT;
}
void
mongoc_client_pool_min_size (mongoc_client_pool_t *pool, uint32_t min_pool_size)
{
ENTRY;
+ MONGOC_WARNING (
+ "mongoc_client_pool_min_size is deprecated; its behavior does not match"
+ " its name, and its actual behavior will likely hurt performance.");
+
mongoc_mutex_lock (&pool->mutex);
pool->min_pool_size = min_pool_size;
mongoc_mutex_unlock (&pool->mutex);
EXIT;
}
bool
mongoc_client_pool_set_apm_callbacks (mongoc_client_pool_t *pool,
mongoc_apm_callbacks_t *callbacks,
void *context)
{
mongoc_topology_t *topology;
topology = pool->topology;
if (pool->apm_callbacks_set) {
MONGOC_ERROR ("Can only set callbacks once");
return false;
}
mongoc_mutex_lock (&topology->mutex);
if (callbacks) {
memcpy (&topology->description.apm_callbacks,
callbacks,
sizeof (mongoc_apm_callbacks_t));
memcpy (&pool->apm_callbacks, callbacks, sizeof (mongoc_apm_callbacks_t));
}
mongoc_topology_set_apm_callbacks (topology, callbacks, context);
topology->description.apm_context = context;
pool->apm_context = context;
pool->apm_callbacks_set = true;
mongoc_mutex_unlock (&topology->mutex);
return true;
}
bool
mongoc_client_pool_set_error_api (mongoc_client_pool_t *pool, int32_t version)
{
if (version != MONGOC_ERROR_API_VERSION_LEGACY &&
version != MONGOC_ERROR_API_VERSION_2) {
MONGOC_ERROR ("Unsupported Error API Version: %" PRId32, version);
return false;
}
if (pool->error_api_set) {
MONGOC_ERROR ("Can only set Error API Version once");
return false;
}
pool->error_api_version = version;
pool->error_api_set = true;
return true;
}
bool
mongoc_client_pool_set_appname (mongoc_client_pool_t *pool, const char *appname)
{
bool ret;
mongoc_mutex_lock (&pool->mutex);
ret = _mongoc_topology_set_appname (pool->topology, appname);
mongoc_mutex_unlock (&pool->mutex);
return ret;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-pool.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-pool.h
similarity index 96%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-pool.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-pool.h
index 98763562..487f6fce 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-pool.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-pool.h
@@ -1,75 +1,75 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CLIENT_POOL_H
#define MONGOC_CLIENT_POOL_H
#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-macros.h"
#include "mongoc-apm.h"
#include "mongoc-client.h"
#include "mongoc-config.h"
#ifdef MONGOC_ENABLE_SSL
#include "mongoc-ssl.h"
#endif
#include "mongoc-uri.h"
BSON_BEGIN_DECLS
typedef struct _mongoc_client_pool_t mongoc_client_pool_t;
MONGOC_EXPORT (mongoc_client_pool_t *)
mongoc_client_pool_new (const mongoc_uri_t *uri);
MONGOC_EXPORT (void)
mongoc_client_pool_destroy (mongoc_client_pool_t *pool);
MONGOC_EXPORT (mongoc_client_t *)
mongoc_client_pool_pop (mongoc_client_pool_t *pool);
MONGOC_EXPORT (void)
mongoc_client_pool_push (mongoc_client_pool_t *pool, mongoc_client_t *client);
MONGOC_EXPORT (mongoc_client_t *)
mongoc_client_pool_try_pop (mongoc_client_pool_t *pool);
MONGOC_EXPORT (void)
mongoc_client_pool_max_size (mongoc_client_pool_t *pool,
uint32_t max_pool_size);
MONGOC_EXPORT (void)
mongoc_client_pool_min_size (mongoc_client_pool_t *pool,
- uint32_t min_pool_size);
+ uint32_t min_pool_size) BSON_GNUC_DEPRECATED;
#ifdef MONGOC_ENABLE_SSL
MONGOC_EXPORT (void)
mongoc_client_pool_set_ssl_opts (mongoc_client_pool_t *pool,
const mongoc_ssl_opt_t *opts);
#endif
MONGOC_EXPORT (bool)
mongoc_client_pool_set_apm_callbacks (mongoc_client_pool_t *pool,
mongoc_apm_callbacks_t *callbacks,
void *context);
MONGOC_EXPORT (bool)
mongoc_client_pool_set_error_api (mongoc_client_pool_t *pool, int32_t version);
MONGOC_EXPORT (bool)
mongoc_client_pool_set_appname (mongoc_client_pool_t *pool,
const char *appname);
BSON_END_DECLS
#endif /* MONGOC_CLIENT_POOL_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-private.h
similarity index 75%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-private.h
index 8c7ca99e..97ecd480 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-private.h
@@ -1,167 +1,196 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CLIENT_PRIVATE_H
#define MONGOC_CLIENT_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-apm-private.h"
#include "mongoc-buffer-private.h"
#include "mongoc-client.h"
#include "mongoc-cluster-private.h"
#include "mongoc-config.h"
#include "mongoc-host-list.h"
#include "mongoc-read-prefs.h"
#include "mongoc-rpc-private.h"
#include "mongoc-opcode.h"
#ifdef MONGOC_ENABLE_SSL
#include "mongoc-ssl.h"
#endif
#include "mongoc-stream.h"
#include "mongoc-topology-private.h"
#include "mongoc-write-concern.h"
-
BSON_BEGIN_DECLS
/* protocol versions this driver can speak */
-#define WIRE_VERSION_MIN 0
-#define WIRE_VERSION_MAX 5
+#define WIRE_VERSION_MIN 2
+#define WIRE_VERSION_MAX 6
/* first version that supported aggregation cursors */
#define WIRE_VERSION_AGG_CURSOR 1
/* first version that supported "insert", "update", "delete" commands */
#define WIRE_VERSION_WRITE_CMD 2
/* first version when SCRAM-SHA-1 replaced MONGODB-CR as default auth mech */
#define WIRE_VERSION_SCRAM_DEFAULT 3
/* first version that supported "find" and "getMore" commands */
#define WIRE_VERSION_FIND_CMD 4
/* first version with "killCursors" command */
#define WIRE_VERSION_KILLCURSORS_CMD 4
/* first version when findAndModify accepts writeConcern */
#define WIRE_VERSION_FAM_WRITE_CONCERN 4
/* first version to support readConcern */
#define WIRE_VERSION_READ_CONCERN 4
/* first version to support maxStalenessSeconds */
#define WIRE_VERSION_MAX_STALENESS 5
/* first version to support writeConcern */
#define WIRE_VERSION_CMD_WRITE_CONCERN 5
/* first version to support collation */
#define WIRE_VERSION_COLLATION 5
+/* first version to support OP_MSG */
+#define WIRE_VERSION_OP_MSG 6
+/* first version to support retryable writes */
+#define WIRE_VERSION_RETRY_WRITES 6
struct _mongoc_client_t {
mongoc_uri_t *uri;
mongoc_cluster_t cluster;
bool in_exhaust;
mongoc_stream_initiator_t initiator;
void *initiator_data;
#ifdef MONGOC_ENABLE_SSL
bool use_ssl;
mongoc_ssl_opt_t ssl_opts;
#endif
mongoc_topology_t *topology;
mongoc_read_prefs_t *read_prefs;
mongoc_read_concern_t *read_concern;
mongoc_write_concern_t *write_concern;
mongoc_apm_callbacks_t apm_callbacks;
void *apm_context;
int32_t error_api_version;
bool error_api_set;
+
+ /* mongoc_client_session_t's in use, to look up lsids and clusterTimes */
+ mongoc_set_t *client_sessions;
+ unsigned int csid_rand_seed;
};
/* Defines whether _mongoc_client_command_with_opts() is acting as a read
* command helper for a command like "distinct", or a write command helper for
* a command like "createRole", or both, like "aggregate" with "$out".
*/
typedef enum {
+ MONGOC_CMD_RAW = 0,
MONGOC_CMD_READ = 1,
MONGOC_CMD_WRITE = 2,
MONGOC_CMD_RW = 3,
} mongoc_command_mode_t;
-BSON_STATIC_ASSERT (MONGOC_CMD_RW == (MONGOC_CMD_READ | MONGOC_CMD_WRITE));
+BSON_STATIC_ASSERT2 (mongoc_cmd_rw,
+ MONGOC_CMD_RW == (MONGOC_CMD_READ | MONGOC_CMD_WRITE));
+typedef enum { MONGOC_RR_SRV, MONGOC_RR_TXT } mongoc_rr_type_t;
+
+bool
+_mongoc_client_get_rr (const char *service,
+ mongoc_rr_type_t rr_type,
+ mongoc_uri_t *uri,
+ bson_error_t *error);
mongoc_client_t *
-_mongoc_client_new_from_uri (const mongoc_uri_t *uri,
- mongoc_topology_t *topology);
+_mongoc_client_new_from_uri (mongoc_topology_t *topology);
bool
_mongoc_client_set_apm_callbacks_private (mongoc_client_t *client,
mongoc_apm_callbacks_t *callbacks,
void *context);
mongoc_stream_t *
mongoc_client_default_stream_initiator (const mongoc_uri_t *uri,
const mongoc_host_list_t *host,
void *user_data,
bson_error_t *error);
mongoc_stream_t *
_mongoc_client_create_stream (mongoc_client_t *client,
const mongoc_host_list_t *host,
bson_error_t *error);
bool
_mongoc_client_recv (mongoc_client_t *client,
mongoc_rpc_t *rpc,
mongoc_buffer_t *buffer,
mongoc_server_stream_t *server_stream,
bson_error_t *error);
-bool
-_mongoc_client_recv_gle (mongoc_client_t *client,
- mongoc_server_stream_t *server_stream,
- bson_t **gle_doc,
- bson_error_t *error);
-
void
_mongoc_client_kill_cursor (mongoc_client_t *client,
uint32_t server_id,
int64_t cursor_id,
int64_t operation_id,
const char *db,
- const char *collection);
+ const char *collection,
+ mongoc_client_session_t *cs);
bool
_mongoc_client_command_with_opts (mongoc_client_t *client,
const char *db_name,
const bson_t *command,
mongoc_command_mode_t mode,
const bson_t *opts,
mongoc_query_flags_t flags,
const mongoc_read_prefs_t *default_prefs,
mongoc_read_concern_t *default_rc,
mongoc_write_concern_t *default_wc,
bson_t *reply,
bson_error_t *error);
+mongoc_server_session_t *
+_mongoc_client_pop_server_session (mongoc_client_t *client,
+ bson_error_t *error);
+
+bool
+_mongoc_client_lookup_session (const mongoc_client_t *client,
+ uint32_t client_session_id,
+ mongoc_client_session_t **cs,
+ bson_error_t *error);
+
+void
+_mongoc_client_unregister_session (mongoc_client_t *client,
+ mongoc_client_session_t *session);
+
+void
+_mongoc_client_push_server_session (mongoc_client_t *client,
+ mongoc_server_session_t *server_session);
+void
+_mongoc_client_end_sessions (mongoc_client_t *client);
BSON_END_DECLS
#endif /* MONGOC_CLIENT_PRIVATE_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-session-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-session-private.h
new file mode 100644
index 00000000..6430a8bb
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-session-private.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MONGOC_CLIENT_SESSION_PRIVATE_H
+#define MONGOC_CLIENT_SESSION_PRIVATE_H
+
+#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
+#error "Only <mongoc.h> can be included directly."
+#endif
+
+#include <bson.h>
+#include "mongoc-client-session.h"
+
+typedef enum {
+ MONGOC_SESSION_NO_OPTS = 0,
+ MONGOC_SESSION_CAUSAL_CONSISTENCY = (1 << 0),
+} mongoc_session_flag_t;
+
+struct _mongoc_session_opt_t {
+ mongoc_session_flag_t flags;
+};
+
+typedef struct _mongoc_server_session_t {
+ struct _mongoc_server_session_t *prev, *next;
+ int64_t last_used_usec;
+ bson_t lsid; /* logical session id */
+ int64_t txn_number; /* transaction number */
+} mongoc_server_session_t;
+
+struct _mongoc_client_session_t {
+ mongoc_client_t *client;
+ mongoc_session_opt_t opts;
+ mongoc_server_session_t *server_session;
+ uint32_t client_session_id;
+ bson_t cluster_time;
+ uint32_t operation_timestamp;
+ uint32_t operation_increment;
+};
+
+bool
+_mongoc_parse_cluster_time (const bson_t *cluster_time,
+ uint32_t *timestamp,
+ uint32_t *increment);
+
+bool
+_mongoc_cluster_time_greater (const bson_t *new, const bson_t *old);
+
+void
+_mongoc_client_session_handle_reply (mongoc_client_session_t *session,
+ bool is_acknowledged,
+ const bson_t *reply);
+
+mongoc_server_session_t *
+_mongoc_server_session_new (bson_error_t *error);
+
+bool
+_mongoc_server_session_timed_out (const mongoc_server_session_t *server_session,
+ int64_t session_timeout_minutes);
+
+void
+_mongoc_server_session_destroy (mongoc_server_session_t *server_session);
+
+mongoc_client_session_t *
+_mongoc_client_session_new (mongoc_client_t *client,
+ mongoc_server_session_t *server_session,
+ const mongoc_session_opt_t *opts,
+ uint32_t client_session_id);
+
+bool
+_mongoc_client_session_from_iter (mongoc_client_t *client,
+ bson_iter_t *iter,
+ mongoc_client_session_t **cs,
+ bson_error_t *error);
+
+#endif /* MONGOC_CLIENT_SESSION_PRIVATE_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-session.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-session.c
new file mode 100644
index 00000000..9ff4aa67
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-session.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright 2017 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "mongoc-client-session-private.h"
+#include "mongoc-trace-private.h"
+#include "mongoc-client-private.h"
+#include "mongoc-rand-private.h"
+
+#define SESSION_NEVER_USED (-1)
+
+mongoc_session_opt_t *
+mongoc_session_opts_new (void)
+{
+ mongoc_session_opt_t *opts = bson_malloc0 (sizeof (mongoc_session_opt_t));
+
+ /* Driver Sessions Spec: causal consistency is true by default */
+ mongoc_session_opts_set_causal_consistency (opts, true);
+
+ return opts;
+}
+
+
+void
+mongoc_session_opts_set_causal_consistency (mongoc_session_opt_t *opts,
+ bool causal_consistency)
+{
+ ENTRY;
+
+ BSON_ASSERT (opts);
+
+ if (causal_consistency) {
+ opts->flags |= MONGOC_SESSION_CAUSAL_CONSISTENCY;
+ } else {
+ opts->flags &= ~MONGOC_SESSION_CAUSAL_CONSISTENCY;
+ }
+
+ EXIT;
+}
+
+bool
+mongoc_session_opts_get_causal_consistency (const mongoc_session_opt_t *opts)
+{
+ ENTRY;
+
+ BSON_ASSERT (opts);
+
+ RETURN (!!(opts->flags & MONGOC_SESSION_CAUSAL_CONSISTENCY));
+}
+
+
+static void
+_mongoc_session_opts_copy (const mongoc_session_opt_t *src,
+ mongoc_session_opt_t *dst)
+{
+ dst->flags = src->flags;
+}
+
+
+mongoc_session_opt_t *
+mongoc_session_opts_clone (const mongoc_session_opt_t *opts)
+{
+ mongoc_session_opt_t *cloned_opts;
+
+ ENTRY;
+
+ BSON_ASSERT (opts);
+
+ cloned_opts = bson_malloc (sizeof (mongoc_session_opt_t));
+ _mongoc_session_opts_copy (opts, cloned_opts);
+
+ RETURN (cloned_opts);
+}
+
+
+void
+mongoc_session_opts_destroy (mongoc_session_opt_t *opts)
+{
+ ENTRY;
+
+ BSON_ASSERT (opts);
+
+ bson_free (opts);
+
+ EXIT;
+}
+
+
+static bool
+_mongoc_server_session_uuid (uint8_t *data /* OUT */, bson_error_t *error)
+{
+#ifdef MONGOC_ENABLE_CRYPTO
+ /* https://tools.ietf.org/html/rfc4122#page-14
+ * o Set the two most significant bits (bits 6 and 7) of the
+ * clock_seq_hi_and_reserved to zero and one, respectively.
+ *
+ * o Set the four most significant bits (bits 12 through 15) of the
+ * time_hi_and_version field to the 4-bit version number from
+ * Section 4.1.3.
+ *
+ * o Set all the other bits to randomly (or pseudo-randomly) chosen
+ * values.
+ */
+
+ if (!_mongoc_rand_bytes (data, 16)) {
+ bson_set_error (error,
+ MONGOC_ERROR_CLIENT,
+ MONGOC_ERROR_CLIENT_SESSION_FAILURE,
+ "Could not generate UUID for logical session id");
+
+ return false;
+ }
+
+ data[6] = (uint8_t) (0x40 | (data[6] & 0xf));
+ data[8] = (uint8_t) (0x80 | (data[8] & 0x3f));
+
+ return true;
+#else
+ /* no _mongoc_rand_bytes without a crypto library */
+ bson_set_error (error,
+ MONGOC_ERROR_CLIENT,
+ MONGOC_ERROR_CLIENT_SESSION_FAILURE,
+ "Could not generate UUID for logical session id, we need a"
+ " cryptography library like libcrypto, Common Crypto, or"
+ " CNG");
+
+ return false;
+#endif
+}
+
+
+bool
+_mongoc_parse_cluster_time (const bson_t *cluster_time,
+ uint32_t *timestamp,
+ uint32_t *increment)
+{
+ bson_iter_t iter;
+ char *s;
+
+ if (!cluster_time ||
+ !bson_iter_init_find (&iter, cluster_time, "clusterTime") ||
+ !BSON_ITER_HOLDS_TIMESTAMP (&iter)) {
+ s = bson_as_json (cluster_time, NULL);
+ MONGOC_ERROR ("Cannot parse cluster time from %s\n", s);
+ bson_free (s);
+ return false;
+ }
+
+ bson_iter_timestamp (&iter, timestamp, increment);
+
+ return true;
+}
+
+
+bool
+_mongoc_cluster_time_greater (const bson_t *new, const bson_t *old)
+{
+ uint32_t new_t, new_i, old_t, old_i;
+
+ if (!_mongoc_parse_cluster_time (new, &new_t, &new_i) ||
+ !_mongoc_parse_cluster_time (old, &old_t, &old_i)) {
+ return false;
+ }
+
+ return (new_t > old_t) || (new_t == old_t && new_i > old_i);
+}
+
+
+void
+_mongoc_client_session_handle_reply (mongoc_client_session_t *session,
+ bool is_acknowledged,
+ const bson_t *reply)
+{
+ bson_iter_t iter;
+ uint32_t len;
+ const uint8_t *data;
+ bson_t cluster_time;
+ uint32_t t;
+ uint32_t i;
+
+ BSON_ASSERT (session);
+
+ if (!reply || !bson_iter_init (&iter, reply)) {
+ return;
+ }
+
+ while (bson_iter_next (&iter)) {
+ if (!strcmp (bson_iter_key (&iter), "$clusterTime") &&
+ BSON_ITER_HOLDS_DOCUMENT (&iter)) {
+ bson_iter_document (&iter, &len, &data);
+ bson_init_static (&cluster_time, data, (size_t) len);
+
+ mongoc_client_session_advance_cluster_time (session, &cluster_time);
+ } else if (!strcmp (bson_iter_key (&iter), "operationTime") &&
+ BSON_ITER_HOLDS_TIMESTAMP (&iter) && is_acknowledged) {
+ bson_iter_timestamp (&iter, &t, &i);
+ mongoc_client_session_advance_operation_time (session, t, i);
+ }
+ }
+}
+
+
+mongoc_server_session_t *
+_mongoc_server_session_new (bson_error_t *error)
+{
+ uint8_t uuid_data[16];
+ mongoc_server_session_t *s;
+
+ ENTRY;
+
+ if (!_mongoc_server_session_uuid (uuid_data, error)) {
+ RETURN (NULL);
+ }
+
+ s = bson_malloc0 (sizeof (mongoc_server_session_t));
+ s->last_used_usec = SESSION_NEVER_USED;
+ s->prev = NULL;
+ s->next = NULL;
+ bson_init (&s->lsid);
+ bson_append_binary (
+ &s->lsid, "id", 2, BSON_SUBTYPE_UUID, uuid_data, sizeof uuid_data);
+
+ /* transaction number is a positive integer and will be incremented before
+ * each use, so ensure it is initialized to zero. */
+ s->txn_number = 0;
+
+ RETURN (s);
+}
+
+
+bool
+_mongoc_server_session_timed_out (const mongoc_server_session_t *server_session,
+ int64_t session_timeout_minutes)
+{
+ int64_t timeout_usec;
+ const int64_t minute_to_usec = 60 * 1000 * 1000;
+
+ ENTRY;
+
+ if (session_timeout_minutes == MONGOC_NO_SESSIONS) {
+ /* not connected right now; keep the session */
+ return false;
+ }
+
+ if (server_session->last_used_usec == SESSION_NEVER_USED) {
+ return false;
+ }
+
+ /* Driver Sessions Spec: if a session has less than one minute left before
+ * becoming stale, discard it */
+ timeout_usec =
+ server_session->last_used_usec + session_timeout_minutes * minute_to_usec;
+
+ RETURN (timeout_usec - bson_get_monotonic_time () < 1 * minute_to_usec);
+}
+
+
+void
+_mongoc_server_session_destroy (mongoc_server_session_t *server_session)
+{
+ ENTRY;
+
+ bson_destroy (&server_session->lsid);
+ bson_free (server_session);
+
+ EXIT;
+}
+
+
+mongoc_client_session_t *
+_mongoc_client_session_new (mongoc_client_t *client,
+ mongoc_server_session_t *server_session,
+ const mongoc_session_opt_t *opts,
+ uint32_t client_session_id)
+{
+ mongoc_client_session_t *session;
+
+ ENTRY;
+
+ BSON_ASSERT (client);
+
+ session = bson_malloc0 (sizeof (mongoc_client_session_t));
+ session->client = client;
+ session->server_session = server_session;
+ session->client_session_id = client_session_id;
+ bson_init (&session->cluster_time);
+
+ if (opts) {
+ _mongoc_session_opts_copy (opts, &session->opts);
+ } else {
+ /* sessions are causally consistent by default */
+ session->opts.flags = MONGOC_SESSION_CAUSAL_CONSISTENCY;
+ }
+
+ RETURN (session);
+}
+
+
+mongoc_client_t *
+mongoc_client_session_get_client (const mongoc_client_session_t *session)
+{
+ BSON_ASSERT (session);
+
+ return session->client;
+}
+
+
+const mongoc_session_opt_t *
+mongoc_client_session_get_opts (const mongoc_client_session_t *session)
+{
+ BSON_ASSERT (session);
+
+ return &session->opts;
+}
+
+
+const bson_t *
+mongoc_client_session_get_lsid (const mongoc_client_session_t *session)
+{
+ BSON_ASSERT (session);
+
+ return &session->server_session->lsid;
+}
+
+const bson_t *
+mongoc_client_session_get_cluster_time (const mongoc_client_session_t *session)
+{
+ BSON_ASSERT (session);
+
+ if (bson_empty (&session->cluster_time)) {
+ return NULL;
+ }
+
+ return &session->cluster_time;
+}
+
+void
+mongoc_client_session_advance_cluster_time (mongoc_client_session_t *session,
+ const bson_t *cluster_time)
+{
+ uint32_t t, i;
+
+ ENTRY;
+
+ if (bson_empty (&session->cluster_time) &&
+ _mongoc_parse_cluster_time (cluster_time, &t, &i)) {
+ bson_destroy (&session->cluster_time);
+ bson_copy_to (cluster_time, &session->cluster_time);
+ EXIT;
+ }
+
+ if (_mongoc_cluster_time_greater (cluster_time, &session->cluster_time)) {
+ bson_destroy (&session->cluster_time);
+ bson_copy_to (cluster_time, &session->cluster_time);
+ }
+
+ EXIT;
+}
+
+void
+mongoc_client_session_get_operation_time (
+ const mongoc_client_session_t *session,
+ uint32_t *timestamp,
+ uint32_t *increment)
+{
+ BSON_ASSERT (session);
+ BSON_ASSERT (timestamp);
+ BSON_ASSERT (increment);
+
+ *timestamp = session->operation_timestamp;
+ *increment = session->operation_increment;
+}
+
+void
+mongoc_client_session_advance_operation_time (mongoc_client_session_t *session,
+ uint32_t timestamp,
+ uint32_t increment)
+{
+ ENTRY;
+
+ BSON_ASSERT (session);
+
+ if (timestamp > session->operation_timestamp ||
+ (timestamp == session->operation_timestamp &&
+ increment > session->operation_increment)) {
+ session->operation_timestamp = timestamp;
+ session->operation_increment = increment;
+ }
+
+ EXIT;
+}
+
+bool
+_mongoc_client_session_from_iter (mongoc_client_t *client,
+ bson_iter_t *iter,
+ mongoc_client_session_t **cs,
+ bson_error_t *error)
+{
+ ENTRY;
+
+ /* must be int64 that fits in uint32 */
+ if (!BSON_ITER_HOLDS_INT64 (iter) || bson_iter_int64 (iter) > 0xffffffff) {
+ bson_set_error (error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "Invalid sessionId");
+ RETURN (false);
+ }
+
+ RETURN (_mongoc_client_lookup_session (
+ client, (uint32_t) bson_iter_int64 (iter), cs, error));
+}
+
+
+bool
+mongoc_client_session_append (const mongoc_client_session_t *client_session,
+ bson_t *opts,
+ bson_error_t *error)
+{
+ ENTRY;
+
+ BSON_ASSERT (client_session);
+ BSON_ASSERT (opts);
+
+ if (!bson_append_int64 (
+ opts, "sessionId", 9, client_session->client_session_id)) {
+ bson_set_error (
+ error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "invalid opts");
+
+ RETURN (false);
+ }
+
+ RETURN (true);
+}
+
+
+void
+mongoc_client_session_destroy (mongoc_client_session_t *session)
+{
+ ENTRY;
+
+ BSON_ASSERT (session);
+
+ _mongoc_client_unregister_session (session->client, session);
+ _mongoc_client_push_server_session (session->client,
+ session->server_session);
+
+ bson_destroy (&session->cluster_time);
+ bson_free (session);
+
+ EXIT;
+}
diff --git a/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-session.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-session.h
new file mode 100644
index 00000000..fa0d6bde
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client-session.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2017 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MONGOC_CLIENT_SESSION_H
+#define MONGOC_CLIENT_SESSION_H
+
+#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
+#error "Only <mongoc.h> can be included directly."
+#endif
+
+#include <bson.h>
+#include "mongoc-macros.h"
+/* mongoc_client_session_t and mongoc_session_opt_t are typedef'ed here */
+#include "mongoc-client.h"
+
+BSON_BEGIN_DECLS
+
+MONGOC_EXPORT (mongoc_session_opt_t *)
+mongoc_session_opts_new (void) BSON_GNUC_WARN_UNUSED_RESULT;
+
+MONGOC_EXPORT (void)
+mongoc_session_opts_set_causal_consistency (mongoc_session_opt_t *opts,
+ bool causal_consistency);
+
+MONGOC_EXPORT (bool)
+mongoc_session_opts_get_causal_consistency (const mongoc_session_opt_t *opts);
+
+MONGOC_EXPORT (mongoc_session_opt_t *)
+mongoc_session_opts_clone (const mongoc_session_opt_t *opts);
+
+MONGOC_EXPORT (void)
+mongoc_session_opts_destroy (mongoc_session_opt_t *opts);
+
+MONGOC_EXPORT (mongoc_client_t *)
+mongoc_client_session_get_client (const mongoc_client_session_t *session);
+
+MONGOC_EXPORT (const mongoc_session_opt_t *)
+mongoc_client_session_get_opts (const mongoc_client_session_t *session);
+
+MONGOC_EXPORT (const bson_t *)
+mongoc_client_session_get_lsid (const mongoc_client_session_t *session);
+
+MONGOC_EXPORT (const bson_t *)
+mongoc_client_session_get_cluster_time (const mongoc_client_session_t *session);
+
+MONGOC_EXPORT (void)
+mongoc_client_session_advance_cluster_time (mongoc_client_session_t *session,
+ const bson_t *cluster_time);
+
+MONGOC_EXPORT (void)
+mongoc_client_session_get_operation_time (
+ const mongoc_client_session_t *session,
+ uint32_t *timestamp,
+ uint32_t *increment);
+
+MONGOC_EXPORT (void)
+mongoc_client_session_advance_operation_time (mongoc_client_session_t *session,
+ uint32_t timestamp,
+ uint32_t increment);
+
+MONGOC_EXPORT (bool)
+mongoc_client_session_append (const mongoc_client_session_t *client_session,
+ bson_t *opts,
+ bson_error_t *error);
+
+/* There is no mongoc_client_session_end, only mongoc_client_session_destroy.
+ * Driver Sessions Spec: "In languages that have idiomatic ways of disposing of
+ * resources, drivers SHOULD support that in addition to or instead of
+ * endSession."
+ */
+
+MONGOC_EXPORT (void)
+mongoc_client_session_destroy (mongoc_client_session_t *session);
+
+BSON_END_DECLS
+
+
+#endif /* MONGOC_CLIENT_SESSION_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client.c
similarity index 69%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client.c
index faa0255b..8276a8c7 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client.c
@@ -1,2152 +1,2760 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bson.h>
-#ifndef _WIN32
+#include "mongoc-config.h"
+#ifdef MONGOC_HAVE_DNSAPI
+/* for DnsQuery_UTF8 */
+#include <Windows.h>
+#include <WinDNS.h>
+#include <ws2tcpip.h>
+#else
#include <netdb.h>
#include <netinet/tcp.h>
+#if defined(MONGOC_HAVE_RES_NSEARCH) || defined(MONGOC_HAVE_RES_SEARCH)
+#include <arpa/nameser.h>
+#include <resolv.h>
+#include <bson-string.h>
+
+#endif
#endif
#include "mongoc-cursor-array-private.h"
#include "mongoc-client-private.h"
#include "mongoc-collection-private.h"
-#include "mongoc-config.h"
#include "mongoc-counters-private.h"
#include "mongoc-database-private.h"
#include "mongoc-gridfs-private.h"
#include "mongoc-error.h"
#include "mongoc-log.h"
#include "mongoc-queue-private.h"
#include "mongoc-socket.h"
#include "mongoc-stream-buffered.h"
#include "mongoc-stream-socket.h"
#include "mongoc-thread-private.h"
#include "mongoc-trace-private.h"
#include "mongoc-uri-private.h"
#include "mongoc-util-private.h"
#include "mongoc-set-private.h"
#include "mongoc-log.h"
#include "mongoc-write-concern-private.h"
#include "mongoc-read-concern-private.h"
+#include "mongoc-host-list-private.h"
+#include "mongoc-read-prefs-private.h"
+#include "mongoc-client-session-private.h"
#ifdef MONGOC_ENABLE_SSL
#include "mongoc-stream-tls.h"
#include "mongoc-ssl-private.h"
#include "mongoc-cmd-private.h"
#endif
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "client"
static void
_mongoc_client_op_killcursors (mongoc_cluster_t *cluster,
mongoc_server_stream_t *server_stream,
int64_t cursor_id,
int64_t operation_id,
const char *db,
const char *collection);
static void
_mongoc_client_killcursors_command (mongoc_cluster_t *cluster,
mongoc_server_stream_t *server_stream,
int64_t cursor_id,
const char *db,
- const char *collection);
+ const char *collection,
+ mongoc_client_session_t *cs);
+
+#define DNS_ERROR(_msg, ...) \
+ do { \
+ bson_set_error (error, \
+ MONGOC_ERROR_STREAM, \
+ MONGOC_ERROR_STREAM_NAME_RESOLUTION, \
+ _msg, \
+ __VA_ARGS__); \
+ GOTO (done); \
+ } while (0)
+
+
+#ifdef MONGOC_HAVE_DNSAPI
+
+typedef bool (*mongoc_rr_callback_t) (const char *service,
+ PDNS_RECORD pdns,
+ mongoc_uri_t *uri,
+ bson_error_t *error);
+
+static bool
+srv_callback (const char *service,
+ PDNS_RECORD pdns,
+ mongoc_uri_t *uri,
+ bson_error_t *error)
+{
+ return mongoc_uri_append_host (
+ uri, pdns->Data.SRV.pNameTarget, pdns->Data.SRV.wPort, error);
+}
+
+static bool
+txt_callback (const char *service,
+ PDNS_RECORD pdns,
+ mongoc_uri_t *uri,
+ bson_error_t *error)
+{
+ DWORD i;
+ bson_string_t *txt;
+ bool r;
+
+ txt = bson_string_new (NULL);
+
+ for (i = 0; i < pdns->Data.TXT.dwStringCount; i++) {
+ bson_string_append (txt, pdns->Data.TXT.pStringArray[i]);
+ }
+
+ r = mongoc_uri_parse_options (uri, txt->str, true /* from_dns */, error);
+ bson_string_free (txt, true);
+
+ return r;
+}
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_get_rr_dnsapi --
+ *
+ * Fetch SRV or TXT resource records using the Windows DNS API and
+ * update @uri.
+ *
+ * Returns:
+ * Success or failure.
+ *
+ * For an SRV lookup, returns false if there is any error.
+ *
+ * For TXT lookup, ignores any error fetching the resource record, but
+ * returns false if the resource record is found and there is an error
+ * reading its contents as URI options.
+ *
+ * Side effects:
+ * @error is set if there is a failure.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+static bool
+_mongoc_get_rr_dnsapi (const char *service,
+ mongoc_rr_type_t rr_type,
+ mongoc_uri_t *uri,
+ bson_error_t *error)
+{
+ const char *rr_type_name;
+ WORD nst;
+ mongoc_rr_callback_t callback;
+ PDNS_RECORD pdns = NULL;
+ DNS_STATUS res;
+ LPVOID lpMsgBuf = NULL;
+ bool dns_success;
+ bool callback_success = true;
+ int i;
+
+ ENTRY;
+
+ if (rr_type == MONGOC_RR_SRV) {
+ /* return true only if DNS succeeds */
+ dns_success = false;
+ rr_type_name = "SRV";
+ nst = DNS_TYPE_SRV;
+ callback = srv_callback;
+ } else {
+ /* return true whether or not DNS succeeds */
+ dns_success = true;
+ rr_type_name = "TXT";
+ nst = DNS_TYPE_TEXT;
+ callback = txt_callback;
+ }
+
+ res = DnsQuery_UTF8 (service,
+ nst,
+ DNS_QUERY_BYPASS_CACHE,
+ NULL /* IP Address */,
+ &pdns,
+ 0 /* reserved */);
+
+ if (res) {
+ DWORD flags = FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
+
+ if (FormatMessage (flags,
+ 0,
+ res,
+ MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPTSTR) &lpMsgBuf,
+ 0,
+ 0)) {
+ DNS_ERROR ("Failed to look up %s record \"%s\": %s",
+ rr_type_name,
+ service,
+ (char *) lpMsgBuf);
+ }
+
+ DNS_ERROR ("Failed to look up %s record \"%s\": Unknown error",
+ rr_type_name,
+ service);
+ }
+
+ if (!pdns) {
+ DNS_ERROR ("No %s records for \"%s\"", rr_type_name, service);
+ }
+
+ dns_success = true;
+ i = 0;
+
+ do {
+ if (i > 0 && rr_type == MONGOC_RR_TXT) {
+ /* Initial DNS Seedlist Discovery Spec: a client "MUST raise an error
+ * when multiple TXT records are encountered". */
+ callback_success = false;
+ DNS_ERROR ("Multiple TXT records for \"%s\"", service);
+ }
+
+ if (!callback (service, pdns, uri, error)) {
+ callback_success = false;
+ GOTO (done);
+ }
+ pdns = pdns->pNext;
+ i++;
+ } while (pdns);
+
+done:
+ if (pdns) {
+ DnsRecordListFree (pdns, DnsFreeRecordList);
+ }
+
+ if (lpMsgBuf) {
+ LocalFree (lpMsgBuf);
+ }
+
+ RETURN (dns_success && callback_success);
+}
+
+#elif (defined(MONGOC_HAVE_RES_NSEARCH) || defined(MONGOC_HAVE_RES_SEARCH))
+
+typedef bool (*mongoc_rr_callback_t) (const char *service,
+ ns_msg *ns_answer,
+ ns_rr *rr,
+ mongoc_uri_t *uri,
+ bson_error_t *error);
+
+static bool
+srv_callback (const char *service,
+ ns_msg *ns_answer,
+ ns_rr *rr,
+ mongoc_uri_t *uri,
+ bson_error_t *error)
+{
+ const uint8_t *data;
+ char name[1024];
+ uint16_t port;
+ int size;
+ bool ret = false;
+
+ data = ns_rr_rdata (*rr);
+ port = ntohs (*(short *) (data + 4));
+ size = dn_expand (ns_msg_base (*ns_answer),
+ ns_msg_end (*ns_answer),
+ data + 6,
+ name,
+ sizeof (name));
+
+ if (size < 1) {
+ DNS_ERROR ("Invalid record in SRV answer for \"%s\": \"%s\"",
+ service,
+ strerror (h_errno));
+ }
+
+ ret = mongoc_uri_append_host (uri, name, port, error);
+
+done:
+ return ret;
+}
+
+static bool
+txt_callback (const char *service,
+ ns_msg *ns_answer,
+ ns_rr *rr,
+ mongoc_uri_t *uri,
+ bson_error_t *error)
+{
+ char s[256];
+ const uint8_t *data;
+ bson_string_t *txt;
+ uint16_t pos, total;
+ uint8_t len;
+ bool r = false;
+
+ total = (uint16_t) ns_rr_rdlen (*rr);
+ if (total < 1 || total > 255) {
+ DNS_ERROR ("Invalid TXT record size %hu for \"%s\"", total, service);
+ }
+
+ /* a TXT record has one or more strings, each up to 255 chars, each is
+ * prefixed by its length as 1 byte. thus endianness doesn't matter. */
+ txt = bson_string_new (NULL);
+ pos = 0;
+ data = ns_rr_rdata (*rr);
+
+ while (pos < total) {
+ memcpy (&len, data + pos, sizeof (uint8_t));
+ pos++;
+ bson_strncpy (s, (const char *) (data + pos), (size_t) len + 1);
+ bson_string_append (txt, s);
+ pos += len;
+ }
+
+ r = mongoc_uri_parse_options (uri, txt->str, true /* from_dns */, error);
+ bson_string_free (txt, true);
+
+done:
+ return r;
+}
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_get_rr_search --
+ *
+ * Fetch SRV or TXT resource records using libresolv and update @uri.
+ *
+ * Returns:
+ * Success or failure.
+ *
+ * For an SRV lookup, returns false if there is any error.
+ *
+ * For TXT lookup, ignores any error fetching the resource record, but
+ * returns false if the resource record is found and there is an error
+ * reading its contents as URI options.
+ *
+ * Side effects:
+ * @error is set if there is a failure.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+static bool
+_mongoc_get_rr_search (const char *service,
+ mongoc_rr_type_t rr_type,
+ mongoc_uri_t *uri,
+ bson_error_t *error)
+{
+#ifdef MONGOC_HAVE_RES_NSEARCH
+ struct __res_state state = {0};
+#endif
+ int size;
+ unsigned char search_buf[1024];
+ ns_msg ns_answer;
+ int n;
+ int i;
+ const char *rr_type_name;
+ ns_type nst;
+ mongoc_rr_callback_t callback;
+ ns_rr resource_record;
+ bool dns_success;
+ bool callback_success = true;
+
+ ENTRY;
+
+ if (rr_type == MONGOC_RR_SRV) {
+ /* return true only if DNS succeeds */
+ dns_success = false;
+ rr_type_name = "SRV";
+ nst = ns_t_srv;
+ callback = srv_callback;
+ } else {
+ /* return true whether or not DNS succeeds */
+ dns_success = true;
+ rr_type_name = "TXT";
+ nst = ns_t_txt;
+ callback = txt_callback;
+ }
+
+#ifdef MONGOC_HAVE_RES_NSEARCH
+ /* thread-safe */
+ res_ninit (&state);
+ size = res_nsearch (
+ &state, service, ns_c_in, nst, search_buf, sizeof (search_buf));
+#elif defined(MONGOC_HAVE_RES_SEARCH)
+ size = res_search (service, ns_c_in, nst, search_buf, sizeof (search_buf));
+#endif
+
+ if (size < 0) {
+ DNS_ERROR ("Failed to look up %s record \"%s\": %s",
+ rr_type_name,
+ service,
+ strerror (h_errno));
+ }
+
+ if (ns_initparse (search_buf, size, &ns_answer)) {
+ DNS_ERROR ("Invalid %s answer for \"%s\"", rr_type_name, service);
+ }
+
+ n = ns_msg_count (ns_answer, ns_s_an);
+ if (!n) {
+ DNS_ERROR ("No %s records for \"%s\"", rr_type_name, service);
+ }
+
+ for (i = 0; i < n; i++) {
+ if (i > 0 && rr_type == MONGOC_RR_TXT) {
+ /* Initial DNS Seedlist Discovery Spec: a client "MUST raise an error
+ * when multiple TXT records are encountered". */
+ callback_success = false;
+ DNS_ERROR ("Multiple TXT records for \"%s\"", service);
+ }
+
+ if (ns_parserr (&ns_answer, ns_s_an, i, &resource_record)) {
+ DNS_ERROR ("Invalid record %d of %s answer for \"%s\": \"%s\"",
+ i,
+ rr_type_name,
+ service,
+ strerror (h_errno));
+ }
+
+ if (!callback (service, &ns_answer, &resource_record, uri, error)) {
+ callback_success = false;
+ GOTO (done);
+ }
+ }
+
+ dns_success = true;
+
+done:
+
+#ifdef MONGOC_HAVE_RES_NDESTROY
+ /* defined on BSD/Darwin, and only if MONGOC_HAVE_RES_NSEARCH is defined */
+ res_ndestroy (&state);
+#elif defined(MONGOC_HAVE_RES_NCLOSE)
+ /* defined on Linux, and only if MONGOC_HAVE_RES_NSEARCH is defined */
+ res_nclose (&state);
+#endif
+ RETURN (dns_success && callback_success);
+}
+#endif
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_client_get_rr --
+ *
+ * Fetch an SRV or TXT resource record and update @uri. See RFCs 1464
+ * and 2782, and MongoDB's Initial DNS Seedlist Discovery Spec.
+ *
+ * Returns:
+ * Success or failure.
+ *
+ * Side effects:
+ * @error is set if there is a failure.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+bool
+_mongoc_client_get_rr (const char *service,
+ mongoc_rr_type_t rr_type,
+ mongoc_uri_t *uri,
+ bson_error_t *error)
+{
+#ifdef MONGOC_HAVE_DNSAPI
+ return _mongoc_get_rr_dnsapi (service, rr_type, uri, error);
+#elif (defined(MONGOC_HAVE_RES_NSEARCH) || defined(MONGOC_HAVE_RES_SEARCH))
+ return _mongoc_get_rr_search (service, rr_type, uri, error);
+#else
+ bson_set_error (error,
+ MONGOC_ERROR_STREAM,
+ MONGOC_ERROR_STREAM_NAME_RESOLUTION,
+ "libresolv unavailable, cannot use mongodb+srv URI");
+ return false;
+#endif
+}
+#undef DNS_ERROR
/*
*--------------------------------------------------------------------------
*
* mongoc_client_connect_tcp --
*
* Connect to a host using a TCP socket.
*
* This will be performed synchronously and return a mongoc_stream_t
* that can be used to connect with the remote host.
*
* Returns:
* A newly allocated mongoc_stream_t if successful; otherwise
* NULL and @error is set.
*
* Side effects:
* @error is set if return value is NULL.
*
*--------------------------------------------------------------------------
*/
static mongoc_stream_t *
mongoc_client_connect_tcp (const mongoc_uri_t *uri,
const mongoc_host_list_t *host,
bson_error_t *error)
{
mongoc_socket_t *sock = NULL;
struct addrinfo hints;
struct addrinfo *result, *rp;
int32_t connecttimeoutms;
int64_t expire_at;
char portstr[8];
int s;
ENTRY;
BSON_ASSERT (uri);
BSON_ASSERT (host);
connecttimeoutms = mongoc_uri_get_option_as_int32 (
uri, MONGOC_URI_CONNECTTIMEOUTMS, MONGOC_DEFAULT_CONNECTTIMEOUTMS);
BSON_ASSERT (connecttimeoutms);
bson_snprintf (portstr, sizeof portstr, "%hu", host->port);
memset (&hints, 0, sizeof hints);
hints.ai_family = host->family;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = 0;
hints.ai_protocol = 0;
s = getaddrinfo (host->host, portstr, &hints, &result);
if (s != 0) {
mongoc_counter_dns_failure_inc ();
bson_set_error (error,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_NAME_RESOLUTION,
"Failed to resolve %s",
host->host);
RETURN (NULL);
}
mongoc_counter_dns_success_inc ();
for (rp = result; rp; rp = rp->ai_next) {
/*
* Create a new non-blocking socket.
*/
if (!(sock = mongoc_socket_new (
rp->ai_family, rp->ai_socktype, rp->ai_protocol))) {
continue;
}
/*
* Try to connect to the peer.
*/
expire_at = bson_get_monotonic_time () + (connecttimeoutms * 1000L);
if (0 !=
mongoc_socket_connect (
sock, rp->ai_addr, (mongoc_socklen_t) rp->ai_addrlen, expire_at)) {
char *errmsg;
char errmsg_buf[BSON_ERROR_BUFFER_SIZE];
char ip[255];
mongoc_socket_inet_ntop (rp, ip, sizeof ip);
errmsg = bson_strerror_r (
mongoc_socket_errno (sock), errmsg_buf, sizeof errmsg_buf);
MONGOC_WARNING ("Failed to connect to: %s:%d, error: %d, %s\n",
ip,
host->port,
mongoc_socket_errno (sock),
errmsg);
mongoc_socket_destroy (sock);
sock = NULL;
continue;
}
break;
}
if (!sock) {
bson_set_error (error,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_CONNECT,
"Failed to connect to target host: %s",
host->host_and_port);
freeaddrinfo (result);
RETURN (NULL);
}
freeaddrinfo (result);
return mongoc_stream_socket_new (sock);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_connect_unix --
*
* Connect to a MongoDB server using a UNIX domain socket.
*
* Returns:
* A newly allocated mongoc_stream_t if successful; otherwise
* NULL and @error is set.
*
* Side effects:
* @error is set if return value is NULL.
*
*--------------------------------------------------------------------------
*/
static mongoc_stream_t *
mongoc_client_connect_unix (const mongoc_uri_t *uri,
                            const mongoc_host_list_t *host,
                            bson_error_t *error)
{
#ifdef _WIN32
   ENTRY;
   bson_set_error (error,
                   MONGOC_ERROR_STREAM,
                   MONGOC_ERROR_STREAM_CONNECT,
                   "UNIX domain sockets not supported on win32.");
   RETURN (NULL);
#else
   struct sockaddr_un addr;
   mongoc_socket_t *conn_sock;

   ENTRY;

   BSON_ASSERT (uri);
   BSON_ASSERT (host);

   /* for UNIX-domain URIs, host->host carries the socket path */
   memset (&addr, 0, sizeof addr);
   addr.sun_family = AF_UNIX;
   bson_snprintf (addr.sun_path, sizeof addr.sun_path - 1, "%s", host->host);

   conn_sock = mongoc_socket_new (AF_UNIX, SOCK_STREAM, 0);
   if (!conn_sock) {
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Failed to create socket.");
      RETURN (NULL);
   }

   /* NOTE(review): expire_at of -1 appears to mean "no timeout" — confirm
    * against mongoc_socket_connect */
   if (-1 == mongoc_socket_connect (
                conn_sock, (struct sockaddr *) &addr, sizeof addr, -1)) {
      mongoc_socket_destroy (conn_sock);
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_CONNECT,
                      "Failed to connect to UNIX domain socket.");
      RETURN (NULL);
   }

   RETURN (mongoc_stream_socket_new (conn_sock));
#endif
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_default_stream_initiator --
*
* A mongoc_stream_initiator_t that will handle the various type
* of supported sockets by MongoDB including TCP and UNIX.
*
* Language binding authors may want to implement an alternate
* version of this method to use their native stream format.
*
* Returns:
* A mongoc_stream_t if successful; otherwise NULL and @error is set.
*
* Side effects:
* @error is set if return value is NULL.
*
*--------------------------------------------------------------------------
*/
mongoc_stream_t *
mongoc_client_default_stream_initiator (const mongoc_uri_t *uri,
                                        const mongoc_host_list_t *host,
                                        void *user_data,
                                        bson_error_t *error)
{
   mongoc_stream_t *base_stream = NULL;
#ifdef MONGOC_ENABLE_SSL
   /* user_data is the mongoc_client_t that registered this initiator */
   mongoc_client_t *client = (mongoc_client_t *) user_data;
   const char *mechanism;
   int32_t connecttimeoutms;
#endif
   BSON_ASSERT (uri);
   BSON_ASSERT (host);
#ifndef MONGOC_ENABLE_SSL
   /* without SSL compiled in, an ssl=true URI cannot be honored: fail early */
   if (mongoc_uri_get_ssl (uri)) {
      bson_set_error (error,
                      MONGOC_ERROR_CLIENT,
                      MONGOC_ERROR_CLIENT_NO_ACCEPTABLE_PEER,
                      "SSL is not enabled in this build of mongo-c-driver.");
      return NULL;
   }
#endif
   /* dispatch on address family: TCP (IPv4/IPv6/unspecified) vs. UNIX socket */
   switch (host->family) {
   case AF_UNSPEC:
#if defined(AF_INET6)
   case AF_INET6:
#endif
   case AF_INET:
      base_stream = mongoc_client_connect_tcp (uri, host, error);
      break;
   case AF_UNIX:
      base_stream = mongoc_client_connect_unix (uri, host, error);
      break;
   default:
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_INVALID_TYPE,
                      "Invalid address family: 0x%02x",
                      host->family);
      break;
   }
#ifdef MONGOC_ENABLE_SSL
   if (base_stream) {
      mechanism = mongoc_uri_get_auth_mechanism (uri);
      /* wrap in TLS when the client asked for SSL or X.509 auth is in use */
      if (client->use_ssl ||
          (mechanism && (0 == strcmp (mechanism, "MONGODB-X509")))) {
         mongoc_stream_t *original = base_stream;
         base_stream = mongoc_stream_tls_new_with_hostname (
            base_stream, host->host, &client->ssl_opts, true);
         if (!base_stream) {
            /* TLS wrapper creation failed: we still own the raw stream */
            mongoc_stream_destroy (original);
            bson_set_error (error,
                            MONGOC_ERROR_STREAM,
                            MONGOC_ERROR_STREAM_SOCKET,
                            "Failed initialize TLS state.");
            return NULL;
         }
         connecttimeoutms = mongoc_uri_get_option_as_int32 (
            uri, MONGOC_URI_CONNECTTIMEOUTMS, MONGOC_DEFAULT_CONNECTTIMEOUTMS);
         /* complete the TLS handshake within the connect timeout */
         if (!mongoc_stream_tls_handshake_block (
                base_stream, host->host, connecttimeoutms, error)) {
            mongoc_stream_destroy (base_stream);
            return NULL;
         }
      }
   }
#endif
   /* on success, hand back a buffered wrapper (1024-byte buffer) */
   return base_stream ? mongoc_stream_buffered_new (base_stream, 1024) : NULL;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_client_create_stream --
*
* INTERNAL API
*
* This function is used by the mongoc_cluster_t to initiate a
* new stream. This is done because cluster is private API and
* those using mongoc_client_t may need to override this process.
*
* This function calls the default initiator for new streams.
*
* Returns:
* A newly allocated mongoc_stream_t if successful; otherwise
* NULL and @error is set.
*
* Side effects:
* @error is set if return value is NULL.
*
*--------------------------------------------------------------------------
*/
mongoc_stream_t *
_mongoc_client_create_stream (mongoc_client_t *client,
                              const mongoc_host_list_t *host,
                              bson_error_t *error)
{
   mongoc_stream_t *stream;

   BSON_ASSERT (client);
   BSON_ASSERT (host);

   /* delegate to the configured initiator (the default, or one installed by
    * a language binding), passing along its opaque user data */
   stream = client->initiator (client->uri, host, client->initiator_data, error);

   return stream;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_client_recv --
*
* Receives a RPC from a remote MongoDB cluster node.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* @error is set if return value is false.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_client_recv (mongoc_client_t *client,
                     mongoc_rpc_t *rpc,
                     mongoc_buffer_t *buffer,
                     mongoc_server_stream_t *server_stream,
                     bson_error_t *error)
{
   bool received;

   BSON_ASSERT (client);
   BSON_ASSERT (rpc);
   BSON_ASSERT (buffer);
   BSON_ASSERT (server_stream);

   received = mongoc_cluster_try_recv (
      &client->cluster, rpc, buffer, server_stream, error);

   if (received) {
      return true;
   }

   /* the receive failed: mark this server as unusable in the topology */
   mongoc_topology_invalidate_server (
      client->topology, server_stream->sd->id, error);

   return false;
}
-/*
- *--------------------------------------------------------------------------
- *
- * _bson_to_error --
- *
- * A helper routine to convert a bson document to a bson_error_t.
- *
- * Returns:
- * None.
- *
- * Side effects:
- * @error is set if non-null.
- *
- *--------------------------------------------------------------------------
- */
-
-static void
-_bson_to_error (const bson_t *b, int32_t error_api_version, bson_error_t *error)
-{
- bson_iter_t iter;
- uint32_t code = 0;
- mongoc_error_domain_t domain =
- error_api_version >= MONGOC_ERROR_API_VERSION_2 ? MONGOC_ERROR_SERVER
- : MONGOC_ERROR_QUERY;
-
- BSON_ASSERT (b);
-
- if (!error) {
- return;
- }
-
- if (bson_iter_init_find (&iter, b, "code") &&
- BSON_ITER_HOLDS_INT32 (&iter)) {
- code = (uint32_t) bson_iter_int32 (&iter);
- }
-
- if (bson_iter_init_find (&iter, b, "$err") && BSON_ITER_HOLDS_UTF8 (&iter)) {
- bson_set_error (error, domain, code, "%s", bson_iter_utf8 (&iter, NULL));
- return;
- }
-
- if (bson_iter_init_find (&iter, b, "errmsg") &&
- BSON_ITER_HOLDS_UTF8 (&iter)) {
- bson_set_error (error, domain, code, "%s", bson_iter_utf8 (&iter, NULL));
- return;
- }
-
- bson_set_error (error,
- MONGOC_ERROR_QUERY,
- MONGOC_ERROR_QUERY_FAILURE,
- "An unknown error occurred on the server.");
-}
-
-
-/*
- *--------------------------------------------------------------------------
- *
- * mongoc_client_recv_gle --
- *
- * INTERNAL API
- *
- * This function is used to receive the next RPC from a cluster
- * node, expecting it to be the response to a getlasterror command.
- *
- * The RPC is parsed into @error if it is an error and false is
- * returned.
- *
- * If the operation was successful, true is returned.
- *
- * if @gle_doc is not NULL, then the actual response document for
- * the gle command will be stored as an out parameter. The caller
- * is responsible for freeing it in this case.
- *
- * Returns:
- * true if getlasterror was success; otherwise false.
- *
- * Side effects:
- * @gle_doc will be set if non NULL and a reply was received.
- * @error if return value is false, and @gle_doc is set to NULL.
- *
- *--------------------------------------------------------------------------
- */
-
-bool
-_mongoc_client_recv_gle (mongoc_client_t *client,
- mongoc_server_stream_t *server_stream,
- bson_t **gle_doc,
- bson_error_t *error)
-{
- mongoc_buffer_t buffer;
- mongoc_rpc_t rpc;
- bson_iter_t iter;
- bool ret = false;
- bson_t b;
-
- ENTRY;
-
- BSON_ASSERT (client);
- BSON_ASSERT (server_stream);
-
- if (gle_doc) {
- *gle_doc = NULL;
- }
-
- _mongoc_buffer_init (&buffer, NULL, 0, NULL, NULL);
-
- if (!mongoc_cluster_try_recv (
- &client->cluster, &rpc, &buffer, server_stream, error)) {
- mongoc_topology_invalidate_server (
- client->topology, server_stream->sd->id, error);
-
- GOTO (cleanup);
- }
-
- if (rpc.header.opcode != MONGOC_OPCODE_REPLY) {
- bson_set_error (error,
- MONGOC_ERROR_PROTOCOL,
- MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
- "Received message other than OP_REPLY.");
- GOTO (cleanup);
- }
-
- if (_mongoc_rpc_reply_get_first (&rpc.reply, &b)) {
- if ((rpc.reply.flags & MONGOC_REPLY_QUERY_FAILURE)) {
- _bson_to_error (&b, client->error_api_version, error);
- bson_destroy (&b);
- GOTO (cleanup);
- }
-
- if (gle_doc) {
- *gle_doc = bson_copy (&b);
- }
-
- if (!bson_iter_init_find (&iter, &b, "ok") ||
- BSON_ITER_HOLDS_DOUBLE (&iter)) {
- if (bson_iter_double (&iter) == 0.0) {
- _bson_to_error (&b, client->error_api_version, error);
- }
- }
-
- bson_destroy (&b);
- ret = true;
- }
-
-cleanup:
- _mongoc_buffer_destroy (&buffer);
-
- RETURN (ret);
-}
-
/*
*--------------------------------------------------------------------------
*
* mongoc_client_new --
*
* Create a new mongoc_client_t using the URI provided.
*
* @uri should be a MongoDB URI string such as "mongodb://localhost/"
* More information on the format can be found at
* http://docs.mongodb.org/manual/reference/connection-string/
*
* Returns:
* A newly allocated mongoc_client_t or NULL if @uri_string is
* invalid.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_client_t *
mongoc_client_new (const char *uri_string)
{
mongoc_topology_t *topology;
mongoc_client_t *client;
mongoc_uri_t *uri;
if (!uri_string) {
uri_string = "mongodb://127.0.0.1/";
}
if (!(uri = mongoc_uri_new (uri_string))) {
return NULL;
}
topology = mongoc_topology_new (uri, true);
- client = _mongoc_client_new_from_uri (uri, topology);
+ client = _mongoc_client_new_from_uri (topology);
if (!client) {
mongoc_topology_destroy (topology);
}
mongoc_uri_destroy (uri);
return client;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_set_ssl_opts
*
* set ssl opts for a client
*
* Returns:
* Nothing
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
#ifdef MONGOC_ENABLE_SSL
void
mongoc_client_set_ssl_opts (mongoc_client_t *client,
                            const mongoc_ssl_opt_t *opts)
{
   BSON_ASSERT (client);
   BSON_ASSERT (opts);
   /* release any SSL options previously copied into the client */
   _mongoc_ssl_opts_cleanup (&client->ssl_opts);
   client->use_ssl = true;
   /* the client keeps its own copy; caller retains ownership of @opts */
   _mongoc_ssl_opts_copy_to (opts, &client->ssl_opts);
   /* a single-threaded client drives its own scanner: forward the options */
   if (client->topology->single_threaded) {
      mongoc_topology_scanner_set_ssl_opts (client->topology->scanner,
                                            &client->ssl_opts);
   }
}
#endif
/*
*--------------------------------------------------------------------------
*
* mongoc_client_new_from_uri --
*
* Create a new mongoc_client_t for a mongoc_uri_t.
*
* Returns:
* A newly allocated mongoc_client_t.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_client_t *
mongoc_client_new_from_uri (const mongoc_uri_t *uri)
{
mongoc_topology_t *topology;
topology = mongoc_topology_new (uri, true);
- return _mongoc_client_new_from_uri (uri, topology);
+ /* topology->uri may be different from uri: if this is a mongodb+srv:// URI
+ * then mongoc_topology_new has fetched SRV and TXT records and updated its
+ * uri from them.
+ */
+ return _mongoc_client_new_from_uri (topology);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_client_new_from_uri --
*
- * Create a new mongoc_client_t for a mongoc_uri_t and a given
- * topology object.
+ * Create a new mongoc_client_t for a given topology object.
*
* Returns:
* A newly allocated mongoc_client_t.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_client_t *
-_mongoc_client_new_from_uri (const mongoc_uri_t *uri,
- mongoc_topology_t *topology)
+_mongoc_client_new_from_uri (mongoc_topology_t *topology)
{
mongoc_client_t *client;
const mongoc_read_prefs_t *read_prefs;
const mongoc_read_concern_t *read_concern;
const mongoc_write_concern_t *write_concern;
const char *appname;
- BSON_ASSERT (uri);
+ BSON_ASSERT (topology);
#ifndef MONGOC_ENABLE_SSL
- if (mongoc_uri_get_ssl (uri)) {
+ if (mongoc_uri_get_ssl (topology->uri)) {
MONGOC_ERROR ("Can't create SSL client, SSL not enabled in this build.");
return NULL;
}
#endif
client = (mongoc_client_t *) bson_malloc0 (sizeof *client);
- client->uri = mongoc_uri_copy (uri);
+ client->uri = mongoc_uri_copy (topology->uri);
client->initiator = mongoc_client_default_stream_initiator;
client->initiator_data = client;
client->topology = topology;
client->error_api_version = MONGOC_ERROR_API_VERSION_LEGACY;
client->error_api_set = false;
+ client->client_sessions = mongoc_set_new (8, NULL, NULL);
+ client->csid_rand_seed = (unsigned int) bson_get_monotonic_time ();
write_concern = mongoc_uri_get_write_concern (client->uri);
client->write_concern = mongoc_write_concern_copy (write_concern);
read_concern = mongoc_uri_get_read_concern (client->uri);
client->read_concern = mongoc_read_concern_copy (read_concern);
read_prefs = mongoc_uri_get_read_prefs_t (client->uri);
client->read_prefs = mongoc_read_prefs_copy (read_prefs);
appname =
mongoc_uri_get_option_as_utf8 (client->uri, MONGOC_URI_APPNAME, NULL);
if (appname && client->topology->single_threaded) {
/* the appname should have already been validated */
BSON_ASSERT (mongoc_client_set_appname (client, appname));
}
mongoc_cluster_init (&client->cluster, client->uri, client);
#ifdef MONGOC_ENABLE_SSL
client->use_ssl = false;
if (mongoc_uri_get_ssl (client->uri)) {
mongoc_ssl_opt_t ssl_opt = {0};
_mongoc_ssl_opts_from_uri (&ssl_opt, client->uri);
/* sets use_ssl = true */
mongoc_client_set_ssl_opts (client, &ssl_opt);
}
#endif
mongoc_counter_clients_active_inc ();
return client;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_destroy --
*
* Destroys a mongoc_client_t and cleans up all resources associated
* with the client instance.
*
* Returns:
* None.
*
* Side effects:
* @client is destroyed.
*
*--------------------------------------------------------------------------
*/
void
mongoc_client_destroy (mongoc_client_t *client)
{
if (client) {
if (client->topology->single_threaded) {
+ _mongoc_client_end_sessions (client);
mongoc_topology_destroy (client->topology);
}
mongoc_write_concern_destroy (client->write_concern);
mongoc_read_concern_destroy (client->read_concern);
mongoc_read_prefs_destroy (client->read_prefs);
mongoc_cluster_destroy (&client->cluster);
mongoc_uri_destroy (client->uri);
+ mongoc_set_destroy (client->client_sessions);
#ifdef MONGOC_ENABLE_SSL
_mongoc_ssl_opts_cleanup (&client->ssl_opts);
#endif
bson_free (client);
mongoc_counter_clients_active_dec ();
mongoc_counter_clients_disposed_inc ();
}
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_get_uri --
*
* Fetch the URI used for @client.
*
* Returns:
* A mongoc_uri_t that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const mongoc_uri_t *
mongoc_client_get_uri (const mongoc_client_t *client)
{
   BSON_ASSERT (client);
   /* the client owns this URI copy; callers must not modify or free it */
   return client->uri;
}
+/*
+ *--------------------------------------------------------------------------
+ *
+ * mongoc_client_start_session --
+ *
+ * Creates a structure to communicate in a session over @client.
+ *
+ * This structure should be freed when the caller is done with it
+ * using mongoc_client_session_destroy().
+ *
+ * Returns:
+ * A newly allocated mongoc_client_session_t.
+ *
+ * Side effects:
+ * None.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+mongoc_client_session_t *
+mongoc_client_start_session (mongoc_client_t *client,
+ const mongoc_session_opt_t *opts,
+ bson_error_t *error)
+{
+ mongoc_server_session_t *ss;
+ mongoc_client_session_t *cs;
+ uint32_t csid;
+
+ ENTRY;
+
+ ss = _mongoc_client_pop_server_session (client, error);
+ if (!ss) {
+ RETURN (NULL);
+ }
+
+ /* get a random internal id for the session, retrying on collision */
+ do {
+ csid = (uint32_t) _mongoc_rand_simple (&client->csid_rand_seed);
+ } while (mongoc_set_get (client->client_sessions, csid));
+
+ cs = _mongoc_client_session_new (client, ss, opts, csid);
+
+ /* remember session so if we see its client_session_id in a command, we can
+ * find its lsid and clusterTime */
+ mongoc_set_add (client->client_sessions, csid, cs);
+
+ RETURN (cs);
+}
+
+
/*
*--------------------------------------------------------------------------
*
* mongoc_client_get_database --
*
* Fetches a newly allocated database structure to communicate with
* a database over @client.
*
* @database should be a db name such as "test".
*
* This structure should be freed when the caller is done with it
* using mongoc_database_destroy().
*
* Returns:
* A newly allocated mongoc_database_t.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_database_t *
mongoc_client_get_database (mongoc_client_t *client, const char *name)
{
   const mongoc_read_prefs_t *prefs;
   const mongoc_read_concern_t *rc;
   const mongoc_write_concern_t *wc;

   BSON_ASSERT (client);
   BSON_ASSERT (name);

   /* the new database handle inherits the client's current defaults */
   prefs = client->read_prefs;
   rc = client->read_concern;
   wc = client->write_concern;

   return _mongoc_database_new (client, name, prefs, rc, wc);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_get_default_database --
*
* Get the database named in the MongoDB connection URI, or NULL
* if none was specified in the URI.
*
* This structure should be freed when the caller is done with it
* using mongoc_database_destroy().
*
* Returns:
* A newly allocated mongoc_database_t or NULL.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_database_t *
mongoc_client_get_default_database (mongoc_client_t *client)
{
   const char *db_name;

   BSON_ASSERT (client);

   /* the connection URI may or may not name a database */
   db_name = mongoc_uri_get_database (client->uri);

   return db_name ? mongoc_client_get_database (client, db_name) : NULL;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_get_collection --
*
* This function returns a newly allocated collection structure.
*
* @db should be the name of the database, such as "test".
* @collection should be the name of the collection such as "test".
*
* The above would result in the namespace "test.test".
*
* You should free this structure when you are done with it using
* mongoc_collection_destroy().
*
* Returns:
* A newly allocated mongoc_collection_t that should be freed with
* mongoc_collection_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_collection_t *
mongoc_client_get_collection (mongoc_client_t *client,
                              const char *db,
                              const char *collection)
{
   const mongoc_read_prefs_t *prefs;
   const mongoc_read_concern_t *rc;
   const mongoc_write_concern_t *wc;

   BSON_ASSERT (client);
   BSON_ASSERT (db);
   BSON_ASSERT (collection);

   /* the collection handle inherits the client's current defaults */
   prefs = client->read_prefs;
   rc = client->read_concern;
   wc = client->write_concern;

   return _mongoc_collection_new (client, db, collection, prefs, rc, wc);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_get_gridfs --
*
* This function returns a newly allocated collection structure.
*
* @db should be the name of the database, such as "test".
*
* @prefix optional prefix for GridFS collection names, or NULL. Default
* is "fs", thus the default collection names for GridFS are "fs.files"
* and "fs.chunks".
*
* Returns:
* A newly allocated mongoc_gridfs_t that should be freed with
* mongoc_gridfs_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_gridfs_t *
mongoc_client_get_gridfs (mongoc_client_t *client,
                          const char *db,
                          const char *prefix,
                          bson_error_t *error)
{
   const char *effective_prefix;

   BSON_ASSERT (client);
   BSON_ASSERT (db);

   /* default prefix is "fs", yielding "fs.files" and "fs.chunks" */
   effective_prefix = prefix ? prefix : "fs";

   return _mongoc_gridfs_new (client, db, effective_prefix, error);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_get_write_concern --
*
* Fetches the default write concern for @client.
*
* Returns:
* A mongoc_write_concern_t that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const mongoc_write_concern_t *
mongoc_client_get_write_concern (const mongoc_client_t *client)
{
   BSON_ASSERT (client);
   /* client-owned default; callers must not modify or free it */
   return client->write_concern;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_set_write_concern --
*
* Sets the default write concern for @client.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_client_set_write_concern (mongoc_client_t *client,
                                 const mongoc_write_concern_t *write_concern)
{
   BSON_ASSERT (client);

   /* a no-op when the caller passes the concern we already hold */
   if (write_concern == client->write_concern) {
      return;
   }

   if (client->write_concern) {
      mongoc_write_concern_destroy (client->write_concern);
   }

   /* copy the caller's concern, or reset to a fresh default on NULL */
   if (write_concern) {
      client->write_concern = mongoc_write_concern_copy (write_concern);
   } else {
      client->write_concern = mongoc_write_concern_new ();
   }
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_get_read_concern --
*
* Fetches the default read concern for @client.
*
* Returns:
* A mongoc_read_concern_t that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const mongoc_read_concern_t *
mongoc_client_get_read_concern (const mongoc_client_t *client)
{
   BSON_ASSERT (client);
   /* client-owned default; callers must not modify or free it */
   return client->read_concern;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_set_read_concern --
*
* Sets the default read concern for @client.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_client_set_read_concern (mongoc_client_t *client,
                                const mongoc_read_concern_t *read_concern)
{
   BSON_ASSERT (client);

   /* a no-op when the caller passes the concern we already hold */
   if (read_concern == client->read_concern) {
      return;
   }

   if (client->read_concern) {
      mongoc_read_concern_destroy (client->read_concern);
   }

   /* copy the caller's concern, or reset to a fresh default on NULL */
   if (read_concern) {
      client->read_concern = mongoc_read_concern_copy (read_concern);
   } else {
      client->read_concern = mongoc_read_concern_new ();
   }
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_get_read_prefs --
*
* Fetch the default read preferences for @client.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const mongoc_read_prefs_t *
mongoc_client_get_read_prefs (const mongoc_client_t *client)
{
   BSON_ASSERT (client);
   /* client-owned default; callers must not modify or free it */
   return client->read_prefs;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_set_read_prefs --
*
* Set the default read preferences for @client.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_client_set_read_prefs (mongoc_client_t *client,
                              const mongoc_read_prefs_t *read_prefs)
{
   BSON_ASSERT (client);

   /* a no-op when the caller passes the prefs we already hold */
   if (read_prefs == client->read_prefs) {
      return;
   }

   if (client->read_prefs) {
      mongoc_read_prefs_destroy (client->read_prefs);
   }

   /* copy the caller's prefs, or fall back to primary reads on NULL */
   if (read_prefs) {
      client->read_prefs = mongoc_read_prefs_copy (read_prefs);
   } else {
      client->read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
   }
}
mongoc_cursor_t *
mongoc_client_command (mongoc_client_t *client,
const char *db_name,
mongoc_query_flags_t flags,
uint32_t skip,
uint32_t limit,
uint32_t batch_size,
const bson_t *query,
const bson_t *fields,
const mongoc_read_prefs_t *read_prefs)
{
char ns[MONGOC_NAMESPACE_MAX];
mongoc_cursor_t *cursor;
BSON_ASSERT (client);
BSON_ASSERT (db_name);
BSON_ASSERT (query);
/*
* Allow a caller to provide a fully qualified namespace
*/
if (NULL == strstr (db_name, "$cmd")) {
bson_snprintf (ns, sizeof ns, "%s.$cmd", db_name);
db_name = ns;
}
/* flags, skip, limit, batch_size, fields are unused */
cursor = _mongoc_cursor_new_with_opts (
- client, db_name, true /* is_command */, query, NULL, read_prefs, NULL);
+ client, db_name, false /* is_find */, query, NULL, read_prefs, NULL);
return cursor;
}
+static bool
+_mongoc_client_retryable_write_command_with_stream (
+ mongoc_client_t *client,
+ mongoc_cmd_parts_t *parts,
+ mongoc_server_stream_t *server_stream,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ mongoc_server_stream_t *retry_server_stream = NULL;
+ bson_iter_t txn_number_iter;
+ bool is_retryable = true;
+ bool ret;
+
+ ENTRY;
+
+ BSON_ASSERT (parts->is_retryable_write);
+
+ /* increment the transaction number for the first attempt of each retryable
+ * write command */
+ BSON_ASSERT (bson_iter_init_find (
+ &txn_number_iter, parts->assembled.command, "txnNumber"));
+ bson_iter_overwrite_int64 (
+ &txn_number_iter, ++parts->assembled.session->server_session->txn_number);
+
+retry:
+ ret = mongoc_cluster_run_command_monitored (
+ &client->cluster, &parts->assembled, reply, error);
+
+ /* If a retryable error is encountered and the write is retryable, select
+ * a new writable stream and retry. If server selection fails or the selected
+ * server does not support retryable writes, fall through and allow the
+ * original error to be reported. */
+ if (!ret && is_retryable &&
+ (error->domain == MONGOC_ERROR_STREAM ||
+ mongoc_cluster_is_not_master_error (error))) {
+ bson_error_t ignored_error;
+
+ /* each write command may be retried at most once */
+ is_retryable = false;
+
+ if (retry_server_stream) {
+ mongoc_server_stream_cleanup (retry_server_stream);
+ }
+
+ retry_server_stream =
+ mongoc_cluster_stream_for_writes (&client->cluster, &ignored_error);
+
+ if (retry_server_stream && retry_server_stream->sd->max_wire_version >=
+ WIRE_VERSION_RETRY_WRITES) {
+ parts->assembled.server_stream = retry_server_stream;
+ GOTO (retry);
+ }
+ }
+
+ if (retry_server_stream) {
+ mongoc_server_stream_cleanup (retry_server_stream);
+ }
+
+ RETURN (ret);
+}
+
+
static bool
_mongoc_client_command_with_stream (mongoc_client_t *client,
mongoc_cmd_parts_t *parts,
mongoc_server_stream_t *server_stream,
bson_t *reply,
bson_error_t *error)
{
ENTRY;
parts->assembled.operation_id = ++client->cluster.operation_id;
+ if (!mongoc_cmd_parts_assemble (parts, server_stream, error)) {
+ _mongoc_bson_init_if_set (reply);
+ return false;
+ };
+
+ if (parts->is_retryable_write) {
+ RETURN (_mongoc_client_retryable_write_command_with_stream (
+ client, parts, server_stream, reply, error));
+ }
+
RETURN (mongoc_cluster_run_command_monitored (
- &client->cluster, parts, server_stream, reply, error));
+ &client->cluster, &parts->assembled, reply, error));
}
bool
mongoc_client_command_simple (mongoc_client_t *client,
const char *db_name,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
bson_t *reply,
bson_error_t *error)
{
mongoc_cluster_t *cluster;
mongoc_server_stream_t *server_stream = NULL;
mongoc_cmd_parts_t parts;
bool ret;
ENTRY;
BSON_ASSERT (client);
BSON_ASSERT (db_name);
BSON_ASSERT (command);
if (!_mongoc_read_prefs_validate (read_prefs, error)) {
RETURN (false);
}
cluster = &client->cluster;
- mongoc_cmd_parts_init (&parts, db_name, MONGOC_QUERY_NONE, command);
+ mongoc_cmd_parts_init (&parts, client, db_name, MONGOC_QUERY_NONE, command);
parts.read_prefs = read_prefs;
/* Server Selection Spec: "The generic command method has a default read
* preference of mode 'primary'. The generic command method MUST ignore any
* default read preference from client, database or collection
* configuration. The generic command method SHOULD allow an optional read
* preference argument."
*/
server_stream = mongoc_cluster_stream_for_reads (cluster, read_prefs, error);
if (server_stream) {
ret = _mongoc_client_command_with_stream (
client, &parts, server_stream, reply, error);
} else {
if (reply) {
bson_init (reply);
}
ret = false;
}
mongoc_cmd_parts_cleanup (&parts);
mongoc_server_stream_cleanup (server_stream);
RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_client_command_with_opts --
*
* Execute a command on the server. If mode is MONGOC_CMD_READ or
* MONGOC_CMD_RW, then read concern is applied from @opts, or else from
* @default_rc, and read preferences are applied from @default_prefs.
* If mode is MONGOC_CMD_WRITE or MONGOC_CMD_RW, then write concern is
* applied from @opts if present, or else from @default_wc.
*
+ * If mode is MONGOC_CMD_RAW, then read concern and write concern are
+ * applied from @opts only. Read preferences are applied from
+ * @read_prefs.
+ *
* The mongoc_client_t's read preference, read concern, and write concern
* are *NOT* applied.
*
* Returns:
* Success or failure.
* A write concern timeout or write concern error is considered a failure.
*
* Side effects:
* @reply is always initialized.
* @error is filled out if the command fails.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_client_command_with_opts (mongoc_client_t *client,
const char *db_name,
const bson_t *command,
mongoc_command_mode_t mode,
const bson_t *opts,
mongoc_query_flags_t flags,
const mongoc_read_prefs_t *default_prefs,
mongoc_read_concern_t *default_rc,
mongoc_write_concern_t *default_wc,
bson_t *reply,
bson_error_t *error)
{
mongoc_cmd_parts_t parts;
+ const char *command_name;
mongoc_server_stream_t *server_stream = NULL;
mongoc_cluster_t *cluster;
bson_t reply_local;
bson_t *reply_ptr;
uint32_t server_id;
bool ret = false;
ENTRY;
BSON_ASSERT (client);
BSON_ASSERT (db_name);
BSON_ASSERT (command);
- mongoc_cmd_parts_init (&parts, db_name, flags, command);
+ mongoc_cmd_parts_init (&parts, client, db_name, flags, command);
+ parts.is_read_command = (mode & MONGOC_CMD_READ);
parts.is_write_command = (mode & MONGOC_CMD_WRITE);
+ command_name = _mongoc_get_command_name (command);
+
+ if (!command_name) {
+ bson_set_error (error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "Empty command document");
+ GOTO (err);
+ }
+
reply_ptr = reply ? reply : &reply_local;
- if (mode == MONGOC_CMD_READ) {
+ if (mode == MONGOC_CMD_READ || mode == MONGOC_CMD_RAW) {
/* NULL read pref is ok */
if (!_mongoc_read_prefs_validate (default_prefs, error)) {
GOTO (err);
}
parts.read_prefs = default_prefs;
} else {
/* this is a command that writes */
default_prefs = NULL;
}
cluster = &client->cluster;
if (!_mongoc_get_server_id_from_opts (opts,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
&server_id,
error)) {
GOTO (err);
}
if (server_id) {
/* "serverId" passed in opts */
server_stream = mongoc_cluster_stream_for_server (
cluster, server_id, true /* reconnect ok */, error);
if (server_stream && server_stream->sd->type != MONGOC_SERVER_MONGOS) {
parts.user_query_flags |= MONGOC_QUERY_SLAVE_OK;
}
} else if (parts.is_write_command) {
server_stream = mongoc_cluster_stream_for_writes (cluster, error);
} else {
server_stream =
mongoc_cluster_stream_for_reads (cluster, default_prefs, error);
}
if (server_stream) {
+ int32_t wire_version = server_stream->sd->max_wire_version;
bson_iter_t iter;
if (opts && bson_iter_init (&iter, opts)) {
- if (!mongoc_cmd_parts_append_opts (&parts, &iter,
- server_stream->sd->max_wire_version,
- error)) {
+ if (!mongoc_cmd_parts_append_opts (
+ &parts, &iter, wire_version, error)) {
GOTO (err);
}
}
/* use default write concern unless it's in opts */
if ((mode & MONGOC_CMD_WRITE) &&
- server_stream->sd->max_wire_version >=
- WIRE_VERSION_CMD_WRITE_CONCERN &&
!mongoc_write_concern_is_default (default_wc) &&
(!opts || !bson_has_field (opts, "writeConcern"))) {
- bson_append_document (&parts.extra,
- "writeConcern",
- 12,
- _mongoc_write_concern_get_bson (default_wc));
+ bool is_fam = !strcasecmp (command_name, "findandmodify");
+
+ if ((is_fam && wire_version >= WIRE_VERSION_FAM_WRITE_CONCERN) ||
+ (!is_fam && wire_version >= WIRE_VERSION_CMD_WRITE_CONCERN)) {
+ bson_append_document (&parts.extra,
+ "writeConcern",
+ 12,
+ _mongoc_write_concern_get_bson (default_wc));
+ }
}
/* use read prefs and read concern for read commands, unless in opts */
if ((mode & MONGOC_CMD_READ) &&
- server_stream->sd->max_wire_version >= WIRE_VERSION_READ_CONCERN &&
+ wire_version >= WIRE_VERSION_READ_CONCERN &&
!mongoc_read_concern_is_default (default_rc) &&
(!opts || !bson_has_field (opts, "readConcern"))) {
bson_append_document (&parts.extra,
"readConcern",
11,
_mongoc_read_concern_get_bson (default_rc));
}
ret = _mongoc_client_command_with_stream (
client, &parts, server_stream, reply_ptr, error);
if (ret && (mode & MONGOC_CMD_WRITE)) {
ret = !_mongoc_parse_wc_err (reply_ptr, error);
}
if (reply_ptr == &reply_local) {
bson_destroy (reply_ptr);
}
GOTO (done);
}
err:
if (reply) {
bson_init (reply);
}
done:
if (server_stream) {
mongoc_server_stream_cleanup (server_stream);
}
mongoc_cmd_parts_cleanup (&parts);
RETURN (ret);
}
/* Execute a read command on the server.  Read preferences come from
 * @read_prefs if given, else the client's default; read concern comes from
 * @opts if present, else the client's default.  @reply is always
 * initialized; @error is filled out on failure. */
bool
mongoc_client_read_command_with_opts (mongoc_client_t *client,
                                      const char *db_name,
                                      const bson_t *command,
                                      const mongoc_read_prefs_t *read_prefs,
                                      const bson_t *opts,
                                      bson_t *reply,
                                      bson_error_t *error)
{
   return _mongoc_client_command_with_opts (
      client,
      db_name,
      command,
      MONGOC_CMD_READ,
      opts,
      MONGOC_QUERY_NONE,
      COALESCE (read_prefs, client->read_prefs),
      client->read_concern,
      client->write_concern,
      reply,
      error);
}
bool
mongoc_client_write_command_with_opts (mongoc_client_t *client,
const char *db_name,
const bson_t *command,
const bson_t *opts,
bson_t *reply,
bson_error_t *error)
{
return _mongoc_client_command_with_opts (client,
db_name,
command,
MONGOC_CMD_WRITE,
opts,
MONGOC_QUERY_NONE,
client->read_prefs,
client->read_concern,
client->write_concern,
reply,
error);
}
bool
mongoc_client_read_write_command_with_opts (
mongoc_client_t *client,
const char *db_name,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs /* IGNORED */,
const bson_t *opts,
bson_t *reply,
bson_error_t *error)
{
return _mongoc_client_command_with_opts (
client,
db_name,
command,
MONGOC_CMD_RW,
opts,
MONGOC_QUERY_NONE,
COALESCE (read_prefs, client->read_prefs),
client->read_concern,
client->write_concern,
reply,
error);
}
+bool
+mongoc_client_command_with_opts (mongoc_client_t *client,
+ const char *db_name,
+ const bson_t *command,
+ const mongoc_read_prefs_t *read_prefs,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ return _mongoc_client_command_with_opts (client,
+ db_name,
+ command,
+ MONGOC_CMD_RAW,
+ opts,
+ MONGOC_QUERY_NONE,
+ read_prefs,
+ client->read_concern,
+ client->write_concern,
+ reply,
+ error);
+}
+
+
bool
mongoc_client_command_simple_with_server_id (
mongoc_client_t *client,
const char *db_name,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
uint32_t server_id,
bson_t *reply,
bson_error_t *error)
{
mongoc_server_stream_t *server_stream;
mongoc_cmd_parts_t parts;
bool ret;
ENTRY;
BSON_ASSERT (client);
BSON_ASSERT (db_name);
BSON_ASSERT (command);
if (!_mongoc_read_prefs_validate (read_prefs, error)) {
RETURN (false);
}
- mongoc_cmd_parts_init (&parts, db_name, MONGOC_QUERY_NONE, command);
- parts.read_prefs = read_prefs;
-
server_stream = mongoc_cluster_stream_for_server (
&client->cluster, server_id, true /* reconnect ok */, error);
if (server_stream) {
+ mongoc_cmd_parts_init (
+ &parts, client, db_name, MONGOC_QUERY_NONE, command);
+ parts.read_prefs = read_prefs;
+
ret = _mongoc_client_command_with_stream (
client, &parts, server_stream, reply, error);
+ mongoc_cmd_parts_cleanup (&parts);
mongoc_server_stream_cleanup (server_stream);
RETURN (ret);
} else {
if (reply) {
bson_init (reply);
}
RETURN (false);
}
}
static void
_mongoc_client_prepare_killcursors_command (int64_t cursor_id,
const char *collection,
bson_t *command)
{
bson_t child;
bson_append_utf8 (command, "killCursors", 11, collection, -1);
bson_append_array_begin (command, "cursors", 7, &child);
bson_append_int64 (&child, "0", 1, cursor_id);
bson_append_array_end (command, &child);
}
void
_mongoc_client_kill_cursor (mongoc_client_t *client,
uint32_t server_id,
int64_t cursor_id,
int64_t operation_id,
const char *db,
- const char *collection)
+ const char *collection,
+ mongoc_client_session_t *cs)
{
mongoc_server_stream_t *server_stream;
ENTRY;
BSON_ASSERT (client);
BSON_ASSERT (cursor_id);
/* don't attempt reconnect if server unavailable, and ignore errors */
server_stream = mongoc_cluster_stream_for_server (
&client->cluster, server_id, false /* reconnect_ok */, NULL /* error */);
if (!server_stream) {
return;
}
if (db && collection &&
server_stream->sd->max_wire_version >= WIRE_VERSION_KILLCURSORS_CMD) {
_mongoc_client_killcursors_command (
- &client->cluster, server_stream, cursor_id, db, collection);
+ &client->cluster, server_stream, cursor_id, db, collection, cs);
} else {
_mongoc_client_op_killcursors (&client->cluster,
server_stream,
cursor_id,
operation_id,
db,
collection);
}
mongoc_server_stream_cleanup (server_stream);
EXIT;
}
static void
_mongoc_client_monitor_op_killcursors (mongoc_cluster_t *cluster,
mongoc_server_stream_t *server_stream,
int64_t cursor_id,
int64_t operation_id,
const char *db,
const char *collection)
{
bson_t doc;
mongoc_client_t *client;
mongoc_apm_command_started_t event;
ENTRY;
client = cluster->client;
if (!client->apm_callbacks.started) {
return;
}
bson_init (&doc);
_mongoc_client_prepare_killcursors_command (cursor_id, collection, &doc);
mongoc_apm_command_started_init (&event,
&doc,
db,
"killCursors",
cluster->request_id,
operation_id,
&server_stream->sd->host,
server_stream->sd->id,
client->apm_context);
client->apm_callbacks.started (&event);
mongoc_apm_command_started_cleanup (&event);
bson_destroy (&doc);
EXIT;
}
static void
_mongoc_client_monitor_op_killcursors_succeeded (
mongoc_cluster_t *cluster,
int64_t duration,
mongoc_server_stream_t *server_stream,
int64_t cursor_id,
int64_t operation_id)
{
mongoc_client_t *client;
bson_t doc;
bson_t cursors_unknown;
mongoc_apm_command_succeeded_t event;
ENTRY;
client = cluster->client;
if (!client->apm_callbacks.succeeded) {
EXIT;
}
/* fake server reply to killCursors command: {ok: 1, cursorsUnknown: [42]} */
bson_init (&doc);
bson_append_int32 (&doc, "ok", 2, 1);
bson_append_array_begin (&doc, "cursorsUnknown", 14, &cursors_unknown);
bson_append_int64 (&cursors_unknown, "0", 1, cursor_id);
bson_append_array_end (&doc, &cursors_unknown);
mongoc_apm_command_succeeded_init (&event,
duration,
&doc,
"killCursors",
cluster->request_id,
operation_id,
&server_stream->sd->host,
server_stream->sd->id,
client->apm_context);
client->apm_callbacks.succeeded (&event);
mongoc_apm_command_succeeded_cleanup (&event);
bson_destroy (&doc);
}
static void
_mongoc_client_monitor_op_killcursors_failed (
mongoc_cluster_t *cluster,
int64_t duration,
mongoc_server_stream_t *server_stream,
const bson_error_t *error,
int64_t operation_id)
{
mongoc_client_t *client;
mongoc_apm_command_failed_t event;
ENTRY;
client = cluster->client;
if (!client->apm_callbacks.failed) {
EXIT;
}
mongoc_apm_command_failed_init (&event,
duration,
"killCursors",
error,
cluster->request_id,
operation_id,
&server_stream->sd->host,
server_stream->sd->id,
client->apm_context);
client->apm_callbacks.failed (&event);
mongoc_apm_command_failed_cleanup (&event);
}
static void
_mongoc_client_op_killcursors (mongoc_cluster_t *cluster,
mongoc_server_stream_t *server_stream,
int64_t cursor_id,
int64_t operation_id,
const char *db,
const char *collection)
{
int64_t started;
mongoc_rpc_t rpc = {{0}};
bson_error_t error;
bool has_ns;
bool r;
/* called by old mongoc_client_kill_cursor without db/collection? */
has_ns = (db && collection);
started = bson_get_monotonic_time ();
++cluster->request_id;
rpc.header.msg_len = 0;
rpc.header.request_id = cluster->request_id;
rpc.header.response_to = 0;
rpc.header.opcode = MONGOC_OPCODE_KILL_CURSORS;
rpc.kill_cursors.zero = 0;
rpc.kill_cursors.cursors = &cursor_id;
rpc.kill_cursors.n_cursors = 1;
if (has_ns) {
_mongoc_client_monitor_op_killcursors (
cluster, server_stream, cursor_id, operation_id, db, collection);
}
- r = mongoc_cluster_sendv_to_server (
- cluster, &rpc, server_stream, NULL, &error);
+ r = mongoc_cluster_legacy_rpc_sendv_to_server (
+ cluster, &rpc, server_stream, &error);
if (has_ns) {
if (r) {
_mongoc_client_monitor_op_killcursors_succeeded (
cluster,
bson_get_monotonic_time () - started,
server_stream,
cursor_id,
operation_id);
} else {
_mongoc_client_monitor_op_killcursors_failed (
cluster,
bson_get_monotonic_time () - started,
server_stream,
&error,
operation_id);
}
}
}
static void
_mongoc_client_killcursors_command (mongoc_cluster_t *cluster,
mongoc_server_stream_t *server_stream,
int64_t cursor_id,
const char *db,
- const char *collection)
+ const char *collection,
+ mongoc_client_session_t *cs)
{
bson_t command = BSON_INITIALIZER;
mongoc_cmd_parts_t parts;
ENTRY;
_mongoc_client_prepare_killcursors_command (cursor_id, collection, &command);
- mongoc_cmd_parts_init (&parts, db, MONGOC_QUERY_SLAVE_OK, &command);
+ mongoc_cmd_parts_init (
+ &parts, cluster->client, db, MONGOC_QUERY_SLAVE_OK, &command);
parts.assembled.operation_id = ++cluster->operation_id;
+ mongoc_cmd_parts_set_session (&parts, cs);
- /* Find, getMore And killCursors Commands Spec: "The result from the
- * killCursors command MAY be safely ignored."
- */
- mongoc_cluster_run_command_monitored (
- cluster, &parts, server_stream, NULL, NULL);
+ if (mongoc_cmd_parts_assemble (&parts, server_stream, NULL)) {
+ /* Find, getMore And killCursors Commands Spec: "The result from the
+ * killCursors command MAY be safely ignored."
+ */
+ mongoc_cluster_run_command_monitored (
+ cluster, &parts.assembled, NULL, NULL);
+ }
mongoc_cmd_parts_cleanup (&parts);
bson_destroy (&command);
EXIT;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_client_kill_cursor --
*
* Destroy a cursor on the server.
*
* NOTE: this is only reliable when connected to a single mongod or
* mongos. If connected to a replica set, the driver attempts to
* kill the cursor on the primary. If connected to multiple mongoses
* the kill-cursors message is sent to a *random* mongos.
*
* If no primary, mongos, or standalone server is known, return
* without attempting to reconnect.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_client_kill_cursor (mongoc_client_t *client, int64_t cursor_id)
{
mongoc_topology_t *topology;
mongoc_server_description_t *selected_server;
mongoc_read_prefs_t *read_prefs;
bson_error_t error;
uint32_t server_id = 0;
topology = client->topology;
read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
mongoc_mutex_lock (&topology->mutex);
if (!mongoc_topology_compatible (&topology->description, NULL, &error)) {
MONGOC_ERROR ("Could not kill cursor: %s", error.message);
mongoc_mutex_unlock (&topology->mutex);
mongoc_read_prefs_destroy (read_prefs);
return;
}
/* see if there's a known writable server - do no I/O or retries */
selected_server =
mongoc_topology_description_select (&topology->description,
MONGOC_SS_WRITE,
read_prefs,
topology->local_threshold_msec);
if (selected_server) {
server_id = selected_server->id;
}
mongoc_mutex_unlock (&topology->mutex);
if (server_id) {
_mongoc_client_kill_cursor (client,
server_id,
cursor_id,
0 /* operation_id */,
NULL /* db */,
- NULL /* collection */);
+ NULL /* collection */,
+ NULL /* session */);
} else {
MONGOC_INFO ("No server available for mongoc_client_kill_cursor");
}
mongoc_read_prefs_destroy (read_prefs);
}
char **
mongoc_client_get_database_names (mongoc_client_t *client, bson_error_t *error)
+{
+ return mongoc_client_get_database_names_with_opts (client, NULL, error);
+}
+
+
+char **
+mongoc_client_get_database_names_with_opts (mongoc_client_t *client,
+ const bson_t *opts,
+ bson_error_t *error)
{
bson_iter_t iter;
const char *name;
char **ret = NULL;
int i = 0;
mongoc_cursor_t *cursor;
const bson_t *doc;
+ bson_t cmd = BSON_INITIALIZER;
BSON_ASSERT (client);
+ BSON_APPEND_INT32 (&cmd, "listDatabases", 1);
+ BSON_APPEND_BOOL (&cmd, "nameOnly", true);
- cursor = mongoc_client_find_databases (client, error);
+ /* ignore client read prefs */
+ cursor = _mongoc_cursor_new_with_opts (
+ client, "admin", false /* is_find */, NULL, opts, NULL, NULL);
+
+ _mongoc_cursor_array_init (cursor, &cmd, "databases");
+ bson_destroy (&cmd);
while (mongoc_cursor_next (cursor, &doc)) {
if (bson_iter_init (&iter, doc) && bson_iter_find (&iter, "name") &&
BSON_ITER_HOLDS_UTF8 (&iter) &&
(name = bson_iter_utf8 (&iter, NULL))) {
ret = (char **) bson_realloc (ret, sizeof (char *) * (i + 2));
ret[i] = bson_strdup (name);
ret[++i] = NULL;
}
}
if (!ret && !mongoc_cursor_error (cursor, error)) {
ret = (char **) bson_malloc0 (sizeof (void *));
}
mongoc_cursor_destroy (cursor);
return ret;
}
mongoc_cursor_t *
mongoc_client_find_databases (mongoc_client_t *client, bson_error_t *error)
+{
+ /* existing bug in this deprecated API: error pointer is unused */
+ return mongoc_client_find_databases_with_opts (client, NULL);
+}
+
+
+mongoc_cursor_t *
+mongoc_client_find_databases_with_opts (mongoc_client_t *client,
+ const bson_t *opts)
{
bson_t cmd = BSON_INITIALIZER;
mongoc_cursor_t *cursor;
BSON_ASSERT (client);
BSON_APPEND_INT32 (&cmd, "listDatabases", 1);
/* ignore client read prefs */
cursor = _mongoc_cursor_new_with_opts (
- client, "admin", true /* is_command */, NULL, NULL, NULL, NULL);
+ client, "admin", false /* is_find */, NULL, opts, NULL, NULL);
_mongoc_cursor_array_init (cursor, &cmd, "databases");
bson_destroy (&cmd);
return cursor;
}
int32_t
mongoc_client_get_max_message_size (mongoc_client_t *client) /* IN */
{
BSON_ASSERT (client);
return mongoc_cluster_get_max_msg_size (&client->cluster);
}
int32_t
mongoc_client_get_max_bson_size (mongoc_client_t *client) /* IN */
{
BSON_ASSERT (client);
return mongoc_cluster_get_max_bson_obj_size (&client->cluster);
}
bool
mongoc_client_get_server_status (mongoc_client_t *client, /* IN */
mongoc_read_prefs_t *read_prefs, /* IN */
bson_t *reply, /* OUT */
bson_error_t *error) /* OUT */
{
bson_t cmd = BSON_INITIALIZER;
bool ret = false;
BSON_ASSERT (client);
BSON_APPEND_INT32 (&cmd, "serverStatus", 1);
ret = mongoc_client_command_simple (
client, "admin", &cmd, read_prefs, reply, error);
bson_destroy (&cmd);
return ret;
}
void
mongoc_client_set_stream_initiator (mongoc_client_t *client,
mongoc_stream_initiator_t initiator,
void *user_data)
{
BSON_ASSERT (client);
if (!initiator) {
initiator = mongoc_client_default_stream_initiator;
user_data = client;
} else {
MONGOC_DEBUG ("Using custom stream initiator.");
}
client->initiator = initiator;
client->initiator_data = user_data;
if (client->topology->single_threaded) {
mongoc_topology_scanner_set_stream_initiator (
client->topology->scanner, initiator, user_data);
}
}
bool
_mongoc_client_set_apm_callbacks_private (mongoc_client_t *client,
mongoc_apm_callbacks_t *callbacks,
void *context)
{
if (callbacks) {
memcpy (
&client->apm_callbacks, callbacks, sizeof (mongoc_apm_callbacks_t));
} else {
memset (&client->apm_callbacks, 0, sizeof (mongoc_apm_callbacks_t));
}
client->apm_context = context;
mongoc_topology_set_apm_callbacks (client->topology, callbacks, context);
return true;
}
bool
mongoc_client_set_apm_callbacks (mongoc_client_t *client,
mongoc_apm_callbacks_t *callbacks,
void *context)
{
if (!client->topology->single_threaded) {
MONGOC_ERROR ("Cannot set callbacks on a pooled client, use "
"mongoc_client_pool_set_apm_callbacks");
return false;
}
return _mongoc_client_set_apm_callbacks_private (client, callbacks, context);
}
mongoc_server_description_t *
mongoc_client_get_server_description (mongoc_client_t *client,
uint32_t server_id)
{
/* the error info isn't useful */
return mongoc_topology_server_by_id (client->topology, server_id, NULL);
}
mongoc_server_description_t **
mongoc_client_get_server_descriptions (const mongoc_client_t *client,
size_t *n /* OUT */)
{
mongoc_topology_t *topology;
mongoc_server_description_t **sds;
BSON_ASSERT (client);
BSON_ASSERT (n);
topology = client->topology;
/* in case the client is pooled */
mongoc_mutex_lock (&topology->mutex);
sds = mongoc_topology_description_get_servers (&topology->description, n);
mongoc_mutex_unlock (&topology->mutex);
return sds;
}
void
mongoc_server_descriptions_destroy_all (mongoc_server_description_t **sds,
size_t n)
{
size_t i;
for (i = 0; i < n; ++i) {
mongoc_server_description_destroy (sds[i]);
}
bson_free (sds);
}
mongoc_server_description_t *
mongoc_client_select_server (mongoc_client_t *client,
bool for_writes,
const mongoc_read_prefs_t *prefs,
bson_error_t *error)
{
mongoc_ss_optype_t optype = for_writes ? MONGOC_SS_WRITE : MONGOC_SS_READ;
mongoc_server_description_t *sd;
if (for_writes && prefs) {
bson_set_error (error,
MONGOC_ERROR_SERVER_SELECTION,
MONGOC_ERROR_SERVER_SELECTION_FAILURE,
"Cannot use read preferences with for_writes = true");
return NULL;
}
if (!_mongoc_read_prefs_validate (prefs, error)) {
return NULL;
}
sd = mongoc_topology_select (client->topology, optype, prefs, error);
if (!sd) {
return NULL;
}
if (mongoc_cluster_check_interval (&client->cluster, sd->id)) {
/* check not required, or it succeeded */
return sd;
}
/* check failed, retry once */
mongoc_server_description_destroy (sd);
sd = mongoc_topology_select (client->topology, optype, prefs, error);
if (sd) {
return sd;
}
return NULL;
}
bool
mongoc_client_set_error_api (mongoc_client_t *client, int32_t version)
{
if (!client->topology->single_threaded) {
MONGOC_ERROR ("Cannot set Error API Version on a pooled client, use "
"mongoc_client_pool_set_error_api");
return false;
}
if (version != MONGOC_ERROR_API_VERSION_LEGACY &&
version != MONGOC_ERROR_API_VERSION_2) {
MONGOC_ERROR ("Unsupported Error API Version: %" PRId32, version);
return false;
}
if (client->error_api_set) {
MONGOC_ERROR ("Can only set Error API Version once");
return false;
}
client->error_api_version = version;
client->error_api_set = true;
return true;
}
bool
mongoc_client_set_appname (mongoc_client_t *client, const char *appname)
{
if (!client->topology->single_threaded) {
MONGOC_ERROR ("Cannot call set_appname on a client from a pool");
return false;
}
return _mongoc_topology_set_appname (client->topology, appname);
}
+
+mongoc_server_session_t *
+_mongoc_client_pop_server_session (mongoc_client_t *client, bson_error_t *error)
+{
+ return _mongoc_topology_pop_server_session (client->topology, error);
+}
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_client_lookup_session --
+ *
+ * Retrieve a mongoc_client_session_t associated with @client_session_id.
+ * Use this to find the "lsid" and "$clusterTime" to send in the server
+ * command.
+ *
+ * Returns:
+ * True on success, false on error and @error is set.
+ *
+ * Side effects:
+ * None.
+ *
+ *--------------------------------------------------------------------------
+ */
+bool
+_mongoc_client_lookup_session (const mongoc_client_t *client,
+ uint32_t client_session_id,
+ mongoc_client_session_t **cs /* OUT */,
+ bson_error_t *error /* OUT */)
+{
+ ENTRY;
+
+ *cs = mongoc_set_get (client->client_sessions, client_session_id);
+
+ if (*cs) {
+ RETURN (true);
+ }
+
+ bson_set_error (error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "Invalid sessionId");
+
+ RETURN (false);
+}
+
+void
+_mongoc_client_unregister_session (mongoc_client_t *client,
+ mongoc_client_session_t *session)
+{
+ mongoc_set_rm (client->client_sessions, session->client_session_id);
+}
+
+void
+_mongoc_client_push_server_session (mongoc_client_t *client,
+ mongoc_server_session_t *server_session)
+{
+ _mongoc_topology_push_server_session (client->topology, server_session);
+}
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * mongoc_client_end_sessions --
+ *
+ * End all server sessions in the topology's server session pool.
+ * Don't block long: if server selection or connecting fails, quit.
+ *
+ * The server session pool becomes invalid, but it's *not* cleared.
+ * Destroy the topology after this without using any sessions.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+void
+_mongoc_client_end_sessions (mongoc_client_t *client)
+{
+ mongoc_topology_t *t = client->topology;
+ mongoc_read_prefs_t *prefs;
+ bson_error_t error;
+ uint32_t server_id;
+ bson_t cmd = BSON_INITIALIZER;
+ mongoc_server_stream_t *stream;
+ mongoc_cmd_parts_t parts;
+ mongoc_cluster_t *cluster = &client->cluster;
+ bool r;
+
+ if (t->session_pool) {
+ prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY_PREFERRED);
+ server_id =
+ mongoc_topology_select_server_id (t, MONGOC_SS_READ, prefs, &error);
+
+ mongoc_read_prefs_destroy (prefs);
+ if (!server_id) {
+ MONGOC_WARNING ("Couldn't send \"endSessions\": %s", error.message);
+ return;
+ }
+
+ stream = mongoc_cluster_stream_for_server (
+ cluster, server_id, false /* reconnect_ok */, &error);
+
+ if (!stream) {
+ MONGOC_WARNING ("Couldn't send \"endSessions\": %s", error.message);
+ return;
+ }
+
+ _mongoc_topology_end_sessions_cmd (t, &cmd);
+ mongoc_cmd_parts_init (
+ &parts, client, "admin", MONGOC_QUERY_SLAVE_OK, &cmd);
+ parts.assembled.operation_id = ++cluster->operation_id;
+ parts.prohibit_lsid = true;
+
+ r = mongoc_cmd_parts_assemble (&parts, stream, &error);
+ if (!r) {
+ MONGOC_WARNING ("Couldn't construct \"endSessions\" command: %s",
+ error.message);
+ } else {
+ r = mongoc_cluster_run_command_monitored (
+ cluster, &parts.assembled, NULL, &error);
+
+ if (!r) {
+ MONGOC_WARNING ("Couldn't send \"endSessions\": %s", error.message);
+ }
+ }
+
+ bson_destroy (&cmd);
+ mongoc_cmd_parts_cleanup (&parts);
+ mongoc_server_stream_cleanup (stream);
+ }
+}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client.h
similarity index 87%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client.h
index fc6a0615..37cee778 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-client.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-client.h
@@ -1,238 +1,261 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CLIENT_H
#define MONGOC_CLIENT_H
#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-macros.h"
#include "mongoc-apm.h"
#include "mongoc-collection.h"
#include "mongoc-config.h"
#include "mongoc-cursor.h"
#include "mongoc-database.h"
#include "mongoc-gridfs.h"
#include "mongoc-index.h"
#include "mongoc-read-prefs.h"
#ifdef MONGOC_ENABLE_SSL
#include "mongoc-ssl.h"
#endif
#include "mongoc-stream.h"
#include "mongoc-uri.h"
#include "mongoc-write-concern.h"
#include "mongoc-read-concern.h"
#include "mongoc-server-description.h"
-
BSON_BEGIN_DECLS
#define MONGOC_NAMESPACE_MAX 128
#ifndef MONGOC_DEFAULT_CONNECTTIMEOUTMS
#define MONGOC_DEFAULT_CONNECTTIMEOUTMS (10 * 1000L)
#endif
#ifndef MONGOC_DEFAULT_SOCKETTIMEOUTMS
/*
* NOTE: The default socket timeout for connections is 5 minutes. This
* means that if your MongoDB server dies or becomes unavailable
* it will take 5 minutes to detect this.
*
* You can change this by providing sockettimeoutms= in your
* connection URI.
*/
#define MONGOC_DEFAULT_SOCKETTIMEOUTMS (1000L * 60L * 5L)
#endif
/**
* mongoc_client_t:
*
* The mongoc_client_t structure maintains information about a connection to
* a MongoDB server.
*/
typedef struct _mongoc_client_t mongoc_client_t;
+typedef struct _mongoc_client_session_t mongoc_client_session_t;
+typedef struct _mongoc_session_opt_t mongoc_session_opt_t;
+
/**
* mongoc_stream_initiator_t:
* @uri: The uri and options for the stream.
* @host: The host and port (or UNIX domain socket path) to connect to.
* @user_data: The pointer passed to mongoc_client_set_stream_initiator.
* @error: A location for an error.
*
* Creates a new mongoc_stream_t for the host and port. Begin a
* non-blocking connect and return immediately.
*
* This can be used by language bindings to create network transports other
* than those built into libmongoc. An example of such would be the streams
* API provided by PHP.
*
* Returns: A newly allocated mongoc_stream_t or NULL on failure.
*/
typedef mongoc_stream_t *(*mongoc_stream_initiator_t) (
const mongoc_uri_t *uri,
const mongoc_host_list_t *host,
void *user_data,
bson_error_t *error);
MONGOC_EXPORT (mongoc_client_t *)
mongoc_client_new (const char *uri_string);
MONGOC_EXPORT (mongoc_client_t *)
mongoc_client_new_from_uri (const mongoc_uri_t *uri);
MONGOC_EXPORT (const mongoc_uri_t *)
mongoc_client_get_uri (const mongoc_client_t *client);
MONGOC_EXPORT (void)
mongoc_client_set_stream_initiator (mongoc_client_t *client,
mongoc_stream_initiator_t initiator,
void *user_data);
MONGOC_EXPORT (mongoc_cursor_t *)
mongoc_client_command (mongoc_client_t *client,
const char *db_name,
mongoc_query_flags_t flags,
uint32_t skip,
uint32_t limit,
uint32_t batch_size,
const bson_t *query,
const bson_t *fields,
const mongoc_read_prefs_t *read_prefs);
MONGOC_EXPORT (void)
mongoc_client_kill_cursor (mongoc_client_t *client,
int64_t cursor_id) BSON_GNUC_DEPRECATED;
MONGOC_EXPORT (bool)
mongoc_client_command_simple (mongoc_client_t *client,
const char *db_name,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_client_read_command_with_opts (mongoc_client_t *client,
const char *db_name,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
const bson_t *opts,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_client_write_command_with_opts (mongoc_client_t *client,
const char *db_name,
const bson_t *command,
const bson_t *opts,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_client_read_write_command_with_opts (
mongoc_client_t *client,
const char *db_name,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs /* IGNORED */,
const bson_t *opts,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
+mongoc_client_command_with_opts (mongoc_client_t *client,
+ const char *db_name,
+ const bson_t *command,
+ const mongoc_read_prefs_t *read_prefs,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+MONGOC_EXPORT (bool)
mongoc_client_command_simple_with_server_id (
mongoc_client_t *client,
const char *db_name,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
uint32_t server_id,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (void)
mongoc_client_destroy (mongoc_client_t *client);
+MONGOC_EXPORT (mongoc_client_session_t *)
+mongoc_client_start_session (mongoc_client_t *client,
+ const mongoc_session_opt_t *opts,
+ bson_error_t *error) BSON_GNUC_WARN_UNUSED_RESULT;
MONGOC_EXPORT (mongoc_database_t *)
mongoc_client_get_database (mongoc_client_t *client, const char *name);
MONGOC_EXPORT (mongoc_database_t *)
mongoc_client_get_default_database (mongoc_client_t *client);
MONGOC_EXPORT (mongoc_gridfs_t *)
mongoc_client_get_gridfs (mongoc_client_t *client,
const char *db,
const char *prefix,
bson_error_t *error);
MONGOC_EXPORT (mongoc_collection_t *)
mongoc_client_get_collection (mongoc_client_t *client,
const char *db,
const char *collection);
MONGOC_EXPORT (char **)
-mongoc_client_get_database_names (mongoc_client_t *client, bson_error_t *error);
+mongoc_client_get_database_names (mongoc_client_t *client, bson_error_t *error)
+ BSON_GNUC_DEPRECATED_FOR (mongoc_client_get_database_names_with_opts);
+MONGOC_EXPORT (char **)
+mongoc_client_get_database_names_with_opts (mongoc_client_t *client,
+ const bson_t *opts,
+ bson_error_t *error);
+MONGOC_EXPORT (mongoc_cursor_t *)
+mongoc_client_find_databases (mongoc_client_t *client, bson_error_t *error)
+ BSON_GNUC_DEPRECATED_FOR (mongoc_client_find_databases_with_opts);
MONGOC_EXPORT (mongoc_cursor_t *)
-mongoc_client_find_databases (mongoc_client_t *client, bson_error_t *error);
+mongoc_client_find_databases_with_opts (mongoc_client_t *client,
+ const bson_t *opts);
MONGOC_EXPORT (bool)
mongoc_client_get_server_status (mongoc_client_t *client,
mongoc_read_prefs_t *read_prefs,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (int32_t)
mongoc_client_get_max_message_size (mongoc_client_t *client)
BSON_GNUC_DEPRECATED;
MONGOC_EXPORT (int32_t)
mongoc_client_get_max_bson_size (mongoc_client_t *client) BSON_GNUC_DEPRECATED;
MONGOC_EXPORT (const mongoc_write_concern_t *)
mongoc_client_get_write_concern (const mongoc_client_t *client);
MONGOC_EXPORT (void)
mongoc_client_set_write_concern (mongoc_client_t *client,
const mongoc_write_concern_t *write_concern);
MONGOC_EXPORT (const mongoc_read_concern_t *)
mongoc_client_get_read_concern (const mongoc_client_t *client);
MONGOC_EXPORT (void)
mongoc_client_set_read_concern (mongoc_client_t *client,
const mongoc_read_concern_t *read_concern);
MONGOC_EXPORT (const mongoc_read_prefs_t *)
mongoc_client_get_read_prefs (const mongoc_client_t *client);
MONGOC_EXPORT (void)
mongoc_client_set_read_prefs (mongoc_client_t *client,
const mongoc_read_prefs_t *read_prefs);
#ifdef MONGOC_ENABLE_SSL
MONGOC_EXPORT (void)
mongoc_client_set_ssl_opts (mongoc_client_t *client,
const mongoc_ssl_opt_t *opts);
#endif
MONGOC_EXPORT (bool)
mongoc_client_set_apm_callbacks (mongoc_client_t *client,
mongoc_apm_callbacks_t *callbacks,
void *context);
MONGOC_EXPORT (mongoc_server_description_t *)
mongoc_client_get_server_description (mongoc_client_t *client,
uint32_t server_id);
MONGOC_EXPORT (mongoc_server_description_t **)
mongoc_client_get_server_descriptions (const mongoc_client_t *client,
size_t *n);
MONGOC_EXPORT (void)
mongoc_server_descriptions_destroy_all (mongoc_server_description_t **sds,
size_t n);
MONGOC_EXPORT (mongoc_server_description_t *)
mongoc_client_select_server (mongoc_client_t *client,
bool for_writes,
const mongoc_read_prefs_t *prefs,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_client_set_error_api (mongoc_client_t *client, int32_t version);
MONGOC_EXPORT (bool)
mongoc_client_set_appname (mongoc_client_t *client, const char *appname);
BSON_END_DECLS
#endif /* MONGOC_CLIENT_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-cyrus-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-cyrus-private.h
similarity index 94%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-cyrus-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-cyrus-private.h
index a37a21de..a4d1e844 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-cyrus-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-cyrus-private.h
@@ -1,33 +1,33 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CLUSTER_CYRUS_PRIVATE_H
#define MONGOC_CLUSTER_CYRUS_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include "mongoc-config.h"
#include "mongoc-cluster-private.h"
#include <bson.h>
bool
_mongoc_cluster_auth_node_cyrus (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
- const char *hostname,
+ mongoc_server_description_t *sd,
bson_error_t *error);
#endif /* MONGOC_CLUSTER_CYRUS_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-cyrus.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-cyrus.c
similarity index 80%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-cyrus.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-cyrus.c
index 0802a9e7..e51594b3 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-cyrus.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-cyrus.c
@@ -1,127 +1,141 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#ifdef MONGOC_ENABLE_SASL_CYRUS
+#include "mongoc-client-private.h"
#include "mongoc-cyrus-private.h"
#include "mongoc-cluster-cyrus-private.h"
#include "mongoc-error.h"
#include "mongoc-trace-private.h"
bool
_mongoc_cluster_auth_node_cyrus (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
- const char *hostname,
+ mongoc_server_description_t *sd,
bson_error_t *error)
{
mongoc_cmd_parts_t parts;
uint32_t buflen = 0;
mongoc_cyrus_t sasl;
bson_iter_t iter;
bool ret = false;
const char *tmpstr;
uint8_t buf[4096] = {0};
bson_t cmd;
bson_t reply;
int conv_id = 0;
+ mongoc_server_stream_t *server_stream;
BSON_ASSERT (cluster);
BSON_ASSERT (stream);
if (!_mongoc_cyrus_new_from_cluster (
- &sasl, cluster, stream, hostname, error)) {
+ &sasl, cluster, stream, sd->host.host, error)) {
return false;
}
for (;;) {
- mongoc_cmd_parts_init (&parts, "$external", MONGOC_QUERY_SLAVE_OK, &cmd);
+ mongoc_cmd_parts_init (
+ &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &cmd);
if (!_mongoc_cyrus_step (
&sasl, buf, buflen, buf, sizeof buf, &buflen, error)) {
goto failure;
}
bson_init (&cmd);
if (sasl.step == 1) {
_mongoc_cluster_build_sasl_start (
&cmd, sasl.credentials.mechanism, (const char *) buf, buflen);
} else {
_mongoc_cluster_build_sasl_continue (
&cmd, conv_id, (const char *) buf, buflen);
}
TRACE ("SASL: authenticating (step %d)", sasl.step);
+ server_stream = _mongoc_cluster_create_server_stream (
+ cluster->client->topology, sd->id, stream, error);
+
+ if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
+ mongoc_server_stream_cleanup (server_stream);
+ bson_destroy (&cmd);
+ goto failure;
+ }
+
if (!mongoc_cluster_run_command_private (
- cluster, &parts, stream, 0, &reply, error)) {
+ cluster, &parts.assembled, &reply, error)) {
+ mongoc_server_stream_cleanup (server_stream);
bson_destroy (&cmd);
bson_destroy (&reply);
goto failure;
}
+ mongoc_server_stream_cleanup (server_stream);
bson_destroy (&cmd);
if (bson_iter_init_find (&iter, &reply, "done") &&
bson_iter_as_bool (&iter)) {
bson_destroy (&reply);
mongoc_cmd_parts_cleanup (&parts);
break;
}
conv_id = _mongoc_cluster_get_conversation_id (&reply);
if (!bson_iter_init_find (&iter, &reply, "payload") ||
!BSON_ITER_HOLDS_UTF8 (&iter)) {
MONGOC_DEBUG ("SASL: authentication failed");
bson_destroy (&reply);
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"Received invalid SASL reply from MongoDB server.");
goto failure;
}
tmpstr = bson_iter_utf8 (&iter, &buflen);
if (buflen > sizeof buf) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"SASL reply from MongoDB is too large.");
bson_destroy (&reply);
goto failure;
}
memcpy (buf, tmpstr, buflen);
bson_destroy (&reply);
mongoc_cmd_parts_cleanup (&parts);
}
TRACE ("%s", "SASL: authenticated");
ret = true;
failure:
_mongoc_cyrus_destroy (&sasl);
mongoc_cmd_parts_cleanup (&parts);
return ret;
}
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-gssapi-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-gssapi-private.h
similarity index 94%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-gssapi-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-gssapi-private.h
index 6e82da3c..cf3d414c 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-gssapi-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-gssapi-private.h
@@ -1,33 +1,33 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CLUSTER_GSSAPI_PRIVATE_H
#define MONGOC_CLUSTER_GSSAPI_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include "mongoc-config.h"
#include "mongoc-cluster-private.h"
#include <bson.h>
bool
_mongoc_cluster_auth_node_gssapi (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
- const char *hostname,
+ mongoc_server_description_t *sd,
bson_error_t *error);
#endif /* MONGOC_CLUSTER_GSSAPI_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-gssapi.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-gssapi.c
similarity index 95%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-gssapi.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-gssapi.c
index 466e63f3..9126a692 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-gssapi.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-gssapi.c
@@ -1,53 +1,53 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#ifdef MONGOC_ENABLE_SASL_GSSAPI
#include "mongoc-cluster-gssapi-private.h"
#include "mongoc-cluster-sasl-private.h"
#include "mongoc-gssapi-private.h"
#include "mongoc-error.h"
#include "mongoc-util-private.h"
/*
*--------------------------------------------------------------------------
*
* _mongoc_cluster_auth_node_gssapi --
*
* Perform authentication for a cluster node using GSSAPI
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* error may be set.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_cluster_auth_node_gssapi (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
- const char *hostname,
+ mongoc_server_description_t *sd,
bson_error_t *error)
{
return false;
}
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-private.h
similarity index 82%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-private.h
index 4c4d3922..fb079088 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-private.h
@@ -1,171 +1,178 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CLUSTER_PRIVATE_H
#define MONGOC_CLUSTER_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-array-private.h"
#include "mongoc-buffer-private.h"
#include "mongoc-config.h"
#include "mongoc-client.h"
#include "mongoc-list-private.h"
#include "mongoc-opcode.h"
-#include "mongoc-read-prefs.h"
#include "mongoc-rpc-private.h"
#include "mongoc-server-stream-private.h"
#include "mongoc-set-private.h"
#include "mongoc-stream.h"
+#include "mongoc-topology-private.h"
#include "mongoc-topology-description-private.h"
-#include "mongoc-uri.h"
#include "mongoc-write-concern.h"
#include "mongoc-scram-private.h"
#include "mongoc-cmd-private.h"
BSON_BEGIN_DECLS
typedef struct _mongoc_cluster_node_t {
mongoc_stream_t *stream;
char *connection_address;
int32_t max_wire_version;
int32_t min_wire_version;
int32_t max_write_batch_size;
int32_t max_bson_obj_size;
int32_t max_msg_size;
int64_t timestamp;
} mongoc_cluster_node_t;
typedef struct _mongoc_cluster_t {
int64_t operation_id;
uint32_t request_id;
uint32_t sockettimeoutms;
uint8_t scram_client_key[MONGOC_SCRAM_HASH_SIZE];
uint8_t scram_server_key[MONGOC_SCRAM_HASH_SIZE];
uint8_t scram_salted_password[MONGOC_SCRAM_HASH_SIZE];
uint32_t socketcheckintervalms;
mongoc_uri_t *uri;
unsigned requires_auth : 1;
mongoc_client_t *client;
mongoc_set_t *nodes;
mongoc_array_t iov;
} mongoc_cluster_t;
+bool
+mongoc_cluster_is_not_master_error (const bson_error_t *error);
+
void
mongoc_cluster_init (mongoc_cluster_t *cluster,
const mongoc_uri_t *uri,
void *client);
void
mongoc_cluster_destroy (mongoc_cluster_t *cluster);
void
mongoc_cluster_disconnect_node (mongoc_cluster_t *cluster,
uint32_t id,
bool invalidate,
const bson_error_t *why);
int32_t
mongoc_cluster_get_max_bson_obj_size (mongoc_cluster_t *cluster);
int32_t
mongoc_cluster_get_max_msg_size (mongoc_cluster_t *cluster);
-int32_t
-mongoc_cluster_node_max_wire_version (mongoc_cluster_t *cluster,
- uint32_t server_id);
-
size_t
_mongoc_cluster_buffer_iovec (mongoc_iovec_t *iov,
size_t iovcnt,
int skip,
char *buffer);
bool
mongoc_cluster_check_interval (mongoc_cluster_t *cluster, uint32_t server_id);
bool
-mongoc_cluster_sendv_to_server (mongoc_cluster_t *cluster,
- mongoc_rpc_t *rpcs,
- mongoc_server_stream_t *server_stream,
- const mongoc_write_concern_t *write_concern,
- bson_error_t *error);
+mongoc_cluster_legacy_rpc_sendv_to_server (
+ mongoc_cluster_t *cluster,
+ mongoc_rpc_t *rpcs,
+ mongoc_server_stream_t *server_stream,
+ bson_error_t *error);
bool
mongoc_cluster_try_recv (mongoc_cluster_t *cluster,
mongoc_rpc_t *rpc,
mongoc_buffer_t *buffer,
mongoc_server_stream_t *server_stream,
bson_error_t *error);
mongoc_server_stream_t *
mongoc_cluster_stream_for_reads (mongoc_cluster_t *cluster,
const mongoc_read_prefs_t *read_prefs,
bson_error_t *error);
mongoc_server_stream_t *
mongoc_cluster_stream_for_writes (mongoc_cluster_t *cluster,
bson_error_t *error);
mongoc_server_stream_t *
mongoc_cluster_stream_for_server (mongoc_cluster_t *cluster,
uint32_t server_id,
bool reconnect_ok,
bson_error_t *error);
bool
mongoc_cluster_run_command_monitored (mongoc_cluster_t *cluster,
- mongoc_cmd_parts_t *parts,
- mongoc_server_stream_t *server_stream,
+ mongoc_cmd_t *cmd,
bson_t *reply,
bson_error_t *error);
+bool
+mongoc_cluster_run_command_parts (mongoc_cluster_t *cluster,
+ mongoc_server_stream_t *server_stream,
+ mongoc_cmd_parts_t *parts,
+ bson_t *reply,
+ bson_error_t *error);
+
bool
mongoc_cluster_run_command_private (mongoc_cluster_t *cluster,
- mongoc_cmd_parts_t *parts,
- mongoc_stream_t *stream,
- uint32_t server_id,
+ mongoc_cmd_t *cmd,
bson_t *reply,
bson_error_t *error);
void
_mongoc_cluster_build_sasl_start (bson_t *cmd,
const char *mechanism,
const char *buf,
uint32_t buflen);
void
_mongoc_cluster_build_sasl_continue (bson_t *cmd,
int conv_id,
const char *buf,
uint32_t buflen);
int
_mongoc_cluster_get_conversation_id (const bson_t *reply);
+mongoc_server_stream_t *
+_mongoc_cluster_create_server_stream (mongoc_topology_t *topology,
+ uint32_t server_id,
+ mongoc_stream_t *stream,
+ bson_error_t *error /* OUT */);
BSON_END_DECLS
#endif /* MONGOC_CLUSTER_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sasl-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sasl-private.h
similarity index 94%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sasl-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sasl-private.h
index 64eefdc1..6be02677 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sasl-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sasl-private.h
@@ -1,33 +1,33 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CLUSTER_SASL_PRIVATE_H
#define MONGOC_CLUSTER_SASL_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include "mongoc-config.h"
#include "mongoc-cluster-private.h"
#include <bson.h>
bool
_mongoc_cluster_auth_node_sasl (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
- const char *hostname,
+ mongoc_server_description_t *sd,
bson_error_t *error);
#endif /* MONGOC_CLUSTER_SASL_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sasl.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sasl.c
similarity index 91%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sasl.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sasl.c
index 3d7771c8..bdbf09fe 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sasl.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sasl.c
@@ -1,107 +1,107 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* for size_t */
#include <bson.h>
#include "mongoc-config.h"
#ifdef MONGOC_ENABLE_SASL
#include "mongoc-cluster-private.h"
#include "mongoc-log.h"
#include "mongoc-trace-private.h"
#include "mongoc-stream-private.h"
#include "mongoc-stream-socket.h"
#include "mongoc-error.h"
#include "mongoc-util-private.h"
#ifdef MONGOC_ENABLE_SASL_CYRUS
#include "mongoc-cluster-cyrus-private.h"
#endif
#ifdef MONGOC_ENABLE_SASL_SSPI
#include "mongoc-cluster-sspi-private.h"
#endif
#ifdef MONGOC_ENABLE_SASL_GSSAPI
#include "mongoc-cluster-gssapi-private.h"
#endif
void
_mongoc_cluster_build_sasl_start (bson_t *cmd,
const char *mechanism,
const char *buf,
uint32_t buflen)
{
BSON_APPEND_INT32 (cmd, "saslStart", 1);
BSON_APPEND_UTF8 (cmd, "mechanism", "GSSAPI");
bson_append_utf8 (cmd, "payload", 7, buf, buflen);
BSON_APPEND_INT32 (cmd, "autoAuthorize", 1);
}
void
_mongoc_cluster_build_sasl_continue (bson_t *cmd,
int conv_id,
const char *buf,
uint32_t buflen)
{
BSON_APPEND_INT32 (cmd, "saslContinue", 1);
BSON_APPEND_INT32 (cmd, "conversationId", conv_id);
bson_append_utf8 (cmd, "payload", 7, buf, buflen);
}
int
_mongoc_cluster_get_conversation_id (const bson_t *reply)
{
bson_iter_t iter;
if (bson_iter_init_find (&iter, reply, "conversationId") &&
BSON_ITER_HOLDS_INT32 (&iter)) {
return bson_iter_int32 (&iter);
}
return 0;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_cluster_auth_node_sasl --
*
* Perform authentication for a cluster node using SASL. This is
* only supported for GSSAPI at the moment.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* error may be set.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_cluster_auth_node_sasl (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
- const char *hostname,
+ mongoc_server_description_t *sd,
bson_error_t *error)
{
#ifdef MONGOC_ENABLE_SASL_CYRUS
- return _mongoc_cluster_auth_node_cyrus (cluster, stream, hostname, error);
+ return _mongoc_cluster_auth_node_cyrus (cluster, stream, sd, error);
#endif
#ifdef MONGOC_ENABLE_SASL_SSPI
- return _mongoc_cluster_auth_node_sspi (cluster, stream, hostname, error);
+ return _mongoc_cluster_auth_node_sspi (cluster, stream, sd, error);
#endif
#ifdef MONGOC_ENABLE_SASL_GSSAPI
- return _mongoc_cluster_auth_node_gssapi (cluster, stream, hostname, error);
+ return _mongoc_cluster_auth_node_gssapi (cluster, stream, sd, error);
#endif
}
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sspi-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sspi-private.h
similarity index 94%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sspi-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sspi-private.h
index d63d5128..17503041 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sspi-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sspi-private.h
@@ -1,33 +1,33 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CLUSTER_SSPI_PRIVATE_H
#define MONGOC_CLUSTER_SSPI_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include "mongoc-config.h"
#include "mongoc-cluster-private.h"
#include <bson.h>
bool
_mongoc_cluster_auth_node_sspi (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
- const char *hostname,
+ mongoc_server_description_t *sd,
bson_error_t *error);
#endif /* MONGOC_CLUSTER_SSPI_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sspi.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sspi.c
similarity index 82%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sspi.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sspi.c
index 5d69e61a..90eab36d 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster-sspi.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster-sspi.c
@@ -1,293 +1,276 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#ifdef MONGOC_ENABLE_SASL_SSPI
-
+#include "mongoc-client-private.h"
#include "mongoc-cluster-sspi-private.h"
#include "mongoc-cluster-sasl-private.h"
#include "mongoc-sasl-private.h"
#include "mongoc-sspi-private.h"
#include "mongoc-error.h"
#include "mongoc-util-private.h"
mongoc_sspi_client_state_t *
_mongoc_cluster_sspi_new (mongoc_uri_t *uri, const char *hostname)
{
WCHAR *service; /* L"serviceName@hostname@REALM" */
const char *service_name = "mongodb";
ULONG flags = ISC_REQ_MUTUAL_AUTH;
const char *service_realm = NULL;
char *service_ascii = NULL;
mongoc_sspi_client_state_t *state;
const char *tmp_creds;
int service_ascii_len;
const bson_t *options;
int tmp_creds_len;
bson_t properties;
bson_iter_t iter;
int service_len;
int user_len = 0;
int pass_len = 0;
WCHAR *pass = NULL;
WCHAR *user = NULL;
int res;
options = mongoc_uri_get_options (uri);
if (!mongoc_uri_get_mechanism_properties (uri, &properties)) {
bson_init (&properties);
}
if (bson_iter_init_find_case (
&iter, options, MONGOC_URI_GSSAPISERVICENAME) &&
BSON_ITER_HOLDS_UTF8 (&iter)) {
service_name = bson_iter_utf8 (&iter, NULL);
}
if (bson_iter_init_find_case (&iter, &properties, "SERVICE_NAME") &&
BSON_ITER_HOLDS_UTF8 (&iter)) {
service_name = bson_iter_utf8 (&iter, NULL);
}
if (bson_iter_init_find_case (&iter, &properties, "SERVICE_REALM") &&
BSON_ITER_HOLDS_UTF8 (&iter)) {
service_realm = bson_iter_utf8 (&iter, NULL);
service_ascii =
bson_strdup_printf ("%s@%s@%s", service_name, hostname, service_realm);
} else {
service_ascii = bson_strdup_printf ("%s@%s", service_name, hostname);
}
service_ascii_len = strlen (service_ascii);
/* this is donated to the sspi */
service = calloc (service_ascii_len + 1, sizeof (WCHAR));
service_len = MultiByteToWideChar (
CP_UTF8, 0, service_ascii, service_ascii_len, service, service_ascii_len);
service[service_len] = L'\0';
bson_free (service_ascii);
tmp_creds = mongoc_uri_get_password (uri);
if (tmp_creds) {
tmp_creds_len = strlen (tmp_creds);
/* this is donated to the sspi */
pass = calloc (tmp_creds_len + 1, sizeof (WCHAR));
pass_len = MultiByteToWideChar (
CP_UTF8, 0, tmp_creds, tmp_creds_len, pass, tmp_creds_len);
pass[pass_len] = L'\0';
}
tmp_creds = mongoc_uri_get_username (uri);
if (tmp_creds) {
tmp_creds_len = strlen (tmp_creds);
/* this is donated to the sspi */
user = calloc (tmp_creds_len + 1, sizeof (WCHAR));
user_len = MultiByteToWideChar (
CP_UTF8, 0, tmp_creds, tmp_creds_len, user, tmp_creds_len);
user[user_len] = L'\0';
}
state = (mongoc_sspi_client_state_t *) bson_malloc0 (sizeof *state);
res = _mongoc_sspi_auth_sspi_client_init (
service, flags, user, user_len, NULL, 0, pass, pass_len, state);
if (res != MONGOC_SSPI_AUTH_GSS_ERROR) {
return state;
}
bson_free (state);
return NULL;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_cluster_auth_node_sspi --
*
* Perform authentication for a cluster node using SSPI
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* error may be set.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_cluster_auth_node_sspi (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
- const char *hostname,
+ mongoc_server_description_t *sd,
bson_error_t *error)
{
mongoc_cmd_parts_t parts;
mongoc_sspi_client_state_t *state;
- uint8_t buf[4096] = {0};
+ SEC_CHAR buf[4096] = {0};
bson_iter_t iter;
uint32_t buflen;
bson_t reply;
- char *tmpstr;
+ const char *tmpstr;
int conv_id;
bson_t cmd;
int res = MONGOC_SSPI_AUTH_GSS_CONTINUE;
int step;
- bool canonicalize = false;
- const bson_t *options;
- bson_t properties;
- char real_name[BSON_HOST_NAME_MAX + 1];
-
- options = mongoc_uri_get_options (cluster->uri);
-
- if (bson_iter_init_find_case (
- &iter, options, MONGOC_URI_CANONICALIZEHOSTNAME) &&
- BSON_ITER_HOLDS_UTF8 (&iter)) {
- canonicalize = bson_iter_bool (&iter);
- }
-
- if (mongoc_uri_get_mechanism_properties (cluster->uri, &properties)) {
- if (bson_iter_init_find_case (
- &iter, &properties, "CANONICALIZE_HOST_NAME") &&
- BSON_ITER_HOLDS_UTF8 (&iter)) {
- canonicalize = !strcasecmp (bson_iter_utf8 (&iter, NULL), "true");
- }
- bson_destroy (&properties);
- }
-
- if (canonicalize && _mongoc_sasl_get_canonicalized_name (
- stream, real_name, sizeof real_name, error)) {
- state = _mongoc_cluster_sspi_new (cluster->uri, real_name);
- } else {
- state = _mongoc_cluster_sspi_new (cluster->uri, hostname);
- }
+ mongoc_server_stream_t *server_stream;
+ state = _mongoc_cluster_sspi_new (cluster->uri, sd->host.host);
if (!state) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"Couldn't initialize SSPI service.");
goto failure;
}
for (step = 0;; step++) {
- mongoc_cmd_parts_init (&parts, "$external", MONGOC_QUERY_SLAVE_OK, &cmd);
+ mongoc_cmd_parts_init (
+ &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &cmd);
bson_init (&cmd);
if (res == MONGOC_SSPI_AUTH_GSS_CONTINUE) {
res = _mongoc_sspi_auth_sspi_client_step (state, buf);
} else if (res == MONGOC_SSPI_AUTH_GSS_COMPLETE) {
char *response;
const char *tmp_creds = mongoc_uri_get_username (cluster->uri);
int tmp_creds_len = strlen (tmp_creds);
res = _mongoc_sspi_auth_sspi_client_unwrap (state, buf);
response = bson_strdup (state->response);
_mongoc_sspi_auth_sspi_client_wrap (
- state, response, tmp_creds, tmp_creds_len, 0);
+ state, response, (SEC_CHAR*) tmp_creds, tmp_creds_len, 0);
bson_free (response);
}
if (res == MONGOC_SSPI_AUTH_GSS_ERROR) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"Received invalid SSPI data.");
mongoc_cmd_parts_cleanup (&parts);
bson_destroy (&cmd);
break;
}
if (step == 0) {
_mongoc_cluster_build_sasl_start (
&cmd, "GSSAPI", state->response, strlen (state->response));
} else {
if (state->response) {
_mongoc_cluster_build_sasl_continue (
&cmd, conv_id, state->response, strlen (state->response));
} else {
_mongoc_cluster_build_sasl_continue (&cmd, conv_id, "", 0);
}
}
- if (!mongoc_cluster_run_command_private (cluster,
- &parts,
- stream,
- 0,
- &reply,
- error)) {
+ server_stream = _mongoc_cluster_create_server_stream (
+ cluster->client->topology, sd->id, stream, error);
+
+ if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
+ mongoc_server_stream_cleanup (server_stream);
+ mongoc_cmd_parts_cleanup (&parts);
+ bson_destroy (&cmd);
+ break;
+ }
+
+ if (!mongoc_cluster_run_command_private (
+ cluster, &parts.assembled, &reply, error)) {
+ mongoc_server_stream_cleanup (server_stream);
mongoc_cmd_parts_cleanup (&parts);
bson_destroy (&cmd);
bson_destroy (&reply);
break;
}
+ mongoc_server_stream_cleanup (server_stream);
mongoc_cmd_parts_cleanup (&parts);
bson_destroy (&cmd);
if (bson_iter_init_find (&iter, &reply, "done") &&
bson_iter_as_bool (&iter)) {
bson_destroy (&reply);
break;
}
conv_id = _mongoc_cluster_get_conversation_id (&reply);
if (!bson_iter_init_find (&iter, &reply, "payload") ||
!BSON_ITER_HOLDS_UTF8 (&iter)) {
bson_destroy (&reply);
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"Received invalid SASL reply from MongoDB server.");
break;
}
tmpstr = bson_iter_utf8 (&iter, &buflen);
if (buflen > sizeof buf) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"SASL reply from MongoDB is too large.");
bson_destroy (&reply);
break;
}
memcpy (buf, tmpstr, buflen);
bson_destroy (&reply);
}
bson_free (state);
failure:
if (error->domain) {
return false;
}
return true;
}
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster.c
similarity index 78%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster.c
index 13b19422..1f4a5dab 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cluster.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cluster.c
@@ -1,2545 +1,2683 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#include <string.h>
#include "mongoc-cluster-private.h"
#include "mongoc-client-private.h"
#include "mongoc-counters-private.h"
#include "mongoc-config.h"
#include "mongoc-error.h"
#include "mongoc-host-list-private.h"
#include "mongoc-log.h"
#ifdef MONGOC_ENABLE_SASL
#include "mongoc-cluster-sasl-private.h"
#endif
#ifdef MONGOC_ENABLE_SSL
#include "mongoc-ssl.h"
#include "mongoc-ssl-private.h"
#include "mongoc-stream-tls.h"
#endif
#include "mongoc-b64-private.h"
#include "mongoc-scram-private.h"
#include "mongoc-set-private.h"
#include "mongoc-socket.h"
#include "mongoc-stream-private.h"
#include "mongoc-stream-socket.h"
#include "mongoc-stream-tls.h"
#include "mongoc-thread-private.h"
#include "mongoc-topology-private.h"
#include "mongoc-trace-private.h"
#include "mongoc-util-private.h"
#include "mongoc-write-concern-private.h"
#include "mongoc-uri-private.h"
#include "mongoc-rpc-private.h"
#include "mongoc-compression-private.h"
#include "mongoc-cmd-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "cluster"
#define CHECK_CLOSED_DURATION_MSEC 1000
#define DB_AND_CMD_FROM_COLLECTION(outstr, name) \
do { \
const char *dot = strchr (name, '.'); \
if (!dot || ((dot - name) > (sizeof outstr - 6))) { \
bson_snprintf (outstr, sizeof outstr, "admin.$cmd"); \
} else { \
memcpy (outstr, name, dot - name); \
memcpy (outstr + (dot - name), ".$cmd", 6); \
} \
} while (0)
-#define IS_NOT_COMMAND(name) (!!strcasecmp (command_name, name))
+#define IS_NOT_COMMAND(_name) (!!strcasecmp (cmd->command_name, _name))
static mongoc_server_stream_t *
mongoc_cluster_fetch_stream_single (mongoc_cluster_t *cluster,
uint32_t server_id,
bool reconnect_ok,
bson_error_t *error);
static mongoc_server_stream_t *
mongoc_cluster_fetch_stream_pooled (mongoc_cluster_t *cluster,
uint32_t server_id,
bool reconnect_ok,
bson_error_t *error);
+static bool
+mongoc_cluster_run_opmsg (mongoc_cluster_t *cluster,
+ mongoc_cmd_t *cmd,
+ bson_t *reply,
+ bson_error_t *error);
+
static void
_bson_error_message_printf (bson_error_t *error, const char *format, ...)
BSON_GNUC_PRINTF (2, 3);
-/*
- *--------------------------------------------------------------------------
- *
- * _mongoc_cluster_inc_egress_rpc --
- *
- * Helper to increment the counter for a particular RPC based on
- * it's opcode.
- *
- * Returns:
- * None.
- *
- * Side effects:
- * None.
- *
- *--------------------------------------------------------------------------
- */
-
-static void
-_mongoc_cluster_inc_egress_rpc (const mongoc_rpc_t *rpc)
-{
- mongoc_counter_op_egress_total_inc ();
-
- switch (rpc->header.opcode) {
- case MONGOC_OPCODE_DELETE:
- mongoc_counter_op_egress_delete_inc ();
- break;
- case MONGOC_OPCODE_UPDATE:
- mongoc_counter_op_egress_update_inc ();
- break;
- case MONGOC_OPCODE_INSERT:
- mongoc_counter_op_egress_insert_inc ();
- break;
- case MONGOC_OPCODE_KILL_CURSORS:
- mongoc_counter_op_egress_killcursors_inc ();
- break;
- case MONGOC_OPCODE_GET_MORE:
- mongoc_counter_op_egress_getmore_inc ();
- break;
- case MONGOC_OPCODE_REPLY:
- mongoc_counter_op_egress_reply_inc ();
- break;
- case MONGOC_OPCODE_MSG:
- mongoc_counter_op_egress_msg_inc ();
- break;
- case MONGOC_OPCODE_QUERY:
- mongoc_counter_op_egress_query_inc ();
- break;
- case MONGOC_OPCODE_COMPRESSED:
- mongoc_counter_op_egress_compressed_inc ();
- break;
- default:
- BSON_ASSERT (false);
- break;
- }
-}
-
-/*
- *--------------------------------------------------------------------------
- *
- * _mongoc_cluster_inc_ingress_rpc --
- *
- * Helper to increment the counter for a particular RPC based on
- * it's opcode.
- *
- * Returns:
- * None.
- *
- * Side effects:
- * None.
- *
- *--------------------------------------------------------------------------
- */
-
-static void
-_mongoc_cluster_inc_ingress_rpc (const mongoc_rpc_t *rpc)
-{
- mongoc_counter_op_ingress_total_inc ();
-
- switch (rpc->header.opcode) {
- case MONGOC_OPCODE_DELETE:
- mongoc_counter_op_ingress_delete_inc ();
- break;
- case MONGOC_OPCODE_UPDATE:
- mongoc_counter_op_ingress_update_inc ();
- break;
- case MONGOC_OPCODE_INSERT:
- mongoc_counter_op_ingress_insert_inc ();
- break;
- case MONGOC_OPCODE_KILL_CURSORS:
- mongoc_counter_op_ingress_killcursors_inc ();
- break;
- case MONGOC_OPCODE_GET_MORE:
- mongoc_counter_op_ingress_getmore_inc ();
- break;
- case MONGOC_OPCODE_REPLY:
- mongoc_counter_op_ingress_reply_inc ();
- break;
- case MONGOC_OPCODE_MSG:
- mongoc_counter_op_ingress_msg_inc ();
- break;
- case MONGOC_OPCODE_QUERY:
- mongoc_counter_op_ingress_query_inc ();
- break;
- case MONGOC_OPCODE_COMPRESSED:
- mongoc_counter_op_ingress_compressed_inc ();
- break;
- default:
- BSON_ASSERT (false);
- break;
- }
-}
-
-
size_t
_mongoc_cluster_buffer_iovec (mongoc_iovec_t *iov,
size_t iovcnt,
int skip,
char *buffer)
{
int n;
size_t buffer_offset = 0;
int total_iov_len = 0;
int difference = 0;
for (n = 0; n < iovcnt; n++) {
total_iov_len += iov[n].iov_len;
if (total_iov_len <= skip) {
continue;
}
/* If this iovec starts before the skip, and takes the total count
* beyond the skip, we need to figure out the portion of the iovec
* we should skip passed */
if (total_iov_len - iov[n].iov_len < skip) {
difference = skip - (total_iov_len - iov[n].iov_len);
} else {
difference = 0;
}
memcpy (buffer + buffer_offset,
- iov[n].iov_base + difference,
+ ((char *) iov[n].iov_base) + difference,
iov[n].iov_len - difference);
buffer_offset += iov[n].iov_len - difference;
}
return buffer_offset;
}
/* Allows caller to safely overwrite error->message with a formatted string,
 * even if the formatted string includes original error->message. */
static void
_bson_error_message_printf (bson_error_t *error, const char *format, ...)
{
   va_list ap;
   /* Scratch buffer: format arguments may reference error->message itself,
    * so we must not write into error->message while it is being read. */
   char scratch[sizeof error->message];

   if (!error) {
      return;
   }

   va_start (ap, format);
   bson_vsnprintf (scratch, sizeof error->message, format, ap);
   va_end (ap);

   bson_strncpy (error->message, scratch, sizeof error->message);
}
#define RUN_CMD_ERR(_domain, _code, _msg) \
do { \
bson_set_error (error, _domain, _code, _msg); \
_bson_error_message_printf ( \
error, \
"Failed to send \"%s\" command with database \"%s\": %s", \
- command_name, \
+ cmd->command_name, \
cmd->db_name, \
error->message); \
} while (0)
+
/*
*--------------------------------------------------------------------------
*
- * mongoc_cluster_run_command_internal --
+ * mongoc_cluster_run_command_opquery --
*
- * Internal function to run a command on a given stream.
- * @error and @reply are optional out-pointers.
+ * Internal function to run a command on a given stream. @error and
+ * @reply are optional out-pointers.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* @reply is set and should ALWAYS be released with bson_destroy().
* On failure, @error is filled out. If this was a network error
* and server_id is nonzero, the cluster disconnects from the server.
*
*--------------------------------------------------------------------------
*/
static bool
-mongoc_cluster_run_command_internal (mongoc_cluster_t *cluster,
- mongoc_cmd_t *cmd,
- mongoc_stream_t *stream,
- int32_t compressor_id,
- bool monitored,
- const mongoc_host_list_t *host,
- bson_t *reply,
- bson_error_t *error)
+mongoc_cluster_run_command_opquery (mongoc_cluster_t *cluster,
+ mongoc_cmd_t *cmd,
+ mongoc_stream_t *stream,
+ int32_t compressor_id,
+ bson_t *reply,
+ bson_error_t *error)
{
- int64_t started;
- const char *command_name;
- mongoc_apm_callbacks_t *callbacks;
const size_t reply_header_size = sizeof (mongoc_rpc_reply_header_t);
uint8_t reply_header_buf[sizeof (mongoc_rpc_reply_header_t)];
- uint8_t *reply_buf; /* reply body */
- mongoc_rpc_t rpc; /* sent to server */
- bson_error_t err_local; /* in case the passed-in "error" is NULL */
+ uint8_t *reply_buf; /* reply body */
+ mongoc_rpc_t rpc; /* sent to server */
bson_t reply_local;
bson_t *reply_ptr;
char cmd_ns[MONGOC_NAMESPACE_MAX];
uint32_t request_id;
int32_t msg_len;
size_t doc_len;
- mongoc_apm_command_started_t started_event;
- mongoc_apm_command_succeeded_t succeeded_event;
- mongoc_apm_command_failed_t failed_event;
bool ret = false;
-#ifdef MONGOC_ENABLE_COMPRESSION
char *output = NULL;
-#endif
+ uint32_t server_id;
ENTRY;
BSON_ASSERT (cluster);
BSON_ASSERT (cmd);
BSON_ASSERT (stream);
- started = bson_get_monotonic_time ();
-
/*
* setup
*/
reply_ptr = reply ? reply : &reply_local;
bson_init (reply_ptr);
- callbacks = &cluster->client->apm_callbacks;
-
- if (!error) {
- error = &err_local;
- }
error->code = 0;
/*
* prepare the request
*/
- command_name = _mongoc_get_command_name (cmd->command);
- if (!command_name) {
- bson_set_error (error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_COMMAND_INVALID_ARG,
- "Empty command document");
-
- /* haven't fired command-started event, so don't fire command-failed */
- monitored = false;
- GOTO (done);
- }
-
_mongoc_array_clear (&cluster->iov);
bson_snprintf (cmd_ns, sizeof cmd_ns, "%s.$cmd", cmd->db_name);
request_id = ++cluster->request_id;
_mongoc_rpc_prep_command (&rpc, cmd_ns, cmd);
rpc.header.request_id = request_id;
+ server_id = cmd->server_stream->sd->id;
- _mongoc_cluster_inc_egress_rpc (&rpc);
_mongoc_rpc_gather (&rpc, &cluster->iov);
_mongoc_rpc_swab_to_le (&rpc);
-#ifdef MONGOC_ENABLE_COMPRESSION
- if (compressor_id && IS_NOT_COMMAND ("ismaster") &&
+ if (compressor_id != -1 && IS_NOT_COMMAND ("ismaster") &&
IS_NOT_COMMAND ("saslstart") && IS_NOT_COMMAND ("saslcontinue") &&
IS_NOT_COMMAND ("getnonce") && IS_NOT_COMMAND ("authenticate") &&
IS_NOT_COMMAND ("createuser") && IS_NOT_COMMAND ("updateuser") &&
IS_NOT_COMMAND ("copydbsaslstart") &&
IS_NOT_COMMAND ("copydbgetnonce") && IS_NOT_COMMAND ("copydb")) {
output = _mongoc_rpc_compress (cluster, compressor_id, &rpc, error);
if (output == NULL) {
- monitored = false;
GOTO (done);
}
}
-#endif
-
- if (monitored && callbacks->started) {
- mongoc_apm_command_started_init (&started_event,
- cmd->command,
- cmd->db_name,
- command_name,
- request_id,
- cmd->operation_id,
- host,
- cmd->server_id,
- cluster->client->apm_context);
-
- callbacks->started (&started_event);
- mongoc_apm_command_started_cleanup (&started_event);
- }
if (cluster->client->in_exhaust) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_IN_EXHAUST,
"A cursor derived from this client is in exhaust.");
GOTO (done);
}
/*
* send and receive
*/
if (!_mongoc_stream_writev_full (stream,
cluster->iov.data,
cluster->iov.len,
cluster->sockettimeoutms,
error)) {
- mongoc_cluster_disconnect_node (cluster, cmd->server_id, true, error);
+ mongoc_cluster_disconnect_node (cluster, server_id, true, error);
/* add info about the command to writev_full's error message */
_bson_error_message_printf (
error,
"Failed to send \"%s\" command with database \"%s\": %s",
- command_name,
+ cmd->command_name,
cmd->db_name,
error->message);
GOTO (done);
}
if (reply_header_size != mongoc_stream_read (stream,
&reply_header_buf,
reply_header_size,
reply_header_size,
cluster->sockettimeoutms)) {
RUN_CMD_ERR (MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_SOCKET,
"socket error or timeout");
mongoc_cluster_disconnect_node (
- cluster, cmd->server_id, !mongoc_stream_timed_out (stream), error);
+ cluster, server_id, !mongoc_stream_timed_out (stream), error);
GOTO (done);
}
memcpy (&msg_len, reply_header_buf, 4);
msg_len = BSON_UINT32_FROM_LE (msg_len);
if ((msg_len < reply_header_size) ||
(msg_len > MONGOC_DEFAULT_MAX_MSG_SIZE)) {
+ mongoc_cluster_disconnect_node (cluster, server_id, true, error);
GOTO (done);
}
if (!_mongoc_rpc_scatter_reply_header_only (
&rpc, reply_header_buf, reply_header_size)) {
+ mongoc_cluster_disconnect_node (cluster, server_id, true, error);
GOTO (done);
}
doc_len = (size_t) msg_len - reply_header_size;
if (BSON_UINT32_FROM_LE (rpc.header.opcode) == MONGOC_OPCODE_COMPRESSED) {
bson_t tmp = BSON_INITIALIZER;
uint8_t *buf = NULL;
size_t len = BSON_UINT32_FROM_LE (rpc.compressed.uncompressed_size) +
sizeof (mongoc_rpc_header_t);
reply_buf = bson_malloc0 (msg_len);
memcpy (reply_buf, reply_header_buf, reply_header_size);
if (doc_len != mongoc_stream_read (stream,
reply_buf + reply_header_size,
doc_len,
doc_len,
cluster->sockettimeoutms)) {
RUN_CMD_ERR (MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_SOCKET,
"socket error or timeout");
+ mongoc_cluster_disconnect_node (cluster, server_id, true, error);
GOTO (done);
}
if (!_mongoc_rpc_scatter (&rpc, reply_buf, msg_len)) {
GOTO (done);
}
buf = bson_malloc0 (len);
if (!_mongoc_rpc_decompress (&rpc, buf, len)) {
RUN_CMD_ERR (MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Could not decompress server reply");
bson_free (reply_buf);
bson_free (buf);
GOTO (done);
}
_mongoc_rpc_swab_from_le (&rpc);
- _mongoc_cluster_inc_ingress_rpc (&rpc);
_mongoc_rpc_get_first_document (&rpc, &tmp);
bson_copy_to (&tmp, reply_ptr);
bson_free (reply_buf);
bson_free (buf);
} else if (BSON_UINT32_FROM_LE (rpc.header.opcode) == MONGOC_OPCODE_REPLY &&
BSON_UINT32_FROM_LE (rpc.reply_header.n_returned) == 1) {
reply_buf = bson_reserve_buffer (reply_ptr, (uint32_t) doc_len);
BSON_ASSERT (reply_buf);
if (doc_len != mongoc_stream_read (stream,
(void *) reply_buf,
doc_len,
doc_len,
cluster->sockettimeoutms)) {
RUN_CMD_ERR (MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_SOCKET,
"socket error or timeout");
+ mongoc_cluster_disconnect_node (cluster, server_id, true, error);
GOTO (done);
}
_mongoc_rpc_swab_from_le (&rpc);
} else {
GOTO (done);
}
- _mongoc_cluster_inc_ingress_rpc (&rpc);
if (!_mongoc_cmd_check_ok (
reply_ptr, cluster->client->error_api_version, error)) {
GOTO (done);
}
ret = true;
- if (monitored && callbacks->succeeded) {
- mongoc_apm_command_succeeded_init (&succeeded_event,
- bson_get_monotonic_time () - started,
- reply_ptr,
- command_name,
- request_id,
- cmd->operation_id,
- host,
- cmd->server_id,
- cluster->client->apm_context);
-
- callbacks->succeeded (&succeeded_event);
- mongoc_apm_command_succeeded_cleanup (&succeeded_event);
- }
done:
if (!ret && error->code == 0) {
/* generic error */
RUN_CMD_ERR (MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Invalid reply from server.");
}
- if (!ret && monitored && callbacks->failed) {
- mongoc_apm_command_failed_init (&failed_event,
- bson_get_monotonic_time () - started,
- command_name,
- error,
- request_id,
- cmd->operation_id,
- host,
- cmd->server_id,
- cluster->client->apm_context);
-
- callbacks->failed (&failed_event);
- mongoc_apm_command_failed_cleanup (&failed_event);
- }
-
if (reply_ptr == &reply_local) {
bson_destroy (reply_ptr);
}
-#ifdef MONGOC_ENABLE_COMPRESSION
bson_free (output);
-#endif
RETURN (ret);
}
+
+bool
+mongoc_cluster_is_not_master_error (const bson_error_t *error)
+{
+ return !strncmp (error->message, "not master", 10) ||
+ !strncmp (error->message, "node is recovering", 18);
+}
+
+
+static void
+handle_not_master_error (mongoc_cluster_t *cluster,
+ uint32_t server_id,
+ const bson_error_t *error)
+{
+ if (mongoc_cluster_is_not_master_error (error)) {
+ /* Server Discovery and Monitoring Spec: "When the client sees a 'not
+ * master' or 'node is recovering' error it MUST replace the server's
+ * description with a default ServerDescription of type Unknown."
+ */
+ mongoc_topology_invalidate_server (
+ cluster->client->topology, server_id, error);
+ }
+}
+
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_run_command_monitored --
*
* Internal function to run a command on a given stream.
* @error and @reply are optional out-pointers.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* If the client's APM callbacks are set, they are executed.
* @reply is set and should ALWAYS be released with bson_destroy().
*
*--------------------------------------------------------------------------
*/
bool
mongoc_cluster_run_command_monitored (mongoc_cluster_t *cluster,
- mongoc_cmd_parts_t *parts,
- mongoc_server_stream_t *server_stream,
+ mongoc_cmd_t *cmd,
bson_t *reply,
bson_error_t *error)
{
- int32_t compressor_id =
-#ifdef MONGOC_ENABLE_COMPRESSION
- mongoc_server_description_compressor_id (server_stream->sd);
-#else
- 0;
-#endif
+ bool retval;
+ uint32_t request_id = ++cluster->request_id;
+ uint32_t server_id;
+ mongoc_apm_callbacks_t *callbacks;
+ mongoc_apm_command_started_t started_event;
+ mongoc_apm_command_succeeded_t succeeded_event;
+ mongoc_apm_command_failed_t failed_event;
+ int64_t started = bson_get_monotonic_time ();
+ const mongoc_server_stream_t *server_stream;
+ bson_t reply_local;
+ bson_error_t error_local;
+ int32_t compressor_id;
+
+ server_stream = cmd->server_stream;
+ server_id = server_stream->sd->id;
+ compressor_id = mongoc_server_description_compressor_id (server_stream->sd);
+
+ callbacks = &cluster->client->apm_callbacks;
+ if (!reply) {
+ reply = &reply_local;
+ }
+ if (!error) {
+ error = &error_local;
+ }
- mongoc_cmd_parts_assemble (parts, server_stream);
+ if (callbacks->started) {
+ mongoc_apm_command_started_init_with_cmd (
+ &started_event, cmd, request_id, cluster->client->apm_context);
- return mongoc_cluster_run_command_internal (cluster,
- &parts->assembled,
- server_stream->stream,
- compressor_id,
- true,
- &server_stream->sd->host,
- reply,
- error);
+ callbacks->started (&started_event);
+ mongoc_apm_command_started_cleanup (&started_event);
+ }
+
+ if (server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG) {
+ retval = mongoc_cluster_run_opmsg (cluster, cmd, reply, error);
+ } else {
+ retval = mongoc_cluster_run_command_opquery (
+ cluster, cmd, server_stream->stream, compressor_id, reply, error);
+ }
+ if (retval && callbacks->succeeded) {
+ mongoc_apm_command_succeeded_init (&succeeded_event,
+ bson_get_monotonic_time () - started,
+ reply,
+ cmd->command_name,
+ request_id,
+ cmd->operation_id,
+ &server_stream->sd->host,
+ server_id,
+ cluster->client->apm_context);
+
+ callbacks->succeeded (&succeeded_event);
+ mongoc_apm_command_succeeded_cleanup (&succeeded_event);
+ }
+ if (!retval && callbacks->failed) {
+ mongoc_apm_command_failed_init (&failed_event,
+ bson_get_monotonic_time () - started,
+ cmd->command_name,
+ error,
+ request_id,
+ cmd->operation_id,
+ &server_stream->sd->host,
+ server_id,
+ cluster->client->apm_context);
+
+ callbacks->failed (&failed_event);
+ mongoc_apm_command_failed_cleanup (&failed_event);
+ }
+ if (!retval) {
+ handle_not_master_error (cluster, server_id, error);
+ }
+ if (reply == &reply_local) {
+ bson_destroy (&reply_local);
+ }
+
+ _mongoc_topology_update_last_used (cluster->client->topology, server_id);
+
+ return retval;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_run_command_private --
*
* Internal function to run a command on a given stream.
* @error and @reply are optional out-pointers.
* The client's APM callbacks are not executed.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* @reply is set and should ALWAYS be released with bson_destroy().
*
*--------------------------------------------------------------------------
*/
bool
mongoc_cluster_run_command_private (mongoc_cluster_t *cluster,
- mongoc_cmd_parts_t *parts,
- mongoc_stream_t *stream,
- uint32_t server_id,
+ mongoc_cmd_t *cmd,
bson_t *reply,
bson_error_t *error)
{
- mongoc_cmd_parts_assemble_simple (parts, server_id);
-
- /* monitored = false */
- return mongoc_cluster_run_command_internal (cluster,
- &parts->assembled,
- stream,
- 0,
- /* not monitored */
- false,
- NULL,
- reply,
- error);
+ bool retval;
+ const mongoc_server_stream_t *server_stream;
+ bson_t reply_local;
+ bson_error_t error_local;
+
+ if (!error) {
+ error = &error_local;
+ }
+
+ if (!reply) {
+ reply = &reply_local;
+ }
+ server_stream = cmd->server_stream;
+ if (server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG) {
+ retval = mongoc_cluster_run_opmsg (cluster, cmd, reply, error);
+ } else {
+ retval = mongoc_cluster_run_command_opquery (
+ cluster, cmd, cmd->server_stream->stream, -1, reply, error);
+ }
+ if (reply == &reply_local) {
+ bson_destroy (&reply_local);
+ }
+ if (!retval) {
+ handle_not_master_error (cluster, server_stream->sd->id, error);
+ }
+
+ _mongoc_topology_update_last_used (cluster->client->topology,
+ server_stream->sd->id);
+
+ return retval;
+}
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * mongoc_cluster_run_command_parts --
+ *
+ * Internal function to assemble command parts and run a command
+ * on a given stream. @error and @reply are optional out-pointers.
+ * The client's APM callbacks are not executed.
+ *
+ * Returns:
+ * true if successful; otherwise false and @error is set.
+ *
+ * Side effects:
+ * @reply is set and should ALWAYS be released with bson_destroy().
+ * mongoc_cmd_parts_cleanup will be always be called on parts. The
+ * caller should *not* call cleanup on the parts.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+bool
+mongoc_cluster_run_command_parts (mongoc_cluster_t *cluster,
+ mongoc_server_stream_t *server_stream,
+ mongoc_cmd_parts_t *parts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ bool ret;
+
+ if (!mongoc_cmd_parts_assemble (parts, server_stream, error)) {
+ _mongoc_bson_init_if_set (reply);
+ mongoc_cmd_parts_cleanup (parts);
+ return false;
+ }
+
+ ret = mongoc_cluster_run_command_private (
+ cluster, &parts->assembled, reply, error);
+ mongoc_cmd_parts_cleanup (parts);
+ return ret;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_stream_run_ismaster --
*
* Run an ismaster command on the given stream.
*
* Returns:
* A mongoc_server_description_t you must destroy. If the call failed
* its error is set and its type is MONGOC_SERVER_UNKNOWN.
*
*--------------------------------------------------------------------------
*/
static mongoc_server_description_t *
_mongoc_stream_run_ismaster (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
const char *address,
uint32_t server_id)
{
const bson_t *command;
mongoc_cmd_parts_t parts;
bson_t reply;
- bson_error_t error = {0};
+ bson_error_t error;
int64_t start;
int64_t rtt_msec;
mongoc_server_description_t *sd;
+ mongoc_server_stream_t *server_stream;
bool r;
ENTRY;
BSON_ASSERT (cluster);
BSON_ASSERT (stream);
command = _mongoc_topology_scanner_get_ismaster (
cluster->client->topology->scanner);
- mongoc_cmd_parts_init (&parts, "admin", MONGOC_QUERY_SLAVE_OK, command);
-
start = bson_get_monotonic_time ();
- mongoc_cluster_run_command_private (
- cluster, &parts, stream, server_id, &reply, &error);
+ server_stream = _mongoc_cluster_create_server_stream (
+ cluster->client->topology, server_id, stream, &error);
+ if (!server_stream) {
+ RETURN (NULL);
+ }
+
+ mongoc_cmd_parts_init (
+ &parts, cluster->client, "admin", MONGOC_QUERY_SLAVE_OK, command);
+ parts.prohibit_lsid = true;
+ if (!mongoc_cluster_run_command_parts (
+ cluster, server_stream, &parts, &reply, &error)) {
+ mongoc_server_stream_cleanup (server_stream);
+ RETURN (NULL);
+ }
rtt_msec = (bson_get_monotonic_time () - start) / 1000;
sd = (mongoc_server_description_t *) bson_malloc0 (
sizeof (mongoc_server_description_t));
mongoc_server_description_init (sd, address, server_id);
/* send the error from run_command IN to handle_ismaster */
mongoc_server_description_handle_ismaster (sd, &reply, rtt_msec, &error);
bson_destroy (&reply);
r = _mongoc_topology_update_from_handshake (cluster->client->topology, sd);
if (!r) {
mongoc_server_description_reset (sd);
bson_set_error (&sd->error,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_NOT_ESTABLISHED,
"\"%s\" removed from topology",
address);
}
+ mongoc_server_stream_cleanup (server_stream);
+
RETURN (sd);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_cluster_run_ismaster --
*
* Run an ismaster command for the given node and handle result.
*
* Returns:
- * True if ismaster ran successfully, false otherwise.
+ * mongoc_server_description_t on success, NULL otherwise.
+ * the mongoc_server_description_t MUST BE DESTROYED BY THE CALLER.
*
* Side effects:
* Makes a blocking I/O call, updates cluster->topology->description
* with ismaster result.
*
*--------------------------------------------------------------------------
*/
-static bool
+static mongoc_server_description_t *
_mongoc_cluster_run_ismaster (mongoc_cluster_t *cluster,
mongoc_cluster_node_t *node,
uint32_t server_id,
bson_error_t *error /* OUT */)
{
- bool r;
mongoc_server_description_t *sd;
ENTRY;
BSON_ASSERT (cluster);
BSON_ASSERT (node);
BSON_ASSERT (node->stream);
sd = _mongoc_stream_run_ismaster (
cluster, node->stream, node->connection_address, server_id);
if (sd->type == MONGOC_SERVER_UNKNOWN) {
- r = false;
memcpy (error, &sd->error, sizeof (bson_error_t));
+ mongoc_server_description_destroy (sd);
+ return NULL;
} else {
- r = true;
node->max_write_batch_size = sd->max_write_batch_size;
node->min_wire_version = sd->min_wire_version;
node->max_wire_version = sd->max_wire_version;
node->max_bson_obj_size = sd->max_bson_obj_size;
node->max_msg_size = sd->max_msg_size;
}
- mongoc_server_description_destroy (sd);
-
- return r;
+ return sd;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_cluster_build_basic_auth_digest --
*
* Computes the Basic Authentication digest using the credentials
* configured for @cluster and the @nonce provided.
*
* The result should be freed by the caller using bson_free() when
* they are finished with it.
*
* Returns:
* A newly allocated string containing the digest.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static char *
_mongoc_cluster_build_basic_auth_digest (mongoc_cluster_t *cluster,
                                         const char *nonce)
{
   const char *user;
   const char *pass;
   char *pass_with_realm;
   char *pass_md5;
   char *to_digest;
   char *digest;

   ENTRY;

   /*
    * Computes the MONGODB-CR digest from the credentials configured on
    * @cluster and the server-provided @nonce:
    *
    *    md5(nonce + username + md5(username + ":mongo:" + password))
    *
    * Format documented at:
    *
    *    http://docs.mongodb.org/meta-driver/latest/legacy/
    *    implement-authentication-in-driver/
    *
    * The returned string is heap-allocated; the caller frees it with
    * bson_free() when finished.
    */

   BSON_ASSERT (cluster);
   BSON_ASSERT (cluster->uri);

   user = mongoc_uri_get_username (cluster->uri);
   pass = mongoc_uri_get_password (cluster->uri);

   pass_with_realm = bson_strdup_printf ("%s:mongo:%s", user, pass);
   pass_md5 = _mongoc_hex_md5 (pass_with_realm);
   to_digest = bson_strdup_printf ("%s%s%s", nonce, user, pass_md5);
   digest = _mongoc_hex_md5 (to_digest);

   bson_free (to_digest);
   bson_free (pass_md5);
   bson_free (pass_with_realm);

   RETURN (digest);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_cluster_auth_node_cr --
*
* Performs authentication of @node using the credentials provided
* when configuring the @cluster instance.
*
* This is the Challenge-Response mode of authentication.
*
* Returns:
* true if authentication was successful; otherwise false and
* @error is set.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static bool
_mongoc_cluster_auth_node_cr (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
+ mongoc_server_description_t *sd,
bson_error_t *error)
{
mongoc_cmd_parts_t parts;
bson_iter_t iter;
const char *auth_source;
- bson_t command = {0};
- bson_t reply = {0};
+ bson_t command;
+ bson_t reply;
char *digest;
char *nonce;
bool ret;
+ mongoc_server_stream_t *server_stream;
ENTRY;
BSON_ASSERT (cluster);
BSON_ASSERT (stream);
if (!(auth_source = mongoc_uri_get_auth_source (cluster->uri)) ||
(*auth_source == '\0')) {
auth_source = "admin";
}
/*
* To authenticate a node using basic authentication, we need to first
* get the nonce from the server. We use that to hash our password which
* is sent as a reply to the server. If everything went good we get a
* success notification back from the server.
*/
/*
* Execute the getnonce command to fetch the nonce used for generating
* md5 digest of our password information.
*/
bson_init (&command);
bson_append_int32 (&command, "getnonce", 8, 1);
- mongoc_cmd_parts_init (&parts, auth_source, MONGOC_QUERY_SLAVE_OK, &command);
- if (!mongoc_cluster_run_command_private (
- cluster, &parts, stream, 0, &reply, error)) {
+ mongoc_cmd_parts_init (
+ &parts, cluster->client, auth_source, MONGOC_QUERY_SLAVE_OK, &command);
+ parts.prohibit_lsid = true;
+ server_stream = _mongoc_cluster_create_server_stream (
+ cluster->client->topology, sd->id, stream, error);
+
+ if (!mongoc_cluster_run_command_parts (
+ cluster, server_stream, &parts, &reply, error)) {
+ mongoc_server_stream_cleanup (server_stream);
bson_destroy (&command);
bson_destroy (&reply);
RETURN (false);
}
bson_destroy (&command);
if (!bson_iter_init_find_case (&iter, &reply, "nonce")) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_GETNONCE,
"Invalid reply from getnonce");
bson_destroy (&reply);
RETURN (false);
}
/*
* Build our command to perform the authentication.
*/
nonce = bson_iter_dup_utf8 (&iter, NULL);
digest = _mongoc_cluster_build_basic_auth_digest (cluster, nonce);
bson_init (&command);
bson_append_int32 (&command, "authenticate", 12, 1);
bson_append_utf8 (
&command, "user", 4, mongoc_uri_get_username (cluster->uri), -1);
bson_append_utf8 (&command, "nonce", 5, nonce, -1);
bson_append_utf8 (&command, "key", 3, digest, -1);
bson_destroy (&reply);
bson_free (nonce);
bson_free (digest);
/*
* Execute the authenticate command. mongoc_cluster_run_command_private
* checks for {ok: 1} in the response.
*/
- mongoc_cmd_parts_cleanup (&parts);
- mongoc_cmd_parts_init (&parts, auth_source, MONGOC_QUERY_SLAVE_OK, &command);
- ret = mongoc_cluster_run_command_private (
- cluster, &parts, stream, 0, &reply, error);
+ mongoc_cmd_parts_init (
+ &parts, cluster->client, auth_source, MONGOC_QUERY_SLAVE_OK, &command);
+ parts.prohibit_lsid = true;
+ ret = mongoc_cluster_run_command_parts (
+ cluster, server_stream, &parts, &reply, error);
+
if (!ret) {
/* error->message is already set */
error->domain = MONGOC_ERROR_CLIENT;
error->code = MONGOC_ERROR_CLIENT_AUTHENTICATE;
}
- mongoc_cmd_parts_cleanup (&parts);
+ mongoc_server_stream_cleanup (server_stream);
bson_destroy (&command);
bson_destroy (&reply);
RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_cluster_auth_node_plain --
*
* Perform SASL PLAIN authentication for @node. We do this manually
* instead of using the SASL module because its rather simplistic.
*
* Returns:
* true if successful; otherwise false and error is set.
*
* Side effects:
* error may be set.
*
*--------------------------------------------------------------------------
*/
static bool
_mongoc_cluster_auth_node_plain (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
+ mongoc_server_description_t *sd,
bson_error_t *error)
{
mongoc_cmd_parts_t parts;
char buf[4096];
int buflen = 0;
const char *username;
const char *password;
bson_t b = BSON_INITIALIZER;
bson_t reply;
size_t len;
char *str;
bool ret;
+ mongoc_server_stream_t *server_stream;
BSON_ASSERT (cluster);
BSON_ASSERT (stream);
username = mongoc_uri_get_username (cluster->uri);
if (!username) {
username = "";
}
password = mongoc_uri_get_password (cluster->uri);
if (!password) {
password = "";
}
str = bson_strdup_printf ("%c%s%c%s", '\0', username, '\0', password);
len = strlen (username) + strlen (password) + 2;
buflen = mongoc_b64_ntop ((const uint8_t *) str, len, buf, sizeof buf);
bson_free (str);
if (buflen == -1) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"failed base64 encoding message");
return false;
}
BSON_APPEND_INT32 (&b, "saslStart", 1);
BSON_APPEND_UTF8 (&b, "mechanism", "PLAIN");
bson_append_utf8 (&b, "payload", 7, (const char *) buf, buflen);
BSON_APPEND_INT32 (&b, "autoAuthorize", 1);
- mongoc_cmd_parts_init (&parts, "$external", MONGOC_QUERY_SLAVE_OK, &b);
- ret = mongoc_cluster_run_command_private (
- cluster, &parts, stream, 0, &reply, error);
-
+ mongoc_cmd_parts_init (
+ &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &b);
+ parts.prohibit_lsid = true;
+ server_stream = _mongoc_cluster_create_server_stream (
+ cluster->client->topology, sd->id, stream, error);
+ ret = mongoc_cluster_run_command_parts (
+ cluster, server_stream, &parts, &reply, error);
+ mongoc_server_stream_cleanup (server_stream);
if (!ret) {
/* error->message is already set */
error->domain = MONGOC_ERROR_CLIENT;
error->code = MONGOC_ERROR_CLIENT_AUTHENTICATE;
}
- mongoc_cmd_parts_cleanup (&parts);
bson_destroy (&b);
bson_destroy (&reply);
return ret;
}
#ifdef MONGOC_ENABLE_SSL
static bool
_mongoc_cluster_auth_node_x509 (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
+ mongoc_server_description_t *sd,
bson_error_t *error)
{
mongoc_cmd_parts_t parts;
const char *username_from_uri = NULL;
char *username_from_subject = NULL;
bson_t cmd;
bson_t reply;
bool ret;
+ mongoc_server_stream_t *server_stream;
BSON_ASSERT (cluster);
BSON_ASSERT (stream);
username_from_uri = mongoc_uri_get_username (cluster->uri);
if (username_from_uri) {
TRACE ("%s", "X509: got username from URI");
} else {
if (!cluster->client->ssl_opts.pem_file) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"cannot determine username for "
"X-509 authentication.");
return false;
}
username_from_subject = mongoc_ssl_extract_subject (
cluster->client->ssl_opts.pem_file, cluster->client->ssl_opts.pem_pwd);
if (!username_from_subject) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"No username provided for X509 authentication.");
return false;
}
TRACE ("%s", "X509: got username from certificate");
}
bson_init (&cmd);
BSON_APPEND_INT32 (&cmd, "authenticate", 1);
BSON_APPEND_UTF8 (&cmd, "mechanism", "MONGODB-X509");
BSON_APPEND_UTF8 (&cmd,
"user",
username_from_uri ? username_from_uri
: username_from_subject);
- mongoc_cmd_parts_init (&parts, "$external", MONGOC_QUERY_SLAVE_OK, &cmd);
- ret = mongoc_cluster_run_command_private (
- cluster, &parts, stream, 0, &reply, error);
-
+ mongoc_cmd_parts_init (
+ &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &cmd);
+ parts.prohibit_lsid = true;
+ server_stream = _mongoc_cluster_create_server_stream (
+ cluster->client->topology, sd->id, stream, error);
+ ret = mongoc_cluster_run_command_parts (
+ cluster, server_stream, &parts, &reply, error);
+ mongoc_server_stream_cleanup (server_stream);
if (!ret) {
/* error->message is already set */
error->domain = MONGOC_ERROR_CLIENT;
error->code = MONGOC_ERROR_CLIENT_AUTHENTICATE;
}
if (username_from_subject) {
bson_free (username_from_subject);
}
- mongoc_cmd_parts_cleanup (&parts);
bson_destroy (&cmd);
bson_destroy (&reply);
return ret;
}
#endif
#ifdef MONGOC_ENABLE_CRYPTO
static bool
_mongoc_cluster_auth_node_scram (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
+ mongoc_server_description_t *sd,
bson_error_t *error)
{
mongoc_cmd_parts_t parts;
uint32_t buflen = 0;
mongoc_scram_t scram;
bson_iter_t iter;
bool ret = false;
const char *tmpstr;
const char *auth_source;
uint8_t buf[4096] = {0};
bson_t cmd;
bson_t reply;
int conv_id = 0;
bson_subtype_t btype;
+ mongoc_server_stream_t *server_stream;
BSON_ASSERT (cluster);
BSON_ASSERT (stream);
if (!(auth_source = mongoc_uri_get_auth_source (cluster->uri)) ||
(*auth_source == '\0')) {
auth_source = "admin";
}
_mongoc_scram_init (&scram);
_mongoc_scram_set_pass (&scram, mongoc_uri_get_password (cluster->uri));
_mongoc_scram_set_user (&scram, mongoc_uri_get_username (cluster->uri));
if (*cluster->scram_client_key) {
_mongoc_scram_set_client_key (
&scram, cluster->scram_client_key, sizeof (cluster->scram_client_key));
}
if (*cluster->scram_server_key) {
_mongoc_scram_set_server_key (
&scram, cluster->scram_server_key, sizeof (cluster->scram_server_key));
}
if (*cluster->scram_salted_password) {
_mongoc_scram_set_salted_password (
&scram,
cluster->scram_salted_password,
sizeof (cluster->scram_salted_password));
}
for (;;) {
if (!_mongoc_scram_step (
&scram, buf, buflen, buf, sizeof buf, &buflen, error)) {
goto failure;
}
bson_init (&cmd);
if (scram.step == 1) {
BSON_APPEND_INT32 (&cmd, "saslStart", 1);
BSON_APPEND_UTF8 (&cmd, "mechanism", "SCRAM-SHA-1");
bson_append_binary (
&cmd, "payload", 7, BSON_SUBTYPE_BINARY, buf, buflen);
BSON_APPEND_INT32 (&cmd, "autoAuthorize", 1);
} else {
BSON_APPEND_INT32 (&cmd, "saslContinue", 1);
BSON_APPEND_INT32 (&cmd, "conversationId", conv_id);
bson_append_binary (
&cmd, "payload", 7, BSON_SUBTYPE_BINARY, buf, buflen);
}
TRACE ("SCRAM: authenticating (step %d)", scram.step);
- mongoc_cmd_parts_init (&parts, auth_source, MONGOC_QUERY_SLAVE_OK, &cmd);
- if (!mongoc_cluster_run_command_private (
- cluster, &parts, stream, 0, &reply, error)) {
+ mongoc_cmd_parts_init (
+ &parts, cluster->client, auth_source, MONGOC_QUERY_SLAVE_OK, &cmd);
+ parts.prohibit_lsid = true;
+ server_stream = _mongoc_cluster_create_server_stream (
+ cluster->client->topology, sd->id, stream, error);
+ if (!mongoc_cluster_run_command_parts (
+ cluster, server_stream, &parts, &reply, error)) {
+ mongoc_server_stream_cleanup (server_stream);
bson_destroy (&cmd);
bson_destroy (&reply);
/* error->message is already set */
error->domain = MONGOC_ERROR_CLIENT;
error->code = MONGOC_ERROR_CLIENT_AUTHENTICATE;
goto failure;
}
+ mongoc_server_stream_cleanup (server_stream);
- mongoc_cmd_parts_cleanup (&parts);
bson_destroy (&cmd);
if (bson_iter_init_find (&iter, &reply, "done") &&
bson_iter_as_bool (&iter)) {
bson_destroy (&reply);
break;
}
if (!bson_iter_init_find (&iter, &reply, "conversationId") ||
!BSON_ITER_HOLDS_INT32 (&iter) ||
!(conv_id = bson_iter_int32 (&iter)) ||
!bson_iter_init_find (&iter, &reply, "payload") ||
!BSON_ITER_HOLDS_BINARY (&iter)) {
const char *errmsg =
"Received invalid SCRAM reply from MongoDB server.";
MONGOC_DEBUG ("SCRAM: authentication failed");
if (bson_iter_init_find (&iter, &reply, "errmsg") &&
BSON_ITER_HOLDS_UTF8 (&iter)) {
errmsg = bson_iter_utf8 (&iter, NULL);
}
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"%s",
errmsg);
bson_destroy (&reply);
goto failure;
}
bson_iter_binary (&iter, &btype, &buflen, (const uint8_t **) &tmpstr);
if (buflen > sizeof buf) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"SCRAM reply from MongoDB is too large.");
bson_destroy (&reply);
goto failure;
}
memcpy (buf, tmpstr, buflen);
bson_destroy (&reply);
}
TRACE ("%s", "SCRAM: authenticated");
ret = true;
memcpy (cluster->scram_client_key,
scram.client_key,
sizeof (cluster->scram_client_key));
memcpy (cluster->scram_server_key,
scram.server_key,
sizeof (cluster->scram_server_key));
memcpy (cluster->scram_salted_password,
scram.salted_password,
sizeof (cluster->scram_salted_password));
failure:
_mongoc_scram_destroy (&scram);
return ret;
}
#endif
/*
*--------------------------------------------------------------------------
*
* _mongoc_cluster_auth_node --
*
* Authenticate a cluster node depending on the required mechanism.
*
* Returns:
* true if authenticated. false on failure and @error is set.
*
* Side effects:
* @error is set on failure.
*
*--------------------------------------------------------------------------
*/
static bool
_mongoc_cluster_auth_node (mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
- const char *hostname,
- int32_t max_wire_version,
+ mongoc_server_description_t *sd,
bson_error_t *error)
{
bool ret = false;
const char *mechanism;
ENTRY;
BSON_ASSERT (cluster);
BSON_ASSERT (stream);
mechanism = mongoc_uri_get_auth_mechanism (cluster->uri);
- /* Use cached max_wire_version, not value from sd */
if (!mechanism) {
- if (max_wire_version < WIRE_VERSION_SCRAM_DEFAULT) {
+ if (sd->max_wire_version < WIRE_VERSION_SCRAM_DEFAULT) {
mechanism = "MONGODB-CR";
} else {
mechanism = "SCRAM-SHA-1";
}
}
if (0 == strcasecmp (mechanism, "MONGODB-CR")) {
- ret = _mongoc_cluster_auth_node_cr (cluster, stream, error);
+ ret = _mongoc_cluster_auth_node_cr (cluster, stream, sd, error);
} else if (0 == strcasecmp (mechanism, "MONGODB-X509")) {
#ifdef MONGOC_ENABLE_SSL
- ret = _mongoc_cluster_auth_node_x509 (cluster, stream, error);
+ ret = _mongoc_cluster_auth_node_x509 (cluster, stream, sd, error);
#else
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"The \"%s\" authentication mechanism requires libmongoc "
"built with --enable-ssl",
mechanism);
#endif
} else if (0 == strcasecmp (mechanism, "SCRAM-SHA-1")) {
#ifdef MONGOC_ENABLE_CRYPTO
- ret = _mongoc_cluster_auth_node_scram (cluster, stream, error);
+ ret = _mongoc_cluster_auth_node_scram (cluster, stream, sd, error);
#else
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"The \"%s\" authentication mechanism requires libmongoc "
"built with --enable-ssl",
mechanism);
#endif
} else if (0 == strcasecmp (mechanism, "GSSAPI")) {
#ifdef MONGOC_ENABLE_SASL
- ret = _mongoc_cluster_auth_node_sasl (cluster, stream, hostname, error);
+ ret = _mongoc_cluster_auth_node_sasl (cluster, stream, sd, error);
#else
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"The \"%s\" authentication mechanism requires libmongoc "
"built with --enable-sasl",
mechanism);
#endif
} else if (0 == strcasecmp (mechanism, "PLAIN")) {
- ret = _mongoc_cluster_auth_node_plain (cluster, stream, error);
+ ret = _mongoc_cluster_auth_node_plain (cluster, stream, sd, error);
} else {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"Unknown authentication mechanism \"%s\".",
mechanism);
}
if (!ret) {
mongoc_counter_auth_failure_inc ();
MONGOC_DEBUG ("Authentication failed: %s", error->message);
} else {
mongoc_counter_auth_success_inc ();
TRACE ("%s", "Authentication succeeded");
}
RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_disconnect_node --
*
* Remove a node from the set of nodes. This should be done if
* a stream in the set is found to be invalid. If @invalidate is
* true, also mark the server Unknown in the topology description,
* passing the error information from @why as the reason.
*
* WARNING: pointers to a disconnected mongoc_cluster_node_t or
* its stream are now invalid, be careful of dangling pointers.
*
* Returns:
* None.
*
* Side effects:
* Removes node from cluster's set of nodes, and frees the
* mongoc_cluster_node_t if pooled.
*
*--------------------------------------------------------------------------
*/
void
mongoc_cluster_disconnect_node (mongoc_cluster_t *cluster,
uint32_t server_id,
bool invalidate,
const bson_error_t *why /* IN */)
{
mongoc_topology_t *topology = cluster->client->topology;
ENTRY;
if (topology->single_threaded) {
mongoc_topology_scanner_node_t *scanner_node;
scanner_node =
mongoc_topology_scanner_get_node (topology->scanner, server_id);
/* might never actually have connected */
if (scanner_node && scanner_node->stream) {
mongoc_topology_scanner_node_disconnect (scanner_node, true);
}
} else {
mongoc_set_rm (cluster->nodes, server_id);
}
if (invalidate) {
mongoc_topology_invalidate_server (topology, server_id, why);
}
EXIT;
}
static void
_mongoc_cluster_node_destroy (mongoc_cluster_node_t *node)
{
/* Failure, or Replica Set reconfigure without this node */
mongoc_stream_failed (node->stream);
bson_free (node->connection_address);
bson_free (node);
}
static void
_mongoc_cluster_node_dtor (void *data_, void *ctx_)
{
mongoc_cluster_node_t *node = (mongoc_cluster_node_t *) data_;
_mongoc_cluster_node_destroy (node);
}
static mongoc_cluster_node_t *
_mongoc_cluster_node_new (mongoc_stream_t *stream,
const char *connection_address)
{
mongoc_cluster_node_t *node;
if (!stream) {
return NULL;
}
node = (mongoc_cluster_node_t *) bson_malloc0 (sizeof *node);
node->stream = stream;
node->connection_address = bson_strdup (connection_address);
node->timestamp = bson_get_monotonic_time ();
node->max_wire_version = MONGOC_DEFAULT_WIRE_VERSION;
node->min_wire_version = MONGOC_DEFAULT_WIRE_VERSION;
node->max_write_batch_size = MONGOC_DEFAULT_WRITE_BATCH_SIZE;
node->max_bson_obj_size = MONGOC_DEFAULT_BSON_OBJ_SIZE;
node->max_msg_size = MONGOC_DEFAULT_MAX_MSG_SIZE;
return node;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_add_node --
*
* Add a new node to this cluster for the given server description.
*
* NOTE: does NOT check if this server is already in the cluster.
*
* Returns:
* A stream connected to the server, or NULL on failure.
*
* Side effects:
* Adds a cluster node, or sets error on failure.
*
*--------------------------------------------------------------------------
*/
static mongoc_stream_t *
_mongoc_cluster_add_node (mongoc_cluster_t *cluster,
uint32_t server_id,
bson_error_t *error /* OUT */)
{
mongoc_host_list_t *host = NULL;
mongoc_cluster_node_t *cluster_node = NULL;
mongoc_stream_t *stream;
+ mongoc_server_description_t *sd;
ENTRY;
BSON_ASSERT (cluster);
BSON_ASSERT (!cluster->client->topology->single_threaded);
host =
_mongoc_topology_host_by_id (cluster->client->topology, server_id, error);
if (!host) {
GOTO (error);
}
TRACE ("Adding new server to cluster: %s", host->host_and_port);
stream = _mongoc_client_create_stream (cluster->client, host, error);
if (!stream) {
MONGOC_WARNING (
"Failed connection to %s (%s)", host->host_and_port, error->message);
GOTO (error);
}
/* take critical fields from a fresh ismaster */
cluster_node = _mongoc_cluster_node_new (stream, host->host_and_port);
- if (!_mongoc_cluster_run_ismaster (
- cluster, cluster_node, server_id, error)) {
+ sd = _mongoc_cluster_run_ismaster (cluster, cluster_node, server_id, error);
+ if (!sd) {
GOTO (error);
}
if (cluster->requires_auth) {
- if (!_mongoc_cluster_auth_node (cluster,
- cluster_node->stream,
- host->host,
- cluster_node->max_wire_version,
- error)) {
+ if (!_mongoc_cluster_auth_node (
+ cluster, cluster_node->stream, sd, error)) {
MONGOC_WARNING ("Failed authentication to %s (%s)",
host->host_and_port,
error->message);
+ mongoc_server_description_destroy (sd);
GOTO (error);
}
}
+ mongoc_server_description_destroy (sd);
mongoc_set_add (cluster->nodes, server_id, cluster_node);
_mongoc_host_list_destroy_all (host);
RETURN (stream);
error:
_mongoc_host_list_destroy_all (host); /* null ok */
if (cluster_node) {
_mongoc_cluster_node_destroy (cluster_node); /* also destroys stream */
}
RETURN (NULL);
}
static void
node_not_found (mongoc_topology_t *topology,
uint32_t server_id,
bson_error_t *error /* OUT */)
{
mongoc_server_description_t *sd;
if (!error) {
return;
}
sd = mongoc_topology_server_by_id (topology, server_id, error);
if (!sd) {
return;
}
if (sd->error.code) {
memcpy (error, &sd->error, sizeof *error);
} else {
bson_set_error (error,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_NOT_ESTABLISHED,
"Could not find node %s",
sd->host.host_and_port);
}
mongoc_server_description_destroy (sd);
}
static void
stream_not_found (mongoc_topology_t *topology,
uint32_t server_id,
const char *connection_address,
bson_error_t *error /* OUT */)
{
mongoc_server_description_t *sd;
sd = mongoc_topology_server_by_id (topology, server_id, error);
if (error) {
if (sd && sd->error.code) {
memcpy (error, &sd->error, sizeof *error);
} else {
bson_set_error (error,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_NOT_ESTABLISHED,
"Could not find stream for node %s",
connection_address);
}
}
if (sd) {
mongoc_server_description_destroy (sd);
}
}
mongoc_server_stream_t *
_mongoc_cluster_stream_for_server (mongoc_cluster_t *cluster,
uint32_t server_id,
bool reconnect_ok,
bson_error_t *error /* OUT */)
{
mongoc_topology_t *topology;
mongoc_server_stream_t *server_stream;
bson_error_t err_local;
/* if fetch_stream fails we need a place to receive error details and pass
* them to mongoc_topology_invalidate_server. */
bson_error_t *err_ptr = error ? error : &err_local;
ENTRY;
topology = cluster->client->topology;
/* in the single-threaded use case we share topology's streams */
if (topology->single_threaded) {
server_stream = mongoc_cluster_fetch_stream_single (
cluster, server_id, reconnect_ok, err_ptr);
} else {
server_stream = mongoc_cluster_fetch_stream_pooled (
cluster, server_id, reconnect_ok, err_ptr);
}
if (!server_stream) {
/* Server Discovery And Monitoring Spec: "When an application operation
* fails because of any network error besides a socket timeout, the
* client MUST replace the server's description with a default
* ServerDescription of type Unknown, and fill the ServerDescription's
* error field with useful information."
*
* error was filled by fetch_stream_single/pooled, pass it to disconnect()
*/
mongoc_cluster_disconnect_node (cluster, server_id, true, err_ptr);
}
RETURN (server_stream);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_stream_for_server --
*
* Fetch the stream for @server_id. If @reconnect_ok and there is no
* valid stream, attempts to reconnect; if not @reconnect_ok then only
* an existing stream can be returned, or NULL.
*
* Returns:
* A mongoc_server_stream_t, or NULL
*
* Side effects:
* May add a node or reconnect one, if @reconnect_ok.
* Authenticates the stream if needed.
* May set @error.
*
*--------------------------------------------------------------------------
*/
mongoc_server_stream_t *
mongoc_cluster_stream_for_server (mongoc_cluster_t *cluster,
uint32_t server_id,
bool reconnect_ok,
bson_error_t *error)
{
mongoc_server_stream_t *server_stream = NULL;
bson_error_t err_local = {0};
ENTRY;
BSON_ASSERT (cluster);
BSON_ASSERT (server_id);
if (!error) {
error = &err_local;
}
server_stream = _mongoc_cluster_stream_for_server (
cluster, server_id, reconnect_ok, error);
if (!server_stream) {
/* failed */
mongoc_cluster_disconnect_node (cluster, server_id, true, error);
}
RETURN (server_stream);
}
static mongoc_server_stream_t *
mongoc_cluster_fetch_stream_single (mongoc_cluster_t *cluster,
uint32_t server_id,
bool reconnect_ok,
bson_error_t *error /* OUT */)
{
mongoc_topology_t *topology;
mongoc_server_description_t *sd;
mongoc_stream_t *stream;
mongoc_topology_scanner_node_t *scanner_node;
int64_t expire_at;
topology = cluster->client->topology;
scanner_node =
mongoc_topology_scanner_get_node (topology->scanner, server_id);
BSON_ASSERT (scanner_node && !scanner_node->retired);
stream = scanner_node->stream;
if (stream) {
sd = mongoc_topology_server_by_id (topology, server_id, error);
if (!sd) {
return NULL;
}
} else {
if (!reconnect_ok) {
stream_not_found (
topology, server_id, scanner_node->host.host_and_port, error);
return NULL;
}
if (!mongoc_topology_scanner_node_setup (scanner_node, error)) {
return NULL;
}
stream = scanner_node->stream;
expire_at =
bson_get_monotonic_time () + topology->connect_timeout_msec * 1000;
if (!mongoc_stream_wait (stream, expire_at)) {
bson_set_error (error,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_CONNECT,
"Failed to connect to target host: '%s'",
scanner_node->host.host_and_port);
return NULL;
}
#ifdef MONGOC_ENABLE_SSL
if (cluster->client->use_ssl) {
bool r;
mongoc_stream_t *tls_stream;
for (tls_stream = stream; tls_stream->type != MONGOC_STREAM_TLS;
tls_stream = mongoc_stream_get_base_stream (tls_stream)) {
}
r = mongoc_stream_tls_handshake_block (
tls_stream,
scanner_node->host.host,
(int32_t) topology->connect_timeout_msec * 1000,
error);
if (!r) {
mongoc_topology_scanner_node_disconnect (scanner_node, true);
return NULL;
}
}
#endif
sd = _mongoc_stream_run_ismaster (
cluster, stream, scanner_node->host.host_and_port, server_id);
+
+ if (!sd) {
+ return NULL;
+ }
}
if (sd->type == MONGOC_SERVER_UNKNOWN) {
memcpy (error, &sd->error, sizeof *error);
mongoc_server_description_destroy (sd);
return NULL;
}
/* stream open but not auth'ed: first use since connect or reconnect */
if (cluster->requires_auth && !scanner_node->has_auth) {
- if (!_mongoc_cluster_auth_node (cluster,
- stream,
- sd->host.host,
- sd->max_wire_version,
- &sd->error)) {
+ if (!_mongoc_cluster_auth_node (cluster, stream, sd, &sd->error)) {
memcpy (error, &sd->error, sizeof *error);
mongoc_server_description_destroy (sd);
return NULL;
}
scanner_node->has_auth = true;
}
- return mongoc_server_stream_new (topology->description.type, sd, stream);
+ return mongoc_server_stream_new (&topology->description, sd, stream);
}
-static mongoc_server_stream_t *
+mongoc_server_stream_t *
_mongoc_cluster_create_server_stream (mongoc_topology_t *topology,
uint32_t server_id,
mongoc_stream_t *stream,
bson_error_t *error /* OUT */)
{
mongoc_server_description_t *sd;
+ mongoc_server_stream_t *server_stream = NULL;
- sd = mongoc_topology_server_by_id (topology, server_id, error);
+ /* can't just use mongoc_topology_server_by_id(), since we must hold the
+ * lock while copying topology->description.logical_time below */
+ mongoc_mutex_lock (&topology->mutex);
- if (!sd) {
- return NULL;
+ sd = mongoc_server_description_new_copy (
+ mongoc_topology_description_server_by_id (
+ &topology->description, server_id, error));
+
+ if (sd) {
+ server_stream =
+ mongoc_server_stream_new (&topology->description, sd, stream);
}
- return mongoc_server_stream_new (
- _mongoc_topology_get_type (topology), sd, stream);
+ mongoc_mutex_unlock (&topology->mutex);
+
+ return server_stream;
}
static mongoc_server_stream_t *
mongoc_cluster_fetch_stream_pooled (mongoc_cluster_t *cluster,
uint32_t server_id,
bool reconnect_ok,
bson_error_t *error /* OUT */)
{
mongoc_topology_t *topology;
mongoc_stream_t *stream;
mongoc_cluster_node_t *cluster_node;
int64_t timestamp;
cluster_node =
(mongoc_cluster_node_t *) mongoc_set_get (cluster->nodes, server_id);
topology = cluster->client->topology;
if (cluster_node) {
BSON_ASSERT (cluster_node->stream);
timestamp = mongoc_topology_server_timestamp (topology, server_id);
if (timestamp == -1 || cluster_node->timestamp < timestamp) {
/* topology change or net error during background scan made us remove
* or replace server description since node's birth. destroy node. */
mongoc_cluster_disconnect_node (
cluster, server_id, false /* invalidate */, NULL);
} else {
return _mongoc_cluster_create_server_stream (
topology, server_id, cluster_node->stream, error);
}
}
/* no node, or out of date */
if (!reconnect_ok) {
node_not_found (topology, server_id, error);
return NULL;
}
stream = _mongoc_cluster_add_node (cluster, server_id, error);
if (stream) {
return _mongoc_cluster_create_server_stream (
topology, server_id, stream, error);
} else {
return NULL;
}
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_init --
*
* Initializes @cluster using the @uri and @client provided. The
* @uri is used to determine the "mode" of the cluster. Based on the
* uri we can determine if we are connected to a single host, a
* replicaSet, or a shardedCluster.
*
* Returns:
* None.
*
* Side effects:
* @cluster is initialized.
*
*--------------------------------------------------------------------------
*/
void
mongoc_cluster_init (mongoc_cluster_t *cluster,
const mongoc_uri_t *uri,
void *client)
{
ENTRY;
BSON_ASSERT (cluster);
BSON_ASSERT (uri);
memset (cluster, 0, sizeof *cluster);
cluster->uri = mongoc_uri_copy (uri);
cluster->client = (mongoc_client_t *) client;
cluster->requires_auth =
(mongoc_uri_get_username (uri) || mongoc_uri_get_auth_mechanism (uri));
cluster->sockettimeoutms = mongoc_uri_get_option_as_int32 (
uri, MONGOC_URI_SOCKETTIMEOUTMS, MONGOC_DEFAULT_SOCKETTIMEOUTMS);
cluster->socketcheckintervalms =
mongoc_uri_get_option_as_int32 (uri,
MONGOC_URI_SOCKETCHECKINTERVALMS,
MONGOC_TOPOLOGY_SOCKET_CHECK_INTERVAL_MS);
/* TODO for single-threaded case we don't need this */
cluster->nodes = mongoc_set_new (8, _mongoc_cluster_node_dtor, NULL);
_mongoc_array_init (&cluster->iov, sizeof (mongoc_iovec_t));
cluster->operation_id = rand ();
EXIT;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_destroy --
*
* Clean up after @cluster and destroy all active connections.
* All resources for @cluster are released.
*
* Returns:
* None.
*
* Side effects:
* Everything.
*
*--------------------------------------------------------------------------
*/
void
mongoc_cluster_destroy (mongoc_cluster_t *cluster) /* INOUT */
{
ENTRY;
BSON_ASSERT (cluster);
mongoc_uri_destroy (cluster->uri);
mongoc_set_destroy (cluster->nodes);
_mongoc_array_destroy (&cluster->iov);
EXIT;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_stream_for_optype --
*
* Internal server selection.
*
* Returns:
* A mongoc_server_stream_t on which you must call
* mongoc_server_stream_cleanup, or NULL on failure (sets @error)
*
* Side effects:
* May set @error.
* May add new nodes to @cluster->nodes.
*
*--------------------------------------------------------------------------
*/
static mongoc_server_stream_t *
_mongoc_cluster_stream_for_optype (mongoc_cluster_t *cluster,
mongoc_ss_optype_t optype,
const mongoc_read_prefs_t *read_prefs,
bson_error_t *error)
{
mongoc_server_stream_t *server_stream;
uint32_t server_id;
mongoc_topology_t *topology = cluster->client->topology;
ENTRY;
BSON_ASSERT (cluster);
server_id =
mongoc_topology_select_server_id (topology, optype, read_prefs, error);
if (!server_id) {
RETURN (NULL);
}
if (!mongoc_cluster_check_interval (cluster, server_id)) {
/* Server Selection Spec: try once more */
server_id =
mongoc_topology_select_server_id (topology, optype, read_prefs, error);
if (!server_id) {
RETURN (NULL);
}
}
/* connect or reconnect to server if necessary */
server_stream = _mongoc_cluster_stream_for_server (
cluster, server_id, true /* reconnect_ok */, error);
RETURN (server_stream);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_stream_for_reads --
*
* Internal server selection.
*
* Returns:
* A mongoc_server_stream_t on which you must call
* mongoc_server_stream_cleanup, or NULL on failure (sets @error)
*
* Side effects:
* May set @error.
* May add new nodes to @cluster->nodes.
*
*--------------------------------------------------------------------------
*/
mongoc_server_stream_t *
mongoc_cluster_stream_for_reads (mongoc_cluster_t *cluster,
const mongoc_read_prefs_t *read_prefs,
bson_error_t *error)
{
return _mongoc_cluster_stream_for_optype (
cluster, MONGOC_SS_READ, read_prefs, error);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_stream_for_writes --
*
* Get a stream for write operations.
*
* Returns:
* A mongoc_server_stream_t on which you must call
* mongoc_server_stream_cleanup, or NULL on failure (sets @error)
*
* Side effects:
* May set @error.
* May add new nodes to @cluster->nodes.
*
*--------------------------------------------------------------------------
*/
mongoc_server_stream_t *
mongoc_cluster_stream_for_writes (mongoc_cluster_t *cluster,
bson_error_t *error)
{
return _mongoc_cluster_stream_for_optype (
cluster, MONGOC_SS_WRITE, NULL, error);
}
static bool
_mongoc_cluster_min_of_max_obj_size_sds (void *item, void *ctx)
{
mongoc_server_description_t *sd = (mongoc_server_description_t *) item;
int32_t *current_min = (int32_t *) ctx;
if (sd->max_bson_obj_size < *current_min) {
*current_min = sd->max_bson_obj_size;
}
return true;
}
static bool
_mongoc_cluster_min_of_max_obj_size_nodes (void *item, void *ctx)
{
mongoc_cluster_node_t *node = (mongoc_cluster_node_t *) item;
int32_t *current_min = (int32_t *) ctx;
if (node->max_bson_obj_size < *current_min) {
*current_min = node->max_bson_obj_size;
}
return true;
}
static bool
_mongoc_cluster_min_of_max_msg_size_sds (void *item, void *ctx)
{
mongoc_server_description_t *sd = (mongoc_server_description_t *) item;
int32_t *current_min = (int32_t *) ctx;
if (sd->max_msg_size < *current_min) {
*current_min = sd->max_msg_size;
}
return true;
}
static bool
_mongoc_cluster_min_of_max_msg_size_nodes (void *item, void *ctx)
{
mongoc_cluster_node_t *node = (mongoc_cluster_node_t *) item;
int32_t *current_min = (int32_t *) ctx;
if (node->max_msg_size < *current_min) {
*current_min = node->max_msg_size;
}
return true;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_get_max_bson_obj_size --
*
* Return the minimum max_bson_obj_size across all servers in cluster.
*
* NOTE: this method uses the topology's mutex.
*
* Returns:
* The minimum max_bson_obj_size.
*
* Side effects:
* None
*
*--------------------------------------------------------------------------
*/
int32_t
mongoc_cluster_get_max_bson_obj_size (mongoc_cluster_t *cluster)
{
int32_t max_bson_obj_size = -1;
max_bson_obj_size = MONGOC_DEFAULT_BSON_OBJ_SIZE;
if (!cluster->client->topology->single_threaded) {
mongoc_set_for_each (cluster->nodes,
_mongoc_cluster_min_of_max_obj_size_nodes,
&max_bson_obj_size);
} else {
mongoc_set_for_each (cluster->client->topology->description.servers,
_mongoc_cluster_min_of_max_obj_size_sds,
&max_bson_obj_size);
}
return max_bson_obj_size;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_get_max_msg_size --
*
* Return the minimum max msg size across all servers in cluster.
*
* NOTE: this method uses the topology's mutex.
*
* Returns:
* The minimum max_msg_size
*
* Side effects:
* None
*
*--------------------------------------------------------------------------
*/
int32_t
mongoc_cluster_get_max_msg_size (mongoc_cluster_t *cluster)
{
int32_t max_msg_size = MONGOC_DEFAULT_MAX_MSG_SIZE;
if (!cluster->client->topology->single_threaded) {
mongoc_set_for_each (cluster->nodes,
_mongoc_cluster_min_of_max_msg_size_nodes,
&max_msg_size);
} else {
mongoc_set_for_each (cluster->client->topology->description.servers,
_mongoc_cluster_min_of_max_msg_size_sds,
&max_msg_size);
}
return max_msg_size;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_check_interval --
*
* Server Selection Spec:
*
* Only for single-threaded drivers.
*
* If a server is selected that has an existing connection that has been
* idle for socketCheckIntervalMS, the driver MUST check the connection
* with the "ping" command. If the ping succeeds, use the selected
* connection. If not, set the server's type to Unknown and update the
* Topology Description according to the Server Discovery and Monitoring
* Spec, and attempt once more to select a server.
*
* Returns:
* True if the check succeeded or no check was required, false if the
* check failed.
*
* Side effects:
* If a check fails, closes stream and may set server type Unknown.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_cluster_check_interval (mongoc_cluster_t *cluster, uint32_t server_id)
{
mongoc_cmd_parts_t parts;
mongoc_topology_t *topology;
mongoc_topology_scanner_node_t *scanner_node;
mongoc_stream_t *stream;
int64_t now;
bson_t command;
bson_error_t error;
bool r = true;
+ mongoc_server_stream_t *server_stream;
topology = cluster->client->topology;
if (!topology->single_threaded) {
return true;
}
scanner_node =
mongoc_topology_scanner_get_node (topology->scanner, server_id);
if (!scanner_node) {
return false;
}
BSON_ASSERT (!scanner_node->retired);
stream = scanner_node->stream;
if (!stream) {
return false;
}
now = bson_get_monotonic_time ();
if (scanner_node->last_used + (1000 * CHECK_CLOSED_DURATION_MSEC) < now) {
if (mongoc_stream_check_closed (stream)) {
bson_set_error (&error,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_SOCKET,
"connection closed");
mongoc_cluster_disconnect_node (cluster, server_id, true, &error);
return false;
}
}
if (scanner_node->last_used + (1000 * cluster->socketcheckintervalms) <
now) {
bson_init (&command);
BSON_APPEND_INT32 (&command, "ping", 1);
- mongoc_cmd_parts_init (&parts, "admin", MONGOC_QUERY_SLAVE_OK, &command);
- r = mongoc_cluster_run_command_private (
- cluster, &parts, stream, server_id, NULL, &error /* OUT */);
-
- mongoc_cmd_parts_cleanup (&parts);
+ mongoc_cmd_parts_init (
+ &parts, cluster->client, "admin", MONGOC_QUERY_SLAVE_OK, &command);
+ parts.prohibit_lsid = true;
+ server_stream = _mongoc_cluster_create_server_stream (
+ cluster->client->topology, server_id, stream, &error);
+ r = mongoc_cluster_run_command_parts (
+ cluster, server_stream, &parts, NULL, &error);
+
+ mongoc_server_stream_cleanup (server_stream);
bson_destroy (&command);
if (!r) {
mongoc_cluster_disconnect_node (cluster, server_id, true, &error);
}
}
return r;
}
/*
*--------------------------------------------------------------------------
*
- * mongoc_cluster_sendv_to_server --
+ * mongoc_cluster_legacy_rpc_sendv_to_server --
*
- * Sends the given RPCs to the given server.
+ * Sends the given RPCs to the given server. Used for OP_QUERY cursors,
+ * OP_KILLCURSORS, and legacy writes with OP_INSERT, OP_UPDATE, and
+ * OP_DELETE. This function is *not* in the OP_QUERY command path.
*
* Returns:
* True if successful.
*
* Side effects:
* @rpc may be mutated and should be considered invalid after calling
* this method.
*
* @error may be set.
*
*--------------------------------------------------------------------------
*/
bool
-mongoc_cluster_sendv_to_server (mongoc_cluster_t *cluster,
- mongoc_rpc_t *rpc,
- mongoc_server_stream_t *server_stream,
- const mongoc_write_concern_t *write_concern,
- bson_error_t *error)
+mongoc_cluster_legacy_rpc_sendv_to_server (
+ mongoc_cluster_t *cluster,
+ mongoc_rpc_t *rpc,
+ mongoc_server_stream_t *server_stream,
+ bson_error_t *error)
{
uint32_t server_id;
- mongoc_topology_scanner_node_t *scanner_node;
- const bson_t *b;
- mongoc_rpc_t gle;
- bool need_gle;
- char cmdname[140];
int32_t max_msg_size;
bool ret = false;
-#ifdef MONGOC_ENABLE_COMPRESSION
int32_t compressor_id = 0;
char *output = NULL;
-#endif
ENTRY;
BSON_ASSERT (cluster);
BSON_ASSERT (rpc);
BSON_ASSERT (server_stream);
server_id = server_stream->sd->id;
if (cluster->client->in_exhaust) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_IN_EXHAUST,
"A cursor derived from this client is in exhaust.");
GOTO (done);
}
- if (!write_concern) {
- write_concern = cluster->client->write_concern;
- }
-
_mongoc_array_clear (&cluster->iov);
-#ifdef MONGOC_ENABLE_COMPRESSION
compressor_id = mongoc_server_description_compressor_id (server_stream->sd);
-#endif
- need_gle = _mongoc_rpc_needs_gle (rpc, write_concern);
- _mongoc_cluster_inc_egress_rpc (rpc);
_mongoc_rpc_gather (rpc, &cluster->iov);
_mongoc_rpc_swab_to_le (rpc);
-#ifdef MONGOC_ENABLE_COMPRESSION
- if (compressor_id) {
+ if (compressor_id != -1) {
output = _mongoc_rpc_compress (cluster, compressor_id, rpc, error);
if (output == NULL) {
GOTO (done);
}
}
-#endif
max_msg_size = mongoc_server_stream_max_msg_size (server_stream);
if (BSON_UINT32_FROM_LE (rpc->header.msg_len) > max_msg_size) {
bson_set_error (error,
MONGOC_ERROR_CLIENT,
MONGOC_ERROR_CLIENT_TOO_BIG,
"Attempted to send an RPC larger than the "
"max allowed message size. Was %u, allowed %u.",
BSON_UINT32_FROM_LE (rpc->header.msg_len),
max_msg_size);
GOTO (done);
}
- if (need_gle) {
- gle.header.msg_len = 0;
- gle.header.request_id = ++cluster->request_id;
- gle.header.response_to = 0;
- gle.header.opcode = MONGOC_OPCODE_QUERY;
- gle.query.flags = MONGOC_QUERY_NONE;
-
- switch (BSON_UINT32_FROM_LE (rpc->header.opcode)) {
- case MONGOC_OPCODE_INSERT:
- DB_AND_CMD_FROM_COLLECTION (cmdname, rpc->insert.collection);
- break;
- case MONGOC_OPCODE_DELETE:
- DB_AND_CMD_FROM_COLLECTION (cmdname, rpc->delete_.collection);
- break;
- case MONGOC_OPCODE_UPDATE:
- DB_AND_CMD_FROM_COLLECTION (cmdname, rpc->update.collection);
- break;
- default:
- BSON_ASSERT (false);
- DB_AND_CMD_FROM_COLLECTION (cmdname, "admin.$cmd");
- break;
- }
-
- gle.query.collection = cmdname;
- gle.query.skip = 0;
- gle.query.n_return = 1;
- b = _mongoc_write_concern_get_gle (
- (mongoc_write_concern_t *) write_concern);
- gle.query.query = bson_get_data (b);
- gle.query.fields = NULL;
- _mongoc_rpc_gather (&gle, &cluster->iov);
- _mongoc_rpc_swab_to_le (&gle);
- }
-
if (!_mongoc_stream_writev_full (server_stream->stream,
cluster->iov.data,
cluster->iov.len,
cluster->sockettimeoutms,
error)) {
GOTO (done);
}
- if (cluster->client->topology->single_threaded) {
- scanner_node = mongoc_topology_scanner_get_node (
- cluster->client->topology->scanner, server_id);
-
- if (scanner_node) {
- scanner_node->last_used = bson_get_monotonic_time ();
- }
- }
+ _mongoc_topology_update_last_used (cluster->client->topology, server_id);
ret = true;
done:
-#ifdef MONGOC_ENABLE_COMPRESSION
if (compressor_id) {
bson_free (output);
}
-#endif
RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_cluster_try_recv --
*
* Tries to receive the next event from the MongoDB server.
* The contents are loaded into @buffer and then
* scattered into the @rpc structure. @rpc is valid as long as
* @buffer contains the contents read into it.
*
* Callers that can optimize a reuse of @buffer should do so. It
* can save many memory allocations.
*
* Returns:
* True if successful.
*
* Side effects:
* @rpc is set on success, @error on failure.
* @buffer will be filled with the input data.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_cluster_try_recv (mongoc_cluster_t *cluster,
mongoc_rpc_t *rpc,
mongoc_buffer_t *buffer,
mongoc_server_stream_t *server_stream,
bson_error_t *error)
{
uint32_t server_id;
bson_error_t err_local;
int32_t msg_len;
int32_t max_msg_size;
off_t pos;
ENTRY;
BSON_ASSERT (cluster);
BSON_ASSERT (rpc);
BSON_ASSERT (buffer);
BSON_ASSERT (server_stream);
server_id = server_stream->sd->id;
TRACE ("Waiting for reply from server_id \"%u\"", server_id);
if (!error) {
error = &err_local;
}
/*
* Buffer the message length to determine how much more to read.
*/
pos = buffer->len;
if (!_mongoc_buffer_append_from_stream (
buffer, server_stream->stream, 4, cluster->sockettimeoutms, error)) {
MONGOC_DEBUG (
"Could not read 4 bytes, stream probably closed or timed out");
mongoc_counter_protocol_ingress_error_inc ();
mongoc_cluster_disconnect_node (
cluster,
server_id,
!mongoc_stream_timed_out (server_stream->stream),
error);
RETURN (false);
}
/*
* Read the msg length from the buffer.
*/
memcpy (&msg_len, &buffer->data[buffer->off + pos], 4);
msg_len = BSON_UINT32_FROM_LE (msg_len);
max_msg_size = mongoc_server_stream_max_msg_size (server_stream);
if ((msg_len < 16) || (msg_len > max_msg_size)) {
bson_set_error (error,
MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Corrupt or malicious reply received.");
mongoc_cluster_disconnect_node (cluster, server_id, true, error);
mongoc_counter_protocol_ingress_error_inc ();
RETURN (false);
}
/*
* Read the rest of the message from the stream.
*/
if (!_mongoc_buffer_append_from_stream (buffer,
server_stream->stream,
msg_len - 4,
cluster->sockettimeoutms,
error)) {
mongoc_cluster_disconnect_node (
cluster,
server_id,
!mongoc_stream_timed_out (server_stream->stream),
error);
mongoc_counter_protocol_ingress_error_inc ();
RETURN (false);
}
/*
* Scatter the buffer into the rpc structure.
*/
if (!_mongoc_rpc_scatter (rpc, &buffer->data[buffer->off + pos], msg_len)) {
bson_set_error (error,
MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Failed to decode reply from server.");
mongoc_cluster_disconnect_node (cluster, server_id, true, error);
mongoc_counter_protocol_ingress_error_inc ();
RETURN (false);
}
if (BSON_UINT32_FROM_LE (rpc->header.opcode) == MONGOC_OPCODE_COMPRESSED) {
uint8_t *buf = NULL;
size_t len = BSON_UINT32_FROM_LE (rpc->compressed.uncompressed_size) +
sizeof (mongoc_rpc_header_t);
buf = bson_malloc0 (len);
if (!_mongoc_rpc_decompress (rpc, buf, len)) {
bson_free (buf);
bson_set_error (error,
MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Could not decompress server reply");
RETURN (false);
}
_mongoc_buffer_destroy (buffer);
_mongoc_buffer_init (buffer, buf, len, NULL, NULL);
}
_mongoc_rpc_swab_from_le (rpc);
- _mongoc_cluster_inc_ingress_rpc (rpc);
-
RETURN (true);
}
+
+static bool
+mongoc_cluster_run_opmsg (mongoc_cluster_t *cluster,
+ mongoc_cmd_t *cmd,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ mongoc_rpc_section_t section[2];
+ mongoc_buffer_t buffer;
+ bson_t reply_local; /* only statically initialized */
+ char *output = NULL;
+ mongoc_rpc_t rpc;
+ int32_t msg_len;
+ bool ok;
+ const mongoc_server_stream_t *server_stream;
+
+ server_stream = cmd->server_stream;
+ if (!cmd->command_name) {
+ bson_set_error (error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "Empty command document");
+ _mongoc_bson_init_if_set (reply);
+ return false;
+ }
+ if (cluster->client->in_exhaust) {
+ bson_set_error (error,
+ MONGOC_ERROR_CLIENT,
+ MONGOC_ERROR_CLIENT_IN_EXHAUST,
+ "A cursor derived from this client is in exhaust.");
+ _mongoc_bson_init_if_set (reply);
+ return false;
+ }
+
+ _mongoc_array_clear (&cluster->iov);
+ _mongoc_buffer_init (&buffer, NULL, 0, NULL, NULL);
+
+ rpc.header.msg_len = 0;
+ rpc.header.request_id = ++cluster->request_id;
+ rpc.header.response_to = 0;
+ rpc.header.opcode = MONGOC_OPCODE_MSG;
+ rpc.msg.flags = 0;
+ rpc.msg.n_sections = 1;
+
+ section[0].payload_type = 0;
+ section[0].payload.bson_document = bson_get_data (cmd->command);
+ rpc.msg.sections[0] = section[0];
+
+ if (cmd->payload) {
+ section[1].payload_type = 1;
+ section[1].payload.sequence.size = cmd->payload_size +
+ strlen (cmd->payload_identifier) + 1 +
+ sizeof (int32_t);
+ section[1].payload.sequence.identifier = cmd->payload_identifier;
+ section[1].payload.sequence.bson_documents = cmd->payload;
+ rpc.msg.sections[1] = section[1];
+ rpc.msg.n_sections++;
+ }
+
+ _mongoc_rpc_gather (&rpc, &cluster->iov);
+ _mongoc_rpc_swab_to_le (&rpc);
+
+ if (mongoc_cmd_is_compressible (cmd)) {
+ int32_t compressor_id =
+ mongoc_server_description_compressor_id (server_stream->sd);
+
+ TRACE (
+ "Function '%s' is compressible: %d", cmd->command_name, compressor_id);
+ if (compressor_id != -1) {
+ output = _mongoc_rpc_compress (cluster, compressor_id, &rpc, error);
+ if (output == NULL) {
+ _mongoc_bson_init_if_set (reply);
+ _mongoc_buffer_destroy (&buffer);
+ return false;
+ }
+ }
+ }
+ ok = _mongoc_stream_writev_full (server_stream->stream,
+ (mongoc_iovec_t *) cluster->iov.data,
+ cluster->iov.len,
+ cluster->sockettimeoutms,
+ error);
+ if (!ok) {
+ mongoc_cluster_disconnect_node (
+ cluster, server_stream->sd->id, true, error);
+ bson_free (output);
+ _mongoc_bson_init_if_set (reply);
+ _mongoc_buffer_destroy (&buffer);
+ return false;
+ }
+
+ ok = _mongoc_buffer_append_from_stream (
+ &buffer, server_stream->stream, 4, cluster->sockettimeoutms, error);
+ if (!ok) {
+ mongoc_cluster_disconnect_node (
+ cluster, server_stream->sd->id, true, error);
+ bson_free (output);
+ _mongoc_bson_init_if_set (reply);
+ _mongoc_buffer_destroy (&buffer);
+ return false;
+ }
+
+ BSON_ASSERT (buffer.len == 4);
+ memcpy (&msg_len, buffer.data, 4);
+ msg_len = BSON_UINT32_FROM_LE (msg_len);
+ if ((msg_len < 16) || (msg_len > server_stream->sd->max_msg_size)) {
+ bson_set_error (
+ error,
+ MONGOC_ERROR_PROTOCOL,
+ MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
+ "Message size %d is not within expected range 16-%d bytes",
+ msg_len,
+ server_stream->sd->max_msg_size);
+ mongoc_cluster_disconnect_node (
+ cluster, server_stream->sd->id, true, error);
+ bson_free (output);
+ _mongoc_bson_init_if_set (reply);
+ _mongoc_buffer_destroy (&buffer);
+ return false;
+ }
+
+ ok = _mongoc_buffer_append_from_stream (&buffer,
+ server_stream->stream,
+ (size_t) msg_len - 4,
+ cluster->sockettimeoutms,
+ error);
+ if (!ok) {
+ mongoc_cluster_disconnect_node (
+ cluster, server_stream->sd->id, true, error);
+ bson_free (output);
+ _mongoc_bson_init_if_set (reply);
+ _mongoc_buffer_destroy (&buffer);
+ return false;
+ }
+
+ ok = _mongoc_rpc_scatter (&rpc, buffer.data, buffer.len);
+ if (!ok) {
+ bson_set_error (error,
+ MONGOC_ERROR_PROTOCOL,
+ MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
+ "Malformed message from server");
+ bson_free (output);
+ _mongoc_bson_init_if_set (reply);
+ _mongoc_buffer_destroy (&buffer);
+ return false;
+ }
+ if (BSON_UINT32_FROM_LE (rpc.header.opcode) == MONGOC_OPCODE_COMPRESSED) {
+ size_t len = BSON_UINT32_FROM_LE (rpc.compressed.uncompressed_size) +
+ sizeof (mongoc_rpc_header_t);
+
+ output = bson_realloc (output, len);
+ if (!_mongoc_rpc_decompress (&rpc, (uint8_t *) output, len)) {
+ bson_set_error (error,
+ MONGOC_ERROR_PROTOCOL,
+ MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
+ "Could not decompress message from server");
+ mongoc_cluster_disconnect_node (
+ cluster, server_stream->sd->id, true, error);
+ bson_free (output);
+ _mongoc_bson_init_if_set (reply);
+ _mongoc_buffer_destroy (&buffer);
+ return false;
+ }
+ }
+ _mongoc_rpc_swab_from_le (&rpc);
+
+ memcpy (&msg_len, rpc.msg.sections[0].payload.bson_document, 4);
+ msg_len = BSON_UINT32_FROM_LE (msg_len);
+ bson_init_static (
+ &reply_local, rpc.msg.sections[0].payload.bson_document, msg_len);
+
+ _mongoc_topology_update_cluster_time (cluster->client->topology,
+ &reply_local);
+ ok = _mongoc_cmd_check_ok (
+ &reply_local, cluster->client->error_api_version, error);
+
+ if (cmd->session) {
+ _mongoc_client_session_handle_reply (
+ cmd->session, cmd->is_acknowledged, &reply_local);
+ }
+
+ if (reply) {
+ bson_copy_to (&reply_local, reply);
+ }
+
+ _mongoc_buffer_destroy (&buffer);
+ bson_free (output);
+
+ return ok;
+}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cmd-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cmd-private.h
similarity index 71%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cmd-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cmd-private.h
index 03c7041d..ecd0c0dd 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cmd-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cmd-private.h
@@ -1,86 +1,110 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Internal struct to represent a command we will send to the server - command
* parameters are collected in a mongoc_cmd_parts_t until we know the server's
* wire version and whether it is mongos, then we collect the parts into a
* mongoc_cmd_t, and gather that into a mongoc_rpc_t.
*/
#ifndef MONGOC_CMD_PRIVATE_H
#define MONGOC_CMD_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-server-stream-private.h"
#include "mongoc-read-prefs.h"
#include "mongoc.h"
BSON_BEGIN_DECLS
+typedef enum {
+ MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_UNKNOWN,
+ MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES,
+ MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO
+} mongoc_cmd_parts_allow_txn_number_t;
+
typedef struct _mongoc_cmd_t {
const char *db_name;
mongoc_query_flags_t query_flags;
const bson_t *command;
- uint32_t server_id;
+ const char *command_name;
+ const uint8_t *payload;
+ int32_t payload_size;
+ const char *payload_identifier;
+ const mongoc_server_stream_t *server_stream;
int64_t operation_id;
+ mongoc_client_session_t *session;
+ bool is_acknowledged;
} mongoc_cmd_t;
typedef struct _mongoc_cmd_parts_t {
mongoc_cmd_t assembled;
mongoc_query_flags_t user_query_flags;
const bson_t *body;
+ bson_t read_concern_document;
bson_t extra;
const mongoc_read_prefs_t *read_prefs;
bson_t assembled_body;
+ bool is_read_command;
bool is_write_command;
+ bool prohibit_lsid;
+ mongoc_cmd_parts_allow_txn_number_t allow_txn_number;
+ bool is_retryable_write;
+ bool has_temp_session;
+ mongoc_client_t *client;
} mongoc_cmd_parts_t;
void
mongoc_cmd_parts_init (mongoc_cmd_parts_t *op,
+ mongoc_client_t *client,
const char *db_name,
mongoc_query_flags_t user_query_flags,
const bson_t *command_body);
+void
+mongoc_cmd_parts_set_session (mongoc_cmd_parts_t *parts,
+ mongoc_client_session_t *cs);
bool
mongoc_cmd_parts_append_opts (mongoc_cmd_parts_t *parts,
bson_iter_t *iter,
int max_wire_version,
bson_error_t *error);
-void
+bool
mongoc_cmd_parts_assemble (mongoc_cmd_parts_t *parts,
- const mongoc_server_stream_t *server_stream);
+ const mongoc_server_stream_t *server_stream,
+ bson_error_t *error);
-void
-mongoc_cmd_parts_assemble_simple (mongoc_cmd_parts_t *op, uint32_t server_id);
+bool
+mongoc_cmd_is_compressible (mongoc_cmd_t *cmd);
void
mongoc_cmd_parts_cleanup (mongoc_cmd_parts_t *op);
BSON_END_DECLS
#endif /* MONGOC_CMD_PRIVATE_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cmd.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cmd.c
new file mode 100644
index 00000000..36a07b16
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cmd.c
@@ -0,0 +1,748 @@
+/*
+ * Copyright 2017 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "mongoc-cmd-private.h"
+#include "mongoc-read-prefs-private.h"
+#include "mongoc-trace-private.h"
+#include "mongoc-client-private.h"
+#include "mongoc-write-concern-private.h"
+/* For strcasecmp on Windows */
+#include "mongoc-util-private.h"
+
+
+void
+mongoc_cmd_parts_init (mongoc_cmd_parts_t *parts,
+ mongoc_client_t *client,
+ const char *db_name,
+ mongoc_query_flags_t user_query_flags,
+ const bson_t *command_body)
+{
+ parts->body = command_body;
+ parts->user_query_flags = user_query_flags;
+ parts->read_prefs = NULL;
+ parts->is_read_command = false;
+ parts->is_write_command = false;
+ parts->prohibit_lsid = false;
+ parts->allow_txn_number = MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_UNKNOWN;
+ parts->is_retryable_write = false;
+ parts->has_temp_session = false;
+ parts->client = client;
+ bson_init (&parts->read_concern_document);
+ bson_init (&parts->extra);
+ bson_init (&parts->assembled_body);
+
+ parts->assembled.db_name = db_name;
+ parts->assembled.command = NULL;
+ parts->assembled.query_flags = MONGOC_QUERY_NONE;
+ parts->assembled.payload_identifier = NULL;
+ parts->assembled.payload = NULL;
+ parts->assembled.session = NULL;
+ parts->assembled.is_acknowledged = true;
+}
+
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * mongoc_cmd_parts_set_session --
+ *
+ * Set the client session field.
+ *
+ * Side effects:
+ * Aborts if the command is assembled or if mongoc_cmd_parts_append_opts
+ * was called before.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+void
+mongoc_cmd_parts_set_session (mongoc_cmd_parts_t *parts,
+ mongoc_client_session_t *cs)
+{
+ BSON_ASSERT (parts);
+ BSON_ASSERT (!parts->assembled.command);
+ BSON_ASSERT (!parts->assembled.session);
+
+ parts->assembled.session = cs;
+}
+
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * mongoc_cmd_parts_append_opts --
+ *
+ * Take an iterator over user-supplied options document and append the
+ * options to @parts->command_extra, taking the selected server's max
+ * wire version into account.
+ *
+ * Return:
+ * True if the options were successfully applied. If any options are
+ * invalid, returns false and fills out @error. In that case @parts is
+ * invalid and must not be used.
+ *
+ * Side effects:
+ * May partly apply options before returning an error.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+bool
+mongoc_cmd_parts_append_opts (mongoc_cmd_parts_t *parts,
+ bson_iter_t *iter,
+ int max_wire_version,
+ bson_error_t *error)
+{
+ const char *command_name;
+ bool is_fam;
+ mongoc_client_session_t *cs = NULL;
+ mongoc_write_concern_t *wc;
+ uint32_t len;
+ const uint8_t *data;
+ bson_t read_concern;
+
+ ENTRY;
+
+ /* not yet assembled */
+ BSON_ASSERT (!parts->assembled.command);
+
+ command_name = _mongoc_get_command_name (parts->body);
+
+ if (!command_name) {
+ bson_set_error (error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "Empty command document");
+ RETURN (false);
+ }
+
+ is_fam = !strcasecmp (command_name, "findandmodify");
+
+ while (bson_iter_next (iter)) {
+ if (BSON_ITER_IS_KEY (iter, "collation")) {
+ if (max_wire_version < WIRE_VERSION_COLLATION) {
+ bson_set_error (error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
+ "The selected server does not support collation");
+ RETURN (false);
+ }
+
+ } else if (BSON_ITER_IS_KEY (iter, "writeConcern")) {
+ wc = _mongoc_write_concern_new_from_iter (iter, error);
+ if (!wc) {
+ RETURN (false);
+ }
+
+ if ((is_fam && max_wire_version < WIRE_VERSION_FAM_WRITE_CONCERN) ||
+ (!is_fam && max_wire_version < WIRE_VERSION_CMD_WRITE_CONCERN)) {
+ mongoc_write_concern_destroy (wc);
+ continue;
+ }
+
+ parts->assembled.is_acknowledged =
+ mongoc_write_concern_is_acknowledged (wc);
+ mongoc_write_concern_destroy (wc);
+ } else if (BSON_ITER_IS_KEY (iter, "readConcern")) {
+ if (max_wire_version < WIRE_VERSION_READ_CONCERN) {
+ bson_set_error (error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
+ "The selected server does not support readConcern");
+ RETURN (false);
+ }
+
+ if (!BSON_ITER_HOLDS_DOCUMENT (iter)) {
+ bson_set_error (error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
+ "Invalid readConcern");
+ RETURN (false);
+ }
+
+ /* add readConcern later, once we know about causal consistency */
+ bson_iter_document (iter, &len, &data);
+ bson_init_static (&read_concern, data, (size_t) len);
+ bson_destroy (&parts->read_concern_document);
+ bson_copy_to (&read_concern, &parts->read_concern_document);
+ continue;
+ } else if (BSON_ITER_IS_KEY (iter, "sessionId")) {
+ BSON_ASSERT (!parts->assembled.session);
+
+ if (!_mongoc_client_session_from_iter (
+ parts->client, iter, &cs, error)) {
+ RETURN (false);
+ }
+
+ parts->assembled.session = cs;
+ continue;
+ } else if (BSON_ITER_IS_KEY (iter, "serverId") ||
+ BSON_ITER_IS_KEY (iter, "maxAwaitTimeMS")) {
+ continue;
+ }
+
+ bson_append_iter (&parts->extra, bson_iter_key (iter), -1, iter);
+ }
+
+ RETURN (true);
+}
+
+
+static void
+_mongoc_cmd_parts_ensure_copied (mongoc_cmd_parts_t *parts)
+{
+ if (parts->assembled.command == parts->body) {
+ bson_concat (&parts->assembled_body, parts->body);
+ bson_concat (&parts->assembled_body, &parts->extra);
+ parts->assembled.command = &parts->assembled_body;
+ }
+}
+
+
+/* The server type must be mongos, or message must be OP_MSG. */
+static void
+_mongoc_cmd_parts_add_read_prefs (bson_t *query,
+ const mongoc_read_prefs_t *prefs)
+{
+ bson_t child;
+ const char *mode_str;
+ const bson_t *tags;
+ int64_t stale;
+
+ mode_str = _mongoc_read_mode_as_str (mongoc_read_prefs_get_mode (prefs));
+ tags = mongoc_read_prefs_get_tags (prefs);
+ stale = mongoc_read_prefs_get_max_staleness_seconds (prefs);
+
+ bson_append_document_begin (query, "$readPreference", 15, &child);
+ bson_append_utf8 (&child, "mode", 4, mode_str, -1);
+ if (!bson_empty0 (tags)) {
+ bson_append_array (&child, "tags", 4, tags);
+ }
+
+ if (stale != MONGOC_NO_MAX_STALENESS) {
+ bson_append_int64 (&child, "maxStalenessSeconds", 19, stale);
+ }
+
+ bson_append_document_end (query, &child);
+}
+
+
+static void
+_iter_concat (bson_t *dst, bson_iter_t *iter)
+{
+ uint32_t len;
+ const uint8_t *data;
+ bson_t src;
+
+ bson_iter_document (iter, &len, &data);
+ bson_init_static (&src, data, len);
+ bson_concat (dst, &src);
+}
+
+
+/* Update result with the read prefs. Server must be mongos.
+ */
+static void
+_mongoc_cmd_parts_assemble_mongos (mongoc_cmd_parts_t *parts,
+ const mongoc_server_stream_t *server_stream)
+{
+ mongoc_read_mode_t mode;
+ const bson_t *tags = NULL;
+ bool add_read_prefs = false;
+ bson_t query;
+ bson_iter_t dollar_query;
+ bool has_dollar_query = false;
+ bool requires_read_concern;
+
+ ENTRY;
+
+ mode = mongoc_read_prefs_get_mode (parts->read_prefs);
+ if (parts->read_prefs) {
+ tags = mongoc_read_prefs_get_tags (parts->read_prefs);
+ }
+
+ /* Server Selection Spec says:
+ *
+ * For mode 'primary', drivers MUST NOT set the slaveOK wire protocol flag
+ * and MUST NOT use $readPreference
+ *
+ * For mode 'secondary', drivers MUST set the slaveOK wire protocol flag and
+ * MUST also use $readPreference
+ *
+ * For mode 'primaryPreferred', drivers MUST set the slaveOK wire protocol
+ * flag and MUST also use $readPreference
+ *
+ * For mode 'secondaryPreferred', drivers MUST set the slaveOK wire protocol
+ * flag. If the read preference contains a non-empty tag_sets parameter,
+ * drivers MUST use $readPreference; otherwise, drivers MUST NOT use
+ * $readPreference
+ *
+ * For mode 'nearest', drivers MUST set the slaveOK wire protocol flag and
+ * MUST also use $readPreference
+ */
+ switch (mode) {
+ case MONGOC_READ_PRIMARY:
+ break;
+ case MONGOC_READ_SECONDARY_PREFERRED:
+ if (!bson_empty0 (tags)) {
+ add_read_prefs = true;
+ }
+ parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK;
+ break;
+ case MONGOC_READ_PRIMARY_PREFERRED:
+ case MONGOC_READ_SECONDARY:
+ case MONGOC_READ_NEAREST:
+ default:
+ parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK;
+ add_read_prefs = true;
+ }
+
+ requires_read_concern =
+ !bson_empty (&parts->read_concern_document) &&
+ strcmp (parts->assembled.command_name, "getMore") != 0;
+
+ if (add_read_prefs) {
+ /* produce {$query: {user query, readConcern}, $readPreference: ... } */
+ bson_append_document_begin (&parts->assembled_body, "$query", 6, &query);
+
+ if (bson_iter_init_find (&dollar_query, parts->body, "$query")) {
+ /* user provided something like {$query: {key: "x"}} */
+ has_dollar_query = true;
+ _iter_concat (&query, &dollar_query);
+ } else {
+ bson_concat (&query, parts->body);
+ }
+
+ bson_concat (&query, &parts->extra);
+ if (requires_read_concern) {
+ bson_append_document (
+ &query, "readConcern", 11, &parts->read_concern_document);
+ }
+
+ bson_append_document_end (&parts->assembled_body, &query);
+ _mongoc_cmd_parts_add_read_prefs (&parts->assembled_body,
+ parts->read_prefs);
+
+ if (has_dollar_query) {
+ /* copy anything that isn't in user's $query */
+ bson_copy_to_excluding_noinit (
+ parts->body, &parts->assembled_body, "$query", NULL);
+ }
+
+ parts->assembled.command = &parts->assembled_body;
+ } else if (bson_iter_init_find (&dollar_query, parts->body, "$query")) {
+ /* user provided $query, we have no read prefs */
+ bson_append_document_begin (&parts->assembled_body, "$query", 6, &query);
+ _iter_concat (&query, &dollar_query);
+ bson_concat (&query, &parts->extra);
+ if (requires_read_concern) {
+ bson_append_document (
+ &query, "readConcern", 11, &parts->read_concern_document);
+ }
+
+ bson_append_document_end (&parts->assembled_body, &query);
+ /* copy anything that isn't in user's $query */
+ bson_copy_to_excluding_noinit (
+ parts->body, &parts->assembled_body, "$query", NULL);
+
+ parts->assembled.command = &parts->assembled_body;
+ }
+
+ if (requires_read_concern) {
+ _mongoc_cmd_parts_ensure_copied (parts);
+ bson_append_document (&parts->assembled_body,
+ "readConcern",
+ 11,
+ &parts->read_concern_document);
+ }
+
+ if (!bson_empty (&parts->extra)) {
+ /* if none of the above logic has merged "extra", do it now */
+ _mongoc_cmd_parts_ensure_copied (parts);
+ }
+
+ EXIT;
+}
+
+
+static void
+_mongoc_cmd_parts_assemble_mongod (mongoc_cmd_parts_t *parts,
+ const mongoc_server_stream_t *server_stream)
+{
+ ENTRY;
+
+ if (!parts->is_write_command) {
+ switch (server_stream->topology_type) {
+ case MONGOC_TOPOLOGY_SINGLE:
+ /* Server Selection Spec: for topology type single and server types
+ * besides mongos, "clients MUST always set the slaveOK wire
+ * protocol flag on reads to ensure that any server type can handle
+ * the request."
+ */
+ parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK;
+ break;
+
+ case MONGOC_TOPOLOGY_RS_NO_PRIMARY:
+ case MONGOC_TOPOLOGY_RS_WITH_PRIMARY:
+ /* Server Selection Spec: for RS topology types, "For all read
+ * preferences modes except primary, clients MUST set the slaveOK wire
+ * protocol flag to ensure that any suitable server can handle the
+ * request. Clients MUST NOT set the slaveOK wire protocol flag if the
+ * read preference mode is primary.
+ */
+ if (parts->read_prefs &&
+ parts->read_prefs->mode != MONGOC_READ_PRIMARY) {
+ parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK;
+ }
+
+ break;
+ case MONGOC_TOPOLOGY_SHARDED:
+ case MONGOC_TOPOLOGY_UNKNOWN:
+ case MONGOC_TOPOLOGY_DESCRIPTION_TYPES:
+ default:
+ /* must not call this function w/ sharded or unknown topology type */
+ BSON_ASSERT (false);
+ }
+ } /* if (!parts->is_write_command) */
+
+ if (!bson_empty (&parts->extra)) {
+ _mongoc_cmd_parts_ensure_copied (parts);
+ }
+
+ if (!bson_empty (&parts->read_concern_document) &&
+ strcmp (parts->assembled.command_name, "getMore") != 0) {
+ _mongoc_cmd_parts_ensure_copied (parts);
+ bson_append_document (&parts->assembled_body,
+ "readConcern",
+ 11,
+ &parts->read_concern_document);
+ }
+
+ EXIT;
+}
+
+
+static const bson_t *
+_largest_cluster_time (const bson_t *a, const bson_t *b)
+{
+ if (!a) {
+ return b;
+ }
+
+ if (!b) {
+ return a;
+ }
+
+ if (_mongoc_cluster_time_greater (a, b)) {
+ return a;
+ }
+
+ return b;
+}
+
+
+/* Check if the command should allow a transaction number if that has not
+ * already been determined.
+ *
+ * This should only return true for write commands that are always retryable for
+ * the server stream's wire version.
+ *
+ * The basic write commands (i.e. insert, update, delete) are intentionally
+ * excluded here. While insert is always retryable, update and delete are only
+ * retryable if they include no multi-document writes. Since it would be costly
+ * to inspect the command document here, the bulk operation API explicitly sets
+ * allow_txn_number for us. This means that insert, update, and delete are not
+ * retryable if executed via mongoc_client_write_command_with_opts(); however,
+ * documentation already instructs users not to use that for basic writes.
+ */
+static bool
+_allow_txn_number (const mongoc_cmd_parts_t *parts,
+ const mongoc_server_stream_t *server_stream)
+{
+ /* There is no reason to call this function if allow_txn_number is set */
+ BSON_ASSERT (parts->allow_txn_number ==
+ MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_UNKNOWN);
+
+ if (!parts->is_write_command) {
+ return false;
+ }
+
+ if (server_stream->sd->max_wire_version < WIRE_VERSION_RETRY_WRITES) {
+ return false;
+ }
+
+ if (!strcasecmp (parts->assembled.command_name, "findandmodify")) {
+ return true;
+ }
+
+ return false;
+}
+
+
+/* Check if the write command should support retryable behavior. */
+static bool
+_is_retryable_write (const mongoc_cmd_parts_t *parts,
+ const mongoc_server_stream_t *server_stream)
+{
+ if (!parts->assembled.session) {
+ return false;
+ }
+
+ if (!parts->is_write_command) {
+ return false;
+ }
+
+ if (parts->allow_txn_number != MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES) {
+ return false;
+ }
+
+ if (server_stream->sd->max_wire_version < WIRE_VERSION_RETRY_WRITES) {
+ return false;
+ }
+
+ if (server_stream->sd->type == MONGOC_SERVER_STANDALONE) {
+ return false;
+ }
+
+ if (!mongoc_uri_get_option_as_bool (
+ parts->client->uri, MONGOC_URI_RETRYWRITES, false)) {
+ return false;
+ }
+
+ return true;
+}
+
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * mongoc_cmd_parts_assemble --
+ *
+ * Assemble the command body, options, and read preference into one
+ * command.
+ *
+ * Return:
+ * True if the options were successfully applied. If any options are
+ * invalid, returns false and fills out @error. In that case @parts is
+ * invalid and must not be used.
+ *
+ * Side effects:
+ * May partly assemble before returning an error.
+ * mongoc_cmd_parts_cleanup should be called in all cases.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+bool
+mongoc_cmd_parts_assemble (mongoc_cmd_parts_t *parts,
+ const mongoc_server_stream_t *server_stream,
+ bson_error_t *error)
+{
+ mongoc_server_description_type_t server_type;
+ mongoc_client_session_t *cs;
+ const bson_t *cluster_time = NULL;
+ bson_t child;
+ mongoc_read_prefs_t *prefs = NULL;
+ bool is_get_more;
+ const mongoc_read_prefs_t *prefs_ptr;
+
+ ENTRY;
+
+ BSON_ASSERT (parts);
+ BSON_ASSERT (server_stream);
+
+ server_type = server_stream->sd->type;
+ cs = parts->assembled.session;
+
+ /* must not be assembled already */
+ BSON_ASSERT (!parts->assembled.command);
+ BSON_ASSERT (bson_empty (&parts->assembled_body));
+
+ /* begin with raw flags/cmd as assembled flags/cmd, might change below */
+ parts->assembled.command = parts->body;
+ /* unused in OP_MSG: */
+ parts->assembled.query_flags = parts->user_query_flags;
+ parts->assembled.server_stream = server_stream;
+ parts->assembled.command_name =
+ _mongoc_get_command_name (parts->assembled.command);
+
+ if (!parts->assembled.command_name) {
+ bson_set_error (error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "Empty command document");
+ RETURN (false);
+ }
+
+ TRACE ("Preparing '%s'", parts->assembled.command_name);
+
+ is_get_more = !strcmp (parts->assembled.command_name, "getMore");
+
+ if (!parts->is_write_command && IS_PREF_PRIMARY (parts->read_prefs) &&
+ server_stream->topology_type == MONGOC_TOPOLOGY_SINGLE &&
+ server_stream->sd->type != MONGOC_SERVER_MONGOS) {
+ prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY_PREFERRED);
+ prefs_ptr = prefs;
+ } else {
+ prefs_ptr = parts->read_prefs;
+ }
+
+ if (server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG) {
+ if (!bson_has_field (parts->body, "$db")) {
+ BSON_APPEND_UTF8 (&parts->extra, "$db", parts->assembled.db_name);
+ }
+
+ if (!IS_PREF_PRIMARY (prefs_ptr)) {
+ _mongoc_cmd_parts_add_read_prefs (&parts->extra, prefs_ptr);
+ }
+
+ if (!bson_empty (&parts->extra)) {
+ _mongoc_cmd_parts_ensure_copied (parts);
+ }
+
+ /* If an explicit session was not provided and lsid is not prohibited,
+ * attempt to create an implicit session (ignoring any errors). */
+ if (!cs && !parts->prohibit_lsid) {
+ cs = mongoc_client_start_session (parts->client, NULL, NULL);
+
+ if (cs) {
+ parts->assembled.session = cs;
+ parts->has_temp_session = true;
+ }
+ }
+
+ if (cs) {
+ _mongoc_cmd_parts_ensure_copied (parts);
+ bson_append_document (&parts->assembled_body,
+ "lsid",
+ 4,
+ mongoc_client_session_get_lsid (cs));
+
+ cs->server_session->last_used_usec = bson_get_monotonic_time ();
+ cluster_time = mongoc_client_session_get_cluster_time (cs);
+ }
+
+ /* Ensure we know if the write command allows a transaction number */
+ if (parts->is_write_command &&
+ parts->allow_txn_number ==
+ MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_UNKNOWN) {
+ parts->allow_txn_number = _allow_txn_number (parts, server_stream)
+ ? MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES
+ : MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO;
+ }
+
+ /* Determine if the command is a retryable. If so, append txnNumber now
+ * for future use and mark the command as such. */
+ if (_is_retryable_write (parts, server_stream)) {
+ _mongoc_cmd_parts_ensure_copied (parts);
+ bson_append_int64 (&parts->assembled_body, "txnNumber", 9, 0);
+ parts->is_retryable_write = true;
+ }
+
+ if (!bson_empty (&server_stream->cluster_time)) {
+ cluster_time =
+ _largest_cluster_time (&server_stream->cluster_time, cluster_time);
+ }
+
+ if (cluster_time) {
+ _mongoc_cmd_parts_ensure_copied (parts);
+ bson_append_document (
+ &parts->assembled_body, "$clusterTime", 12, cluster_time);
+ }
+
+ if (!is_get_more) {
+ /* This condition should never trigger for an implicit client session.
+ * Even though the causal consistency option may default to true, an
+ * implicit client session will have no previous operation time. */
+ if (parts->is_read_command && cs &&
+ mongoc_session_opts_get_causal_consistency (&cs->opts) &&
+ cs->operation_timestamp) {
+ _mongoc_cmd_parts_ensure_copied (parts);
+ bson_append_document_begin (
+ &parts->assembled_body, "readConcern", 11, &child);
+
+ if (!bson_empty (&parts->read_concern_document)) {
+ /* combine user's readConcern with afterClusterTime */
+ bson_concat (&child, &parts->read_concern_document);
+ }
+
+ bson_append_timestamp (&child,
+ "afterClusterTime",
+ 16,
+ cs->operation_timestamp,
+ cs->operation_increment);
+ bson_append_document_end (&parts->assembled_body, &child);
+ } else if (!bson_empty (&parts->read_concern_document)) {
+ bson_append_document (&parts->assembled_body,
+ "readConcern",
+ 11,
+ &parts->read_concern_document);
+ }
+ }
+ } else if (server_type == MONGOC_SERVER_MONGOS) {
+ _mongoc_cmd_parts_assemble_mongos (parts, server_stream);
+ } else {
+ _mongoc_cmd_parts_assemble_mongod (parts, server_stream);
+ }
+
+ mongoc_read_prefs_destroy (prefs); /* NULL ok */
+ RETURN (true);
+}
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * mongoc_cmd_parts_cleanup --
+ *
+ * Free memory associated with a stack-allocated mongoc_cmd_parts_t.
+ *
+ * Side effects:
+ * None.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+void
+mongoc_cmd_parts_cleanup (mongoc_cmd_parts_t *parts)
+{
+ bson_destroy (&parts->read_concern_document);
+ bson_destroy (&parts->extra);
+ bson_destroy (&parts->assembled_body);
+
+ if (parts->has_temp_session) {
+ /* client session returns its server session to server session pool */
+ mongoc_client_session_destroy (parts->assembled.session);
+ }
+}
+
+bool
+mongoc_cmd_is_compressible (mongoc_cmd_t *cmd)
+{
+ BSON_ASSERT (cmd);
+ BSON_ASSERT (cmd->command_name);
+
+ return !!strcasecmp (cmd->command_name, "ismaster") &&
+ !!strcasecmp (cmd->command_name, "authenticate") &&
+ !!strcasecmp (cmd->command_name, "getnonce") &&
+ !!strcasecmp (cmd->command_name, "saslstart") &&
+ !!strcasecmp (cmd->command_name, "saslcontinue") &&
+ !!strcasecmp (cmd->command_name, "createuser") &&
+ !!strcasecmp (cmd->command_name, "updateuser") &&
+ !!strcasecmp (cmd->command_name, "copydb") &&
+ !!strcasecmp (cmd->command_name, "copydbsaslstart") &&
+ !!strcasecmp (cmd->command_name, "copydbgetnonce");
+}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-collection-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-collection-private.h
similarity index 92%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-collection-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-collection-private.h
index 61db056f..28273e8d 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-collection-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-collection-private.h
@@ -1,63 +1,59 @@
/*
* Copyright 2013-2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_COLLECTION_PRIVATE_H
#define MONGOC_COLLECTION_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
-#include "mongoc-buffer-private.h"
#include "mongoc-client.h"
-
BSON_BEGIN_DECLS
struct _mongoc_collection_t {
mongoc_client_t *client;
char ns[128];
uint32_t nslen;
char db[128];
char collection[128];
uint32_t collectionlen;
- mongoc_buffer_t buffer;
mongoc_read_prefs_t *read_prefs;
mongoc_read_concern_t *read_concern;
mongoc_write_concern_t *write_concern;
bson_t *gle;
};
mongoc_collection_t *
_mongoc_collection_new (mongoc_client_t *client,
const char *db,
const char *collection,
const mongoc_read_prefs_t *read_prefs,
const mongoc_read_concern_t *read_concern,
const mongoc_write_concern_t *write_concern);
mongoc_cursor_t *
-_mongoc_collection_find_indexes_legacy (mongoc_collection_t *collection,
- bson_error_t *error);
+_mongoc_collection_find_indexes_legacy (mongoc_collection_t *collection);
BSON_END_DECLS
#endif /* MONGOC_COLLECTION_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-collection.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-collection.c
similarity index 71%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-collection.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-collection.c
index a9650346..15a36983 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-collection.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-collection.c
@@ -1,2673 +1,3135 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include <bcon.h>
#include <stdio.h>
#include "mongoc-bulk-operation.h"
#include "mongoc-bulk-operation-private.h"
+#include "mongoc-change-stream-private.h"
#include "mongoc-client-private.h"
#include "mongoc-find-and-modify-private.h"
#include "mongoc-find-and-modify.h"
#include "mongoc-collection.h"
#include "mongoc-collection-private.h"
#include "mongoc-cursor-private.h"
#include "mongoc-cursor-cursorid-private.h"
#include "mongoc-cursor-array-private.h"
#include "mongoc-error.h"
#include "mongoc-index.h"
#include "mongoc-log.h"
#include "mongoc-trace-private.h"
#include "mongoc-read-concern-private.h"
#include "mongoc-write-concern-private.h"
+#include "mongoc-read-prefs-private.h"
#include "mongoc-util-private.h"
+#include "mongoc-write-command-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "collection"
-#define _BSON_APPEND_WRITE_CONCERN(_bson, _write_concern) \
- do { \
- const bson_t *write_concern_bson; \
- mongoc_write_concern_t *write_concern_copy = NULL; \
- if (_write_concern->frozen) { \
- write_concern_bson = _mongoc_write_concern_get_bson (_write_concern); \
- } else { \
- /* _mongoc_write_concern_get_bson will freeze the write_concern */ \
- write_concern_copy = mongoc_write_concern_copy (_write_concern); \
- write_concern_bson = \
- _mongoc_write_concern_get_bson (write_concern_copy); \
- } \
- BSON_APPEND_DOCUMENT (_bson, "writeConcern", write_concern_bson); \
- if (write_concern_copy) { \
- mongoc_write_concern_destroy (write_concern_copy); \
- } \
- } while (0);
-
-
static mongoc_cursor_t *
_mongoc_collection_cursor_new (mongoc_collection_t *collection,
mongoc_query_flags_t flags,
- const mongoc_read_prefs_t *prefs)
+ const mongoc_read_prefs_t *prefs,
+ bool is_command)
{
return _mongoc_cursor_new (collection->client,
collection->ns,
flags,
- 0, /* skip */
- 0, /* limit */
- 0, /* batch_size */
- false, /* is_command */
- NULL, /* query */
- NULL, /* fields */
- prefs, /* read prefs */
- NULL); /* read concern */
+ 0, /* skip */
+ 0, /* limit */
+ 0, /* batch_size */
+ !is_command, /* is_find */
+ NULL, /* query */
+ NULL, /* fields */
+ prefs, /* read prefs */
+ NULL); /* read concern */
}
static void
_mongoc_collection_write_command_execute (
mongoc_write_command_t *command,
const mongoc_collection_t *collection,
const mongoc_write_concern_t *write_concern,
+ mongoc_client_session_t *cs,
mongoc_write_result_t *result)
{
mongoc_server_stream_t *server_stream;
ENTRY;
server_stream = mongoc_cluster_stream_for_writes (
&collection->client->cluster, &result->error);
if (!server_stream) {
/* result->error has been filled out */
EXIT;
}
_mongoc_write_command_execute (command,
collection->client,
server_stream,
collection->db,
collection->collection,
write_concern,
0 /* offset */,
+ cs,
result);
mongoc_server_stream_cleanup (server_stream);
EXIT;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_collection_new --
*
* INTERNAL API
*
* Create a new mongoc_collection_t structure for the given client.
*
* @client must remain valid during the lifetime of this structure.
* @db is the db name of the collection.
* @collection is the name of the collection.
* @read_prefs is the default read preferences to apply or NULL.
* @read_concern is the default read concern to apply or NULL.
* @write_concern is the default write concern to apply or NULL.
*
* Returns:
* A newly allocated mongoc_collection_t that should be freed with
* mongoc_collection_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_collection_t *
_mongoc_collection_new (mongoc_client_t *client,
const char *db,
const char *collection,
const mongoc_read_prefs_t *read_prefs,
const mongoc_read_concern_t *read_concern,
const mongoc_write_concern_t *write_concern)
{
mongoc_collection_t *col;
ENTRY;
BSON_ASSERT (client);
BSON_ASSERT (db);
BSON_ASSERT (collection);
col = (mongoc_collection_t *) bson_malloc0 (sizeof *col);
col->client = client;
col->write_concern = write_concern
? mongoc_write_concern_copy (write_concern)
: mongoc_write_concern_new ();
col->read_concern = read_concern ? mongoc_read_concern_copy (read_concern)
: mongoc_read_concern_new ();
col->read_prefs = read_prefs ? mongoc_read_prefs_copy (read_prefs)
: mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
bson_snprintf (col->ns, sizeof col->ns, "%s.%s", db, collection);
bson_snprintf (col->db, sizeof col->db, "%s", db);
bson_snprintf (col->collection, sizeof col->collection, "%s", collection);
col->collectionlen = (uint32_t) strlen (col->collection);
col->nslen = (uint32_t) strlen (col->ns);
- _mongoc_buffer_init (&col->buffer, NULL, 0, NULL, NULL);
-
col->gle = NULL;
RETURN (col);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_destroy --
*
* Release resources associated with @collection and frees the
* structure.
*
* Returns:
* None.
*
* Side effects:
* Everything.
*
*--------------------------------------------------------------------------
*/
void
mongoc_collection_destroy (mongoc_collection_t *collection) /* IN */
{
ENTRY;
BSON_ASSERT (collection);
bson_clear (&collection->gle);
- _mongoc_buffer_destroy (&collection->buffer);
-
if (collection->read_prefs) {
mongoc_read_prefs_destroy (collection->read_prefs);
collection->read_prefs = NULL;
}
if (collection->read_concern) {
mongoc_read_concern_destroy (collection->read_concern);
collection->read_concern = NULL;
}
if (collection->write_concern) {
mongoc_write_concern_destroy (collection->write_concern);
collection->write_concern = NULL;
}
bson_free (collection);
EXIT;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_copy --
*
* Returns a copy of @collection that needs to be freed by calling
* mongoc_collection_destroy.
*
* Returns:
* A copy of this collection.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_collection_t *
mongoc_collection_copy (mongoc_collection_t *collection) /* IN */
{
ENTRY;
BSON_ASSERT (collection);
RETURN (_mongoc_collection_new (collection->client,
collection->db,
collection->collection,
collection->read_prefs,
collection->read_concern,
collection->write_concern));
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_aggregate --
*
* Send an "aggregate" command to the MongoDB server.
*
- * This varies it's behavior based on the wire version. If we're on
- * wire_version > 0, we use the new aggregate command, which returns a
- * database cursor. On wire_version == 0, we create synthetic cursor on
- * top of the array returned in result.
- *
* This function will always return a new mongoc_cursor_t that should
* be freed with mongoc_cursor_destroy().
*
* The cursor may fail once iterated upon, so check
* mongoc_cursor_error() if mongoc_cursor_next() returns false.
*
* See http://docs.mongodb.org/manual/aggregation/ for more
* information on how to build aggregation pipelines.
*
- * Requires:
- * MongoDB >= 2.1.0
- *
* Parameters:
* @flags: bitwise or of mongoc_query_flags_t or 0.
* @pipeline: A bson_t containing the pipeline request. @pipeline
* will be sent as an array type in the request.
* @options: A bson_t containing aggregation options, such as
* bypassDocumentValidation (used with $out pipeline),
* maxTimeMS (declaring maximum server execution time) and
* explain (return information on the processing of the
- *pipeline).
+ * pipeline).
* @read_prefs: Optional read preferences for the command.
*
* Returns:
* A newly allocated mongoc_cursor_t that should be freed with
* mongoc_cursor_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_cursor_t *
mongoc_collection_aggregate (mongoc_collection_t *collection, /* IN */
mongoc_query_flags_t flags, /* IN */
const bson_t *pipeline, /* IN */
const bson_t *opts, /* IN */
const mongoc_read_prefs_t *read_prefs) /* IN */
{
- mongoc_cmd_parts_t parts;
mongoc_server_stream_t *server_stream = NULL;
bool has_batch_size = false;
bool has_out_key = false;
bson_iter_t kiter;
bson_iter_t ar;
mongoc_cursor_t *cursor;
uint32_t server_id;
int32_t batch_size = 0;
bson_iter_t iter;
bson_t command;
bson_t child;
- bool use_cursor;
ENTRY;
BSON_ASSERT (collection);
BSON_ASSERT (pipeline);
bson_init (&command);
- if (!read_prefs) {
- read_prefs = collection->read_prefs;
- }
-
- cursor = _mongoc_collection_cursor_new (collection, flags, read_prefs);
- mongoc_cmd_parts_init (&parts, collection->db, flags, &command);
- parts.read_prefs = read_prefs;
-
- if (!_mongoc_read_prefs_validate (cursor->read_prefs, &cursor->error)) {
- GOTO (done);
- }
+ cursor = _mongoc_collection_cursor_new (collection, flags, read_prefs, true);
if (!_mongoc_get_server_id_from_opts (opts,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
&server_id,
&cursor->error)) {
GOTO (done);
}
if (server_id) {
/* will set slaveok bit if server is not mongos */
mongoc_cursor_set_hint (cursor, server_id);
/* server id isn't enough. ensure we're connected & know wire version */
server_stream =
mongoc_cluster_stream_for_server (&collection->client->cluster,
cursor->server_id,
true /* reconnect ok */,
&cursor->error);
if (!server_stream) {
GOTO (done);
}
} else {
server_stream = mongoc_cluster_stream_for_reads (
&collection->client->cluster, read_prefs, &cursor->error);
if (!server_stream) {
GOTO (done);
}
/* don't use mongoc_cursor_set_hint, don't want special slaveok logic */
cursor->server_id = server_stream->sd->id;
}
- use_cursor = server_stream->sd->max_wire_version >= WIRE_VERSION_AGG_CURSOR;
+ if (!read_prefs && !server_id) {
+ mongoc_read_prefs_destroy (cursor->read_prefs);
+ cursor->read_prefs = mongoc_read_prefs_copy (collection->read_prefs);
+ }
+
+ if (!_mongoc_read_prefs_validate (cursor->read_prefs, &cursor->error)) {
+ GOTO (done);
+ }
BSON_APPEND_UTF8 (&command, "aggregate", collection->collection);
/*
* The following will allow @pipeline to be either an array of
* items for the pipeline, or {"pipeline": [...]}.
*/
if (bson_iter_init_find (&iter, pipeline, "pipeline") &&
BSON_ITER_HOLDS_ARRAY (&iter)) {
if (!bson_append_iter (&command, "pipeline", 8, &iter)) {
bson_set_error (&cursor->error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"Failed to append \"pipeline\" to create command.");
GOTO (done);
}
} else {
BSON_APPEND_ARRAY (&command, "pipeline", pipeline);
}
if (bson_iter_init_find (&iter, pipeline, "pipeline") &&
BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &ar)) {
while (bson_iter_next (&ar)) {
if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
bson_iter_recurse (&ar, &kiter)) {
has_out_key |= bson_iter_find (&kiter, "$out");
}
}
}
- /* for newer version, we include a cursor subdocument */
- if (use_cursor) {
- bson_append_document_begin (&command, "cursor", 6, &child);
-
- if (opts && bson_iter_init_find (&iter, opts, "batchSize") &&
- BSON_ITER_HOLDS_NUMBER (&iter)) {
- batch_size = (int32_t) bson_iter_as_int64 (&iter);
- BSON_APPEND_INT32 (&child, "batchSize", batch_size);
- has_batch_size = true;
- }
-
- bson_append_document_end (&command, &child);
+ bson_append_document_begin (&command, "cursor", 6, &child);
+ if (opts && bson_iter_init_find (&iter, opts, "batchSize") &&
+ BSON_ITER_HOLDS_NUMBER (&iter)) {
+ batch_size = (int32_t) bson_iter_as_int64 (&iter);
+ BSON_APPEND_INT32 (&child, "batchSize", batch_size);
+ has_batch_size = true;
}
+ bson_append_document_end (&command, &child);
if (opts) {
- bool ok = false;
- bson_t opts_dupe = BSON_INITIALIZER;
-
- if (has_batch_size || server_stream->sd->max_wire_version == 0) {
- bson_copy_to_excluding_noinit (opts, &opts_dupe, "batchSize", NULL);
- bson_iter_init (&iter, &opts_dupe);
+ if (has_batch_size) {
+ bson_copy_to_excluding_noinit (opts, &cursor->opts, "batchSize", NULL);
} else {
- bson_iter_init (&iter, opts);
- }
-
- /* omits "serverId" */
- ok = mongoc_cmd_parts_append_opts (
- &parts, &iter, server_stream->sd->max_wire_version, &cursor->error);
-
- bson_destroy (&opts_dupe);
-
- if (!ok) {
- GOTO (done);
+ bson_concat (&cursor->opts, opts);
}
}
/* Only inherit WriteConcern when for aggregate with $out */
- if (!bson_has_field (&parts.extra, "writeConcern") && has_out_key) {
+ if (!bson_has_field (&cursor->opts, "writeConcern") && has_out_key) {
mongoc_write_concern_destroy (cursor->write_concern);
cursor->write_concern = mongoc_write_concern_copy (
mongoc_collection_get_write_concern (collection));
}
- if (!bson_has_field (&parts.extra, "readConcern")) {
+ if (!bson_has_field (&cursor->opts, "readConcern")) {
mongoc_read_concern_destroy (cursor->read_concern);
cursor->read_concern = mongoc_read_concern_copy (
mongoc_collection_get_read_concern (collection));
if (cursor->read_concern->level != NULL) {
const bson_t *read_concern_bson;
read_concern_bson =
_mongoc_read_concern_get_bson (cursor->read_concern);
- BSON_APPEND_DOCUMENT (&parts.extra, "readConcern", read_concern_bson);
+ BSON_APPEND_DOCUMENT (&cursor->opts, "readConcern", read_concern_bson);
}
}
- mongoc_cmd_parts_assemble (&parts, server_stream);
-
- if (use_cursor) {
- _mongoc_cursor_cursorid_init (cursor, parts.assembled.command);
- } else {
- /* for older versions we get an array that we can create a synthetic
- * cursor on top of */
- _mongoc_cursor_array_init (cursor, parts.assembled.command, "result");
- }
+ _mongoc_cursor_cursorid_init (cursor, &command);
done:
mongoc_server_stream_cleanup (server_stream); /* null ok */
- mongoc_cmd_parts_cleanup (&parts);
bson_destroy (&command);
/* we always return the cursor, even if it fails; users can detect the
* failure on performing a cursor operation. see CDRIVER-880. */
RETURN (cursor);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_find --
*
* DEPRECATED: use mongoc_collection_find_with_opts.
*
* Performs a query against the configured MongoDB server. If @read_prefs
* is provided, it will be used to locate a MongoDB node in the cluster
* to deliver the query to.
*
* @flags may be bitwise-or'd flags or MONGOC_QUERY_NONE.
*
* @skip may contain the number of documents to skip before returning the
* matching document.
*
* @limit may contain the maximum number of documents that may be
* returned.
*
* This function will always return a cursor, with the exception of
* invalid API use.
*
* Parameters:
* @collection: A mongoc_collection_t.
* @flags: A bitwise or of mongoc_query_flags_t.
* @skip: The number of documents to skip.
* @limit: The maximum number of items.
* @batch_size: The batch size
* @query: The query to locate matching documents.
* @fields: The fields to return, or NULL for all fields.
* @read_prefs: Read preferences to choose cluster node.
*
* Returns:
* A newly allocated mongoc_cursor_t that should be freed with
* mongoc_cursor_destroy().
*
* The client used by mongoc_collection_t must be valid for the
* lifetime of the resulting mongoc_cursor_t.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_cursor_t *
mongoc_collection_find (mongoc_collection_t *collection, /* IN */
mongoc_query_flags_t flags, /* IN */
uint32_t skip, /* IN */
uint32_t limit, /* IN */
uint32_t batch_size, /* IN */
const bson_t *query, /* IN */
const bson_t *fields, /* IN */
const mongoc_read_prefs_t *read_prefs) /* IN */
{
BSON_ASSERT (collection);
BSON_ASSERT (query);
bson_clear (&collection->gle);
if (!read_prefs) {
read_prefs = collection->read_prefs;
}
return _mongoc_cursor_new (collection->client,
collection->ns,
flags,
skip,
limit,
batch_size,
- false,
+ true /* is_find */,
query,
fields,
COALESCE (read_prefs, collection->read_prefs),
collection->read_concern);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_find_with_opts --
*
* Create a cursor with a query filter. All other options are
* specified in a free-form BSON document.
*
* Parameters:
* @collection: A mongoc_collection_t.
* @filter: The query to locate matching documents.
* @opts: Other options.
* @read_prefs: Optional read preferences to choose cluster node.
*
* Returns:
* A newly allocated mongoc_cursor_t that should be freed with
* mongoc_cursor_destroy().
*
* The client used by mongoc_collection_t must be valid for the
* lifetime of the resulting mongoc_cursor_t.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_cursor_t *
mongoc_collection_find_with_opts (mongoc_collection_t *collection,
const bson_t *filter,
const bson_t *opts,
const mongoc_read_prefs_t *read_prefs)
{
BSON_ASSERT (collection);
BSON_ASSERT (filter);
bson_clear (&collection->gle);
if (!read_prefs) {
read_prefs = collection->read_prefs;
}
return _mongoc_cursor_new_with_opts (
collection->client,
collection->ns,
- false /* is_command */,
+ true /* is_find */,
filter,
opts,
COALESCE (read_prefs, collection->read_prefs),
collection->read_concern);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_command --
*
* Executes a command on a cluster node matching @read_prefs. If
* @read_prefs is not provided, it will be run on the primary node.
*
* This function will always return a mongoc_cursor_t.
*
* Parameters:
* @collection: A mongoc_collection_t.
* @flags: Bitwise-or'd flags for command.
* @skip: Number of documents to skip, typically 0.
* @limit : Number of documents to return
* @batch_size : Batch size
* @query: The command to execute.
* @fields: The fields to return, or NULL.
* @read_prefs: Command read preferences or NULL.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_cursor_t *
mongoc_collection_command (mongoc_collection_t *collection,
mongoc_query_flags_t flags,
uint32_t skip,
uint32_t limit,
uint32_t batch_size,
const bson_t *query,
const bson_t *fields,
const mongoc_read_prefs_t *read_prefs)
{
char ns[MONGOC_NAMESPACE_MAX];
BSON_ASSERT (collection);
BSON_ASSERT (query);
if (!read_prefs) {
read_prefs = collection->read_prefs;
}
bson_clear (&collection->gle);
if (NULL == strstr (collection->collection, "$cmd")) {
- bson_snprintf (ns, sizeof ns, "%s", collection->db);
+ bson_snprintf (ns, sizeof ns, "%s.$cmd", collection->db);
} else {
- bson_snprintf (
- ns, sizeof ns, "%s.%s", collection->db, collection->collection);
- }
-
- return mongoc_client_command (collection->client,
- ns,
- flags,
- skip,
- limit,
- batch_size,
- query,
- fields,
- read_prefs);
+ bson_snprintf (ns, sizeof ns, "%s", collection->db);
+ }
+
+ /* Server Selection Spec: "The generic command method has a default read
+ * preference of mode 'primary'. The generic command method MUST ignore any
+ * default read preference from client, database or collection
+ * configuration. The generic command method SHOULD allow an optional read
+ * preference argument."
+ */
+
+ /* flags, skip, limit, batch_size, fields are unused */
+ return _mongoc_cursor_new_with_opts (collection->client,
+ ns,
+ false /* is_find */,
+ query,
+ NULL,
+ read_prefs,
+ NULL);
}
bool
mongoc_collection_read_command_with_opts (mongoc_collection_t *collection,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
const bson_t *opts,
bson_t *reply,
bson_error_t *error)
{
BSON_ASSERT (collection);
return _mongoc_client_command_with_opts (
collection->client,
collection->db,
command,
MONGOC_CMD_READ,
opts,
MONGOC_QUERY_NONE,
COALESCE (read_prefs, collection->read_prefs),
collection->read_concern,
collection->write_concern,
reply,
error);
}
bool
mongoc_collection_write_command_with_opts (mongoc_collection_t *collection,
const bson_t *command,
const bson_t *opts,
bson_t *reply,
bson_error_t *error)
{
BSON_ASSERT (collection);
return _mongoc_client_command_with_opts (collection->client,
collection->db,
command,
MONGOC_CMD_WRITE,
opts,
MONGOC_QUERY_NONE,
collection->read_prefs,
collection->read_concern,
collection->write_concern,
reply,
error);
}
bool
mongoc_collection_read_write_command_with_opts (
mongoc_collection_t *collection,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs /* IGNORED */,
const bson_t *opts,
bson_t *reply,
bson_error_t *error)
{
BSON_ASSERT (collection);
return _mongoc_client_command_with_opts (
collection->client,
collection->db,
command,
MONGOC_CMD_RW,
opts,
MONGOC_QUERY_NONE,
COALESCE (read_prefs, collection->read_prefs),
collection->read_concern,
collection->write_concern,
reply,
error);
}
+bool
+mongoc_collection_command_with_opts (mongoc_collection_t *collection,
+ const bson_t *command,
+ const mongoc_read_prefs_t *read_prefs,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ BSON_ASSERT (collection);
+
+ return _mongoc_client_command_with_opts (collection->client,
+ collection->db,
+ command,
+ MONGOC_CMD_RAW,
+ opts,
+ MONGOC_QUERY_NONE,
+ read_prefs,
+ collection->read_concern,
+ collection->write_concern,
+ reply,
+ error);
+}
+
+
bool
mongoc_collection_command_simple (mongoc_collection_t *collection,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
bson_t *reply,
bson_error_t *error)
{
BSON_ASSERT (collection);
BSON_ASSERT (command);
bson_clear (&collection->gle);
- return mongoc_client_command_simple (
- collection->client, collection->db, command, read_prefs, reply, error);
+ /* Server Selection Spec: "The generic command method has a default read
+ * preference of mode 'primary'. The generic command method MUST ignore any
+ * default read preference from client, database or collection
+ * configuration. The generic command method SHOULD allow an optional read
+ * preference argument."
+ */
+
+ return _mongoc_client_command_with_opts (collection->client,
+ collection->db,
+ command,
+ MONGOC_CMD_READ,
+ NULL /* opts */,
+ MONGOC_QUERY_NONE,
+ read_prefs,
+ NULL /* read concern */,
+ NULL /* write concern */,
+ reply,
+ error);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_count --
*
* Count the number of documents matching @query.
*
* Parameters:
* @flags: A mongoc_query_flags_t describing the query flags or 0.
* @query: The query to perform or NULL for {}.
* @skip: The $skip to perform within the query or 0.
* @limit: The $limit to perform within the query or 0.
* @read_prefs: desired read preferences or NULL.
* @error: A location for an error or NULL.
*
* Returns:
* -1 on failure; otherwise the number of matching documents.
*
* Side effects:
* @error is set upon failure if non-NULL.
*
*--------------------------------------------------------------------------
*/
int64_t
mongoc_collection_count (mongoc_collection_t *collection, /* IN */
mongoc_query_flags_t flags, /* IN */
const bson_t *query, /* IN */
int64_t skip, /* IN */
int64_t limit, /* IN */
const mongoc_read_prefs_t *read_prefs, /* IN */
bson_error_t *error) /* OUT */
{
int64_t ret;
bson_t opts = BSON_INITIALIZER;
/* Complex types must be parts of `opts`, otherwise we can't
* follow various specs that require validation etc */
if (collection->read_concern->level != NULL) {
const bson_t *read_concern_bson;
read_concern_bson =
_mongoc_read_concern_get_bson (collection->read_concern);
BSON_APPEND_DOCUMENT (&opts, "readConcern", read_concern_bson);
}
/* Server Selection Spec: "may-use-secondary" commands SHOULD take a read
* preference argument and otherwise MUST use the default read preference
* from client, database or collection configuration. */
ret = mongoc_collection_count_with_opts (
collection, flags, query, skip, limit, &opts, read_prefs, error);
bson_destroy (&opts);
return ret;
}
int64_t
mongoc_collection_count_with_opts (
   mongoc_collection_t *collection, /* IN */
   mongoc_query_flags_t flags, /* IN */
   const bson_t *query, /* IN */
   int64_t skip, /* IN */
   int64_t limit, /* IN */
   const bson_t *opts, /* IN */
   const mongoc_read_prefs_t *read_prefs, /* IN */
   bson_error_t *error) /* OUT */
{
   bson_t cmd = BSON_INITIALIZER;
   bson_t empty_query = BSON_INITIALIZER;
   bson_t reply;
   bson_iter_t n_iter;
   int64_t count = -1;
   bool cmd_ok;

   ENTRY;

   BSON_ASSERT (collection);

   /* Build the "count" command document. A NULL @query counts as {}. */
   bson_append_utf8 (
      &cmd, "count", 5, collection->collection, collection->collectionlen);
   bson_append_document (&cmd, "query", 5, query ? query : &empty_query);
   bson_destroy (&empty_query);

   if (limit) {
      bson_append_int64 (&cmd, "limit", 5, limit);
   }

   if (skip) {
      bson_append_int64 (&cmd, "skip", 4, skip);
   }

   cmd_ok = _mongoc_client_command_with_opts (
      collection->client,
      collection->db,
      &cmd,
      MONGOC_CMD_READ,
      opts,
      flags,
      COALESCE (read_prefs, collection->read_prefs),
      collection->read_concern,
      collection->write_concern,
      &reply,
      error);

   /* On success the server reply carries the matching-document total in "n";
    * otherwise leave the -1 failure sentinel in place. */
   if (cmd_ok && bson_iter_init_find (&n_iter, &reply, "n")) {
      count = bson_iter_as_int64 (&n_iter);
   }

   bson_destroy (&reply);
   bson_destroy (&cmd);

   RETURN (count);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_drop --
*
* Request the MongoDB server drop the collection.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* @error is set upon failure.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_drop (mongoc_collection_t *collection, /* IN */
                        bson_error_t *error) /* OUT */
{
   /* Delegate to the opts-taking variant with no options. */
   return mongoc_collection_drop_with_opts (collection, NULL, error);
}
bool
mongoc_collection_drop_with_opts (mongoc_collection_t *collection,
                                  const bson_t *opts,
                                  bson_error_t *error)
{
   bson_t cmd = BSON_INITIALIZER;
   bool ok;

   BSON_ASSERT (collection);

   /* { "drop" : "<collection name>" } */
   bson_append_utf8 (
      &cmd, "drop", 4, collection->collection, collection->collectionlen);

   ok = _mongoc_client_command_with_opts (collection->client,
                                          collection->db,
                                          &cmd,
                                          MONGOC_CMD_WRITE,
                                          opts,
                                          MONGOC_QUERY_NONE,
                                          collection->read_prefs,
                                          collection->read_concern,
                                          collection->write_concern,
                                          NULL, /* reply */
                                          error);

   bson_destroy (&cmd);

   return ok;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_drop_index --
*
* Request the MongoDB server drop the named index.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* @error is setup upon failure if non-NULL.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_drop_index (mongoc_collection_t *collection, /* IN */
                              const char *index_name, /* IN */
                              bson_error_t *error) /* OUT */
{
   /* Delegate to the opts-taking variant with no options. */
   return mongoc_collection_drop_index_with_opts (
      collection, index_name, NULL, error);
}
bool
mongoc_collection_drop_index_with_opts (mongoc_collection_t *collection,
                                        const char *index_name,
                                        const bson_t *opts,
                                        bson_error_t *error)
{
   bson_t cmd = BSON_INITIALIZER;
   bool ok;

   BSON_ASSERT (collection);
   BSON_ASSERT (index_name);

   /* { "dropIndexes" : "<collection name>", "index" : "<index name>" } */
   bson_append_utf8 (&cmd,
                     "dropIndexes",
                     -1,
                     collection->collection,
                     collection->collectionlen);
   BSON_APPEND_UTF8 (&cmd, "index", index_name);

   ok = _mongoc_client_command_with_opts (collection->client,
                                          collection->db,
                                          &cmd,
                                          MONGOC_CMD_WRITE,
                                          opts,
                                          MONGOC_QUERY_NONE,
                                          collection->read_prefs,
                                          collection->read_concern,
                                          collection->write_concern,
                                          NULL, /* reply */
                                          error);

   bson_destroy (&cmd);

   return ok;
}
char *
mongoc_collection_keys_to_index_string (const bson_t *keys)
{
   bson_string_t *joined;
   bson_iter_t iter;
   bool first = true;

   BSON_ASSERT (keys);

   /* Invalid BSON in @keys yields NULL; callers must handle it. */
   if (!bson_iter_init (&iter, keys)) {
      return NULL;
   }

   joined = bson_string_new (NULL);

   /* Produce "<key>_<value>" segments joined by underscores, e.g.
    * {"a": 1, "loc": "2d"} -> "a_1_loc_2d". */
   while (bson_iter_next (&iter)) {
      if (!first) {
         bson_string_append (joined, "_");
      }
      first = false;

      /* Index type can be specified as a string ("2d") or as an integer
       * representing direction */
      if (bson_iter_type (&iter) == BSON_TYPE_UTF8) {
         bson_string_append_printf (joined,
                                    "%s_%s",
                                    bson_iter_key (&iter),
                                    bson_iter_utf8 (&iter, NULL));
      } else {
         bson_string_append_printf (joined,
                                    "%s_%d",
                                    bson_iter_key (&iter),
                                    bson_iter_int32 (&iter));
      }
   }

   /* false: hand ownership of the underlying buffer to the caller. */
   return bson_string_free (joined, false);
}
-/*
- *--------------------------------------------------------------------------
- *
- * _mongoc_collection_create_index_legacy --
- *
- * Request the MongoDB server create the named index.
- *
- * Returns:
- * true if successful; otherwise false and @error is set.
- *
- * Side effects:
- * @error is setup upon failure if non-NULL.
- *
- *--------------------------------------------------------------------------
- */
-
-static bool
-_mongoc_collection_create_index_legacy (mongoc_collection_t *collection,
- const bson_t *keys,
- const mongoc_index_opt_t *opt,
- bson_error_t *error)
-{
- const mongoc_index_opt_t *def_opt;
- mongoc_collection_t *col;
- bool ret;
- bson_t insert;
- char *name;
-
- BSON_ASSERT (collection);
-
- def_opt = mongoc_index_opt_get_default ();
- opt = opt ? opt : def_opt;
-
- if (!opt->is_initialized) {
- MONGOC_WARNING ("Options have not yet been initialized");
- return false;
- }
-
- bson_init (&insert);
-
- bson_append_document (&insert, "key", -1, keys);
- bson_append_utf8 (&insert, "ns", -1, collection->ns, -1);
-
- if (opt->background != def_opt->background) {
- bson_append_bool (&insert, "background", -1, opt->background);
- }
-
- if (opt->unique != def_opt->unique) {
- bson_append_bool (&insert, "unique", -1, opt->unique);
- }
-
- if (opt->name != def_opt->name) {
- bson_append_utf8 (&insert, "name", -1, opt->name, -1);
- } else {
- name = mongoc_collection_keys_to_index_string (keys);
- if (!name) {
- bson_set_error (
- error,
- MONGOC_ERROR_BSON,
- MONGOC_ERROR_BSON_INVALID,
- "Cannot generate index name from invalid `keys` argument");
- bson_destroy (&insert);
- return false;
- }
- bson_append_utf8 (&insert, "name", -1, name, -1);
- bson_free (name);
- }
-
- if (opt->drop_dups != def_opt->drop_dups) {
- bson_append_bool (&insert, "dropDups", -1, opt->drop_dups);
- }
-
- if (opt->sparse != def_opt->sparse) {
- bson_append_bool (&insert, "sparse", -1, opt->sparse);
- }
-
- if (opt->expire_after_seconds != def_opt->expire_after_seconds) {
- bson_append_int32 (
- &insert, "expireAfterSeconds", -1, opt->expire_after_seconds);
- }
-
- if (opt->v != def_opt->v) {
- bson_append_int32 (&insert, "v", -1, opt->v);
- }
-
- if (opt->weights != def_opt->weights) {
- bson_append_document (&insert, "weights", -1, opt->weights);
- }
-
- if (opt->default_language != def_opt->default_language) {
- bson_append_utf8 (
- &insert, "default_language", -1, opt->default_language, -1);
- }
-
- if (opt->language_override != def_opt->language_override) {
- bson_append_utf8 (
- &insert, "language_override", -1, opt->language_override, -1);
- }
-
- col = mongoc_client_get_collection (
- collection->client, collection->db, "system.indexes");
-
- ret = mongoc_collection_insert (
- col,
- (mongoc_insert_flags_t) MONGOC_INSERT_NO_VALIDATE,
- &insert,
- NULL,
- error);
-
- mongoc_collection_destroy (col);
-
- bson_destroy (&insert);
-
- return ret;
-}
-
-
bool
mongoc_collection_create_index (mongoc_collection_t *collection,
                                const bson_t *keys,
                                const mongoc_index_opt_t *opt,
                                bson_error_t *error)
{
   bson_t reply;
   bool ret;

   /* Delegate to the opts-taking variant; this legacy entry point has no way
    * to surface the server reply, so it is discarded. The IGNORE_DEPRECATIONS
    * guards silence the compiler about calling a deprecated sibling. */
   BEGIN_IGNORE_DEPRECATIONS
   ret = mongoc_collection_create_index_with_opts (
      collection, keys, opt, NULL, &reply, error);
   END_IGNORE_DEPRECATIONS

   bson_destroy (&reply);

   return ret;
}
bool
mongoc_collection_create_index_with_opts (mongoc_collection_t *collection,
const bson_t *keys,
const mongoc_index_opt_t *opt,
const bson_t *opts,
bson_t *reply,
bson_error_t *error)
{
mongoc_cmd_parts_t parts;
const mongoc_index_opt_t *def_opt;
const mongoc_index_opt_geo_t *def_geo;
- bson_error_t local_error;
const char *name;
bson_t cmd = BSON_INITIALIZER;
bson_t ar;
bson_t doc;
bson_t storage_doc;
bson_t wt_doc;
const mongoc_index_opt_geo_t *geo_opt;
const mongoc_index_opt_storage_t *storage_opt;
const mongoc_index_opt_wt_t *wt_opt;
char *alloc_name = NULL;
bool ret = false;
bool reply_initialized = false;
bool has_collation = false;
mongoc_server_stream_t *server_stream = NULL;
bson_iter_t iter;
mongoc_cluster_t *cluster;
ENTRY;
BSON_ASSERT (collection);
BSON_ASSERT (keys);
def_opt = mongoc_index_opt_get_default ();
opt = opt ? opt : def_opt;
- mongoc_cmd_parts_init (&parts, collection->db, MONGOC_QUERY_NONE, &cmd);
+ mongoc_cmd_parts_init (
+ &parts, collection->client, collection->db, MONGOC_QUERY_NONE, &cmd);
parts.is_write_command = true;
/*
* Generate the key name if it was not provided.
*/
name = (opt->name != def_opt->name) ? opt->name : NULL;
if (!name) {
alloc_name = mongoc_collection_keys_to_index_string (keys);
if (alloc_name) {
name = alloc_name;
} else {
bson_set_error (
error,
MONGOC_ERROR_BSON,
MONGOC_ERROR_BSON_INVALID,
"Cannot generate index name from invalid `keys` argument");
GOTO (done);
}
}
/*
* Build our createIndexes command to send to the server.
*/
BSON_APPEND_UTF8 (&cmd, "createIndexes", collection->collection);
bson_append_array_begin (&cmd, "indexes", 7, &ar);
bson_append_document_begin (&ar, "0", 1, &doc);
BSON_APPEND_DOCUMENT (&doc, "key", keys);
BSON_APPEND_UTF8 (&doc, "name", name);
if (opt->background) {
BSON_APPEND_BOOL (&doc, "background", true);
}
if (opt->unique) {
BSON_APPEND_BOOL (&doc, "unique", true);
}
if (opt->drop_dups) {
BSON_APPEND_BOOL (&doc, "dropDups", true);
}
if (opt->sparse) {
BSON_APPEND_BOOL (&doc, "sparse", true);
}
if (opt->expire_after_seconds != def_opt->expire_after_seconds) {
BSON_APPEND_INT32 (&doc, "expireAfterSeconds", opt->expire_after_seconds);
}
if (opt->v != def_opt->v) {
BSON_APPEND_INT32 (&doc, "v", opt->v);
}
if (opt->weights && (opt->weights != def_opt->weights)) {
BSON_APPEND_DOCUMENT (&doc, "weights", opt->weights);
}
if (opt->default_language != def_opt->default_language) {
BSON_APPEND_UTF8 (&doc, "default_language", opt->default_language);
}
if (opt->language_override != def_opt->language_override) {
BSON_APPEND_UTF8 (&doc, "language_override", opt->language_override);
}
if (opt->partial_filter_expression) {
BSON_APPEND_DOCUMENT (
&doc, "partialFilterExpression", opt->partial_filter_expression);
}
if (opt->collation) {
BSON_APPEND_DOCUMENT (&doc, "collation", opt->collation);
has_collation = true;
}
if (opt->geo_options) {
geo_opt = opt->geo_options;
def_geo = mongoc_index_opt_geo_get_default ();
if (geo_opt->twod_sphere_version != def_geo->twod_sphere_version) {
BSON_APPEND_INT32 (
&doc, "2dsphereIndexVersion", geo_opt->twod_sphere_version);
}
if (geo_opt->twod_bits_precision != def_geo->twod_bits_precision) {
BSON_APPEND_INT32 (&doc, "bits", geo_opt->twod_bits_precision);
}
if (geo_opt->twod_location_min != def_geo->twod_location_min) {
BSON_APPEND_DOUBLE (&doc, "min", geo_opt->twod_location_min);
}
if (geo_opt->twod_location_max != def_geo->twod_location_max) {
BSON_APPEND_DOUBLE (&doc, "max", geo_opt->twod_location_max);
}
if (geo_opt->haystack_bucket_size != def_geo->haystack_bucket_size) {
BSON_APPEND_DOUBLE (&doc, "bucketSize", geo_opt->haystack_bucket_size);
}
}
if (opt->storage_options) {
storage_opt = opt->storage_options;
switch (storage_opt->type) {
case MONGOC_INDEX_STORAGE_OPT_WIREDTIGER:
wt_opt = (mongoc_index_opt_wt_t *) storage_opt;
BSON_APPEND_DOCUMENT_BEGIN (&doc, "storageEngine", &storage_doc);
BSON_APPEND_DOCUMENT_BEGIN (&storage_doc, "wiredTiger", &wt_doc);
BSON_APPEND_UTF8 (&wt_doc, "configString", wt_opt->config_str);
bson_append_document_end (&storage_doc, &wt_doc);
bson_append_document_end (&doc, &storage_doc);
break;
default:
break;
}
}
bson_append_document_end (&ar, &doc);
bson_append_array_end (&cmd, &ar);
server_stream = mongoc_cluster_stream_for_reads (
&collection->client->cluster, NULL, error);
if (!server_stream) {
GOTO (done);
}
if (opts && bson_iter_init (&iter, opts)) {
if (!mongoc_cmd_parts_append_opts (
&parts, &iter, server_stream->sd->max_wire_version, error)) {
GOTO (done);
}
}
if (has_collation &&
server_stream->sd->max_wire_version < WIRE_VERSION_COLLATION) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
"The selected server does not support collation");
GOTO (done);
}
cluster = &collection->client->cluster;
- ret = mongoc_cluster_run_command_monitored (
- cluster, &parts, server_stream, reply, &local_error);
+ if (mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
+ ret = mongoc_cluster_run_command_monitored (
+ cluster, &parts.assembled, reply, error);
+ } else {
+ _mongoc_bson_init_if_set (reply);
+ }
reply_initialized = true;
if (ret) {
if (reply) {
ret = !_mongoc_parse_wc_err (reply, error);
}
- } else {
- /*
- * If we failed due to the command not being found, then use the legacy
- * version which performs an insert into the system.indexes collection.
- */
- if (local_error.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND) {
- if (has_collation) {
- bson_set_error (error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
- "The selected server does not support collation");
- }
- ret = _mongoc_collection_create_index_legacy (
- collection, keys, opt, error);
- /* Clear the error reply from the first request */
- if (reply) {
- bson_reinit (reply);
- }
- } else if (error) {
- memcpy (error, &local_error, sizeof *error);
- }
}
done:
bson_destroy (&cmd);
bson_free (alloc_name);
mongoc_server_stream_cleanup (server_stream);
mongoc_cmd_parts_cleanup (&parts);
if (!reply_initialized && reply) {
bson_init (reply);
}
RETURN (ret);
}
bool
mongoc_collection_ensure_index (mongoc_collection_t *collection,
                                const bson_t *keys,
                                const mongoc_index_opt_t *opt,
                                bson_error_t *error)
{
   /* Deprecated alias: simply forwards to mongoc_collection_create_index.
    * The IGNORE_DEPRECATIONS guards silence the deprecated-call warning. */
   BEGIN_IGNORE_DEPRECATIONS
   return mongoc_collection_create_index (collection, keys, opt, error);
   END_IGNORE_DEPRECATIONS
}
mongoc_cursor_t *
-_mongoc_collection_find_indexes_legacy (mongoc_collection_t *collection,
- bson_error_t *error)
+_mongoc_collection_find_indexes_legacy (mongoc_collection_t *collection)
{
mongoc_database_t *db;
mongoc_collection_t *idx_collection;
mongoc_read_prefs_t *read_prefs;
bson_t query = BSON_INITIALIZER;
mongoc_cursor_t *cursor;
BSON_ASSERT (collection);
BSON_APPEND_UTF8 (&query, "ns", collection->ns);
db = mongoc_client_get_database (collection->client, collection->db);
BSON_ASSERT (db);
idx_collection = mongoc_database_get_collection (db, "system.indexes");
BSON_ASSERT (idx_collection);
/* Index Enumeration Spec: "run listIndexes on the primary node". */
read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
cursor = mongoc_collection_find_with_opts (
idx_collection, &query, NULL, read_prefs);
mongoc_read_prefs_destroy (read_prefs);
mongoc_collection_destroy (idx_collection);
mongoc_database_destroy (db);
return cursor;
}
mongoc_cursor_t *
mongoc_collection_find_indexes (mongoc_collection_t *collection,
bson_error_t *error)
{
mongoc_cursor_t *cursor;
+
+ cursor = mongoc_collection_find_indexes_with_opts (collection, NULL);
+
+ if (mongoc_cursor_error (cursor, error)) {
+ /* conform to deprecated API: unhandled errors cause a NULL return */
+ mongoc_cursor_destroy (cursor);
+ return NULL;
+ }
+
+ return cursor;
+}
+
+
+mongoc_cursor_t *
+mongoc_collection_find_indexes_with_opts (mongoc_collection_t *collection,
+ const bson_t *opts)
+{
+ mongoc_cursor_t *cursor;
bson_t cmd = BSON_INITIALIZER;
bson_t child;
+ bson_error_t error;
BSON_ASSERT (collection);
bson_append_utf8 (&cmd,
"listIndexes",
-1,
collection->collection,
collection->collectionlen);
BSON_APPEND_DOCUMENT_BEGIN (&cmd, "cursor", &child);
bson_append_document_end (&cmd, &child);
- /* Set slaveOk but no read preference: Index Enumeration Spec says
- * "listIndexes can be run on a secondary" when directly connected but
- * "run listIndexes on the primary node in replicaSet mode". */
- cursor = _mongoc_collection_cursor_new (
- collection, MONGOC_QUERY_SLAVE_OK, NULL /* read prefs */);
+ /* No read preference. Index Enumeration Spec: "run listIndexes on the
+ * primary node in replicaSet mode". */
+ cursor = _mongoc_cursor_new_with_opts (collection->client,
+ collection->ns,
+ false /* is_find */,
+ &cmd,
+ opts,
+ NULL /* read prefs */,
+ NULL /* read concern */);
+
_mongoc_cursor_cursorid_init (cursor, &cmd);
if (_mongoc_cursor_cursorid_prime (cursor)) {
/* intentionally empty */
} else {
- if (mongoc_cursor_error (cursor, error)) {
- mongoc_cursor_destroy (cursor);
-
- if (error->code == MONGOC_ERROR_COLLECTION_DOES_NOT_EXIST) {
+ if (mongoc_cursor_error (cursor, &error)) {
+ if (error.code == MONGOC_ERROR_COLLECTION_DOES_NOT_EXIST) {
bson_t empty_arr = BSON_INITIALIZER;
/* collection does not exist. in accordance with the spec we return
* an empty array. Also we need to clear out the error. */
- error->code = 0;
- error->domain = 0;
+ error.code = 0;
+ error.domain = 0;
+ mongoc_cursor_destroy (cursor);
cursor = _mongoc_collection_cursor_new (
- collection, MONGOC_QUERY_SLAVE_OK, NULL /* read prefs */);
+ collection, MONGOC_QUERY_SLAVE_OK, NULL /* read prefs */, true);
_mongoc_cursor_array_init (cursor, NULL, NULL);
_mongoc_cursor_array_set_bson (cursor, &empty_arr);
- } else if (error->code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND) {
+ } else if (error.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND) {
/* talking to an old server. */
/* clear out error. */
- error->code = 0;
- error->domain = 0;
- cursor = _mongoc_collection_find_indexes_legacy (collection, error);
- } else {
- /* other error, to be handled by caller */
- cursor = NULL;
+ error.code = 0;
+ error.domain = 0;
+ mongoc_cursor_destroy (cursor);
+ cursor = _mongoc_collection_find_indexes_legacy (collection);
}
}
}
bson_destroy (&cmd);
return cursor;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_insert_bulk --
*
* Bulk insert documents into a MongoDB collection.
*
* Parameters:
* @collection: A mongoc_collection_t.
* @flags: flags for the insert or 0.
* @documents: The documents to insert.
* @n_documents: The number of documents to insert.
* @write_concern: A write concern or NULL.
* @error: a location for an error or NULL.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* If the write concern does not dictate checking the result of the
* insert, then true may be returned even though the document was
* not actually inserted on the MongoDB server or cluster.
*
* Side effects:
* @collection->gle is setup, depending on write_concern->w value.
* @error may be set upon failure if non-NULL.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_insert_bulk (mongoc_collection_t *collection,
mongoc_insert_flags_t flags,
const bson_t **documents,
uint32_t n_documents,
const mongoc_write_concern_t *write_concern,
bson_error_t *error)
{
mongoc_write_command_t command;
mongoc_write_result_t result;
mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT;
uint32_t i;
bool ret;
BSON_ASSERT (collection);
BSON_ASSERT (documents);
if (!write_concern) {
write_concern = collection->write_concern;
}
if (!(flags & MONGOC_INSERT_NO_VALIDATE)) {
for (i = 0; i < n_documents; i++) {
if (!_mongoc_validate_new_document (documents[i], error)) {
RETURN (false);
}
}
}
bson_clear (&collection->gle);
_mongoc_write_result_init (&result);
write_flags.ordered = !(flags & MONGOC_INSERT_CONTINUE_ON_ERROR);
_mongoc_write_command_init_insert (
&command,
NULL,
+ NULL,
write_flags,
++collection->client->cluster.operation_id,
true);
for (i = 0; i < n_documents; i++) {
_mongoc_write_command_insert_append (&command, documents[i]);
}
_mongoc_collection_write_command_execute (
- &command, collection, write_concern, &result);
+ &command, collection, write_concern, NULL, &result);
collection->gle = bson_new ();
- ret = _mongoc_write_result_complete (&result,
- collection->client->error_api_version,
- write_concern,
- /* no error domain override */
- (mongoc_error_domain_t) 0,
- collection->gle,
- error);
+ ret = MONGOC_WRITE_RESULT_COMPLETE (&result,
+ collection->client->error_api_version,
+ write_concern,
+ /* no error domain override */
+ (mongoc_error_domain_t) 0,
+ collection->gle,
+ error);
_mongoc_write_result_destroy (&result);
_mongoc_write_command_destroy (&command);
return ret;
}
+typedef struct {
+ mongoc_bulk_write_flags_t write_flags;
+ mongoc_write_concern_t *write_concern;
+ bool write_concern_owned;
+ mongoc_client_session_t *client_session;
+ bool client_validation;
+ bson_t copied_opts;
+} mongoc_write_opts_parsed_t;
+
+
+static bool
+_mongoc_write_opts_parse (const bson_t *opts,
+ mongoc_collection_t *collection,
+ mongoc_write_opts_parsed_t *parsed,
+ bson_error_t *error)
+{
+ mongoc_bulk_write_flags_t default_flags = MONGOC_BULK_WRITE_FLAGS_INIT;
+ bson_iter_t iter;
+
+ bson_clear (&collection->gle);
+
+ parsed->write_flags = default_flags;
+ bson_init (&parsed->copied_opts);
+ parsed->write_concern = collection->write_concern;
+ parsed->write_concern_owned = false;
+ parsed->client_session = NULL;
+ parsed->client_validation = true;
+
+ if (opts) {
+ if (!bson_iter_init (&iter, opts)) {
+ bson_set_error (error,
+ MONGOC_ERROR_BSON,
+ MONGOC_ERROR_BSON_INVALID,
+ "Invalid 'opts' parameter.");
+ return false;
+ }
+
+ while (bson_iter_next (&iter)) {
+ if (!strcmp (bson_iter_key (&iter), "writeConcern")) {
+ parsed->write_concern =
+ _mongoc_write_concern_new_from_iter (&iter, error);
+ if (!parsed->write_concern) {
+ return false;
+ }
+
+ parsed->write_concern_owned = true;
+ continue;
+ }
+
+ if (!strcmp (bson_iter_key (&iter), "bypassDocumentValidation")) {
+ parsed->write_flags.bypass_document_validation =
+ bson_iter_as_bool (&iter)
+ ? MONGOC_BYPASS_DOCUMENT_VALIDATION_TRUE
+ : MONGOC_BYPASS_DOCUMENT_VALIDATION_FALSE;
+ continue;
+ }
+
+ if (!strcmp (bson_iter_key (&iter), "sessionId")) {
+ if (!_mongoc_client_session_from_iter (
+ collection->client, &iter, &parsed->client_session, error)) {
+ return false;
+ }
+ continue;
+ }
+
+ if (!strcmp (bson_iter_key (&iter), "validate")) {
+ parsed->client_validation = bson_iter_as_bool (&iter);
+ if (parsed->client_validation && !BSON_ITER_HOLDS_BOOL (&iter)) {
+ /* reserve truthy values besides boolean "true" for future
+ * fine-grained validation control, see CDRIVER-2296
+ */
+ bson_set_error (
+ error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "Invalid type for option \"validate\", \"%s\":"
+ " \"validate\" must be a boolean.",
+ _mongoc_bson_type_to_str (bson_iter_type (&iter)));
+ return false;
+ }
+ continue;
+ }
+
+ if (!strcmp (bson_iter_key (&iter), "ordered")) {
+ parsed->write_flags.ordered = bson_iter_as_bool (&iter);
+ continue;
+ }
+
+ if (!strcmp (bson_iter_key (&iter), "collation")) {
+ parsed->write_flags.has_collation = true;
+ /* FALL THROUGH */
+ }
+
+ if (!bson_append_value (&parsed->copied_opts,
+ bson_iter_key (&iter),
+ -1,
+ bson_iter_value (&iter))) {
+ bson_set_error (error,
+ MONGOC_ERROR_BSON,
+ MONGOC_ERROR_BSON_INVALID,
+ "Invalid 'opts' parameter.");
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+
+static void
+_mongoc_write_opts_cleanup (mongoc_write_opts_parsed_t *parsed)
+{
+ if (parsed->write_concern_owned) {
+ mongoc_write_concern_destroy (parsed->write_concern);
+ }
+
+ bson_destroy (&parsed->copied_opts);
+}
+
+
+bool
+mongoc_collection_insert (mongoc_collection_t *collection,
+ mongoc_insert_flags_t flags,
+ const bson_t *document,
+ const mongoc_write_concern_t *write_concern,
+ bson_error_t *error)
+{
+ bson_t opts = BSON_INITIALIZER;
+ bson_t reply;
+ bool r;
+
+ bson_clear (&collection->gle);
+
+ if (flags & MONGOC_INSERT_NO_VALIDATE) {
+ bson_append_bool (&opts, "validate", 8, false);
+ }
+
+ if (write_concern) {
+ mongoc_write_concern_append ((mongoc_write_concern_t *) write_concern,
+ &opts);
+ }
+
+ r =
+ mongoc_collection_insert_one (collection, document, &opts, &reply, error);
+
+ collection->gle = bson_copy (&reply);
+ bson_destroy (&reply);
+ bson_destroy (&opts);
+
+ return r;
+}
+
+
/*
*--------------------------------------------------------------------------
*
- * mongoc_collection_insert --
+ * mongoc_collection_insert_one --
*
* Insert a document into a MongoDB collection.
*
* Parameters:
* @collection: A mongoc_collection_t.
- * @flags: flags for the insert or 0.
* @document: The document to insert.
- * @write_concern: A write concern or NULL.
- * @error: a location for an error or NULL.
+ * @opts: Standard command options.
+ * @reply: Optional. Uninitialized doc to receive the update result.
+ * @error: A location for an error or NULL.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* If the write concern does not dictate checking the result of the
* insert, then true may be returned even though the document was
* not actually inserted on the MongoDB server or cluster.
*
- * Side effects:
- * @collection->gle is setup, depending on write_concern->w value.
- * @error may be set upon failure if non-NULL.
- *
*--------------------------------------------------------------------------
*/
bool
-mongoc_collection_insert (mongoc_collection_t *collection,
- mongoc_insert_flags_t flags,
- const bson_t *document,
- const mongoc_write_concern_t *write_concern,
- bson_error_t *error)
+mongoc_collection_insert_one (mongoc_collection_t *collection,
+ const bson_t *document,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
{
- mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT;
+ mongoc_write_opts_parsed_t parsed;
mongoc_write_command_t command;
mongoc_write_result_t result;
bool ret;
ENTRY;
BSON_ASSERT (collection);
BSON_ASSERT (document);
- bson_clear (&collection->gle);
+ _mongoc_bson_init_if_set (reply);
- if (!write_concern) {
- write_concern = collection->write_concern;
+ if (!_mongoc_write_opts_parse (opts, collection, &parsed, error)) {
+ _mongoc_write_opts_cleanup (&parsed);
+ return false;
}
- if (!(flags & MONGOC_INSERT_NO_VALIDATE) &&
+ if (parsed.client_validation &&
!_mongoc_validate_new_document (document, error)) {
RETURN (false);
}
_mongoc_write_result_init (&result);
_mongoc_write_command_init_insert (
&command,
document,
- write_flags,
+ &parsed.copied_opts,
+ parsed.write_flags,
++collection->client->cluster.operation_id,
false);
- _mongoc_collection_write_command_execute (
- &command, collection, write_concern, &result);
+ _mongoc_collection_write_command_execute (&command,
+ collection,
+ parsed.write_concern,
+ parsed.client_session,
+ &result);
+
+ ret = MONGOC_WRITE_RESULT_COMPLETE (&result,
+ collection->client->error_api_version,
+ parsed.write_concern,
+ /* no error domain override */
+ (mongoc_error_domain_t) 0,
+ reply,
+ error,
+ "insertedCount");
- collection->gle = bson_new ();
- ret = _mongoc_write_result_complete (&result,
- collection->client->error_api_version,
- write_concern,
- /* no error domain override */
- (mongoc_error_domain_t) 0,
- collection->gle,
- error);
+ _mongoc_write_result_destroy (&result);
+ _mongoc_write_command_destroy (&command);
+ _mongoc_write_opts_cleanup (&parsed);
+ RETURN (ret);
+}
+
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * mongoc_collection_insert_many --
+ *
+ * Insert documents into a MongoDB collection. Replaces
+ * mongoc_collection_insert_bulk.
+ *
+ * Parameters:
+ * @collection: A mongoc_collection_t.
+ * @documents: The documents to insert.
+ * @n_documents: Length of @documents array.
+ * @opts: Standard command options.
+ * @reply: Optional. Uninitialized doc to receive the update result.
+ * @error: A location for an error or NULL.
+ *
+ * Returns:
+ * true if successful; otherwise false and @error is set.
+ *
+ * If the write concern does not dictate checking the result of the
+ * insert, then true may be returned even though the document was
+ * not actually inserted on the MongoDB server or cluster.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+bool
+mongoc_collection_insert_many (mongoc_collection_t *collection,
+ const bson_t **documents,
+ size_t n_documents,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ mongoc_write_opts_parsed_t parsed;
+ mongoc_write_command_t command;
+ mongoc_write_result_t result;
+ size_t i;
+ bool ret;
+
+ ENTRY;
+
+ BSON_ASSERT (collection);
+ BSON_ASSERT (documents);
+
+ _mongoc_bson_init_if_set (reply);
+
+ if (!_mongoc_write_opts_parse (opts, collection, &parsed, error)) {
+ _mongoc_write_opts_cleanup (&parsed);
+ return false;
+ }
+
+ _mongoc_write_result_init (&result);
+ _mongoc_write_command_init_insert (
+ &command,
+ NULL,
+ &parsed.copied_opts,
+ parsed.write_flags,
+ ++collection->client->cluster.operation_id,
+ false);
+
+ for (i = 0; i < n_documents; i++) {
+ if (parsed.client_validation &&
+ !_mongoc_validate_new_document (documents[i], error)) {
+ ret = false;
+ GOTO (done);
+ }
+
+ _mongoc_write_command_insert_append (&command, documents[i]);
+ }
+
+ _mongoc_collection_write_command_execute (&command,
+ collection,
+ parsed.write_concern,
+ parsed.client_session,
+ &result);
+
+ ret = MONGOC_WRITE_RESULT_COMPLETE (&result,
+ collection->client->error_api_version,
+ parsed.write_concern,
+ /* no error domain override */
+ (mongoc_error_domain_t) 0,
+ reply,
+ error,
+ "insertedCount");
+
+done:
_mongoc_write_result_destroy (&result);
_mongoc_write_command_destroy (&command);
+ _mongoc_write_opts_cleanup (&parsed);
RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_update --
*
* Updates one or more documents matching @selector with @update.
*
* Parameters:
* @collection: A mongoc_collection_t.
* @flags: The flags for the update.
* @selector: A bson_t containing your selector.
* @update: A bson_t containing your update document.
* @write_concern: The write concern or NULL.
* @error: A location for an error or NULL.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* @collection->gle is setup, depending on write_concern->w value.
* @error is setup upon failure.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_update (mongoc_collection_t *collection,
mongoc_update_flags_t uflags,
const bson_t *selector,
const bson_t *update,
const mongoc_write_concern_t *write_concern,
bson_error_t *error)
{
mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT;
mongoc_write_command_t command;
mongoc_write_result_t result;
bson_iter_t iter;
bool ret;
int flags = uflags;
bson_t opts;
ENTRY;
BSON_ASSERT (collection);
BSON_ASSERT (selector);
BSON_ASSERT (update);
bson_clear (&collection->gle);
if (!write_concern) {
write_concern = collection->write_concern;
}
if (!((uint32_t) flags & MONGOC_UPDATE_NO_VALIDATE) &&
bson_iter_init (&iter, update) && bson_iter_next (&iter)) {
if (bson_iter_key (&iter)[0] == '$') {
/* update document, all keys must be $-operators */
if (!_mongoc_validate_update (update, error)) {
return false;
}
} else {
if (!_mongoc_validate_replace (update, error)) {
return false;
}
}
}
bson_init (&opts);
BSON_APPEND_BOOL (&opts, "upsert", !!(flags & MONGOC_UPDATE_UPSERT));
BSON_APPEND_BOOL (&opts, "multi", !!(flags & MONGOC_UPDATE_MULTI_UPDATE));
_mongoc_write_result_init (&result);
_mongoc_write_command_init_update (
&command,
selector,
update,
&opts,
write_flags,
++collection->client->cluster.operation_id);
bson_destroy (&opts);
_mongoc_collection_write_command_execute (
- &command, collection, write_concern, &result);
+ &command, collection, write_concern, NULL, &result);
collection->gle = bson_new ();
- ret = _mongoc_write_result_complete (&result,
- collection->client->error_api_version,
- write_concern,
- /* no error domain override */
- (mongoc_error_domain_t) 0,
- collection->gle,
- error);
+ ret = MONGOC_WRITE_RESULT_COMPLETE (&result,
+ collection->client->error_api_version,
+ write_concern,
+ /* no error domain override */
+ (mongoc_error_domain_t) 0,
+ collection->gle,
+ error);
+
+ _mongoc_write_result_destroy (&result);
+ _mongoc_write_command_destroy (&command);
+
+ RETURN (ret);
+}
+
+static bool
+_mongoc_collection_update_or_replace (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *update,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error,
+ bool is_multi,
+ bool is_update)
+{
+ mongoc_write_opts_parsed_t parsed;
+ mongoc_write_command_t command;
+ mongoc_write_result_t result;
+ bool ret;
+
+ ENTRY;
+
+ BSON_ASSERT (collection);
+ BSON_ASSERT (selector);
+ BSON_ASSERT (update);
+
+ _mongoc_bson_init_if_set (reply);
+
+ if (!_mongoc_write_opts_parse (opts, collection, &parsed, error)) {
+ _mongoc_write_opts_cleanup (&parsed);
+ return false;
+ }
+
+ if (parsed.client_validation) {
+ /* update document, all keys must be $-operators */
+ if (is_update) {
+ if (!_mongoc_validate_update (update, error)) {
+ return false;
+ }
+ } else if (!_mongoc_validate_replace (update, error)) {
+ return false;
+ }
+ }
+
+ if (is_multi) {
+ bson_append_bool (&parsed.copied_opts, "multi", 5, true);
+ }
+
+ _mongoc_write_result_init (&result);
+
+ _mongoc_write_command_init_update (
+ &command,
+ selector,
+ update,
+ &parsed.copied_opts,
+ parsed.write_flags,
+ ++collection->client->cluster.operation_id);
+
+ _mongoc_collection_write_command_execute (&command,
+ collection,
+ parsed.write_concern,
+ parsed.client_session,
+ &result);
+
+ /* set fields described in CRUD spec for the UpdateResult */
+ ret = MONGOC_WRITE_RESULT_COMPLETE (&result,
+ collection->client->error_api_version,
+ parsed.write_concern,
+ /* no error domain override */
+ (mongoc_error_domain_t) 0,
+ reply,
+ error,
+ "modifiedCount",
+ "matchedCount",
+ "upsertedId");
_mongoc_write_result_destroy (&result);
_mongoc_write_command_destroy (&command);
+ _mongoc_write_opts_cleanup (&parsed);
RETURN (ret);
}
+bool
+mongoc_collection_update_one (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *update,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ return _mongoc_collection_update_or_replace (collection,
+ selector,
+ update,
+ opts,
+ reply,
+ error,
+ false /* is_multi */,
+ true /* is_update */);
+}
+
+bool
+mongoc_collection_update_many (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *update,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ return _mongoc_collection_update_or_replace (collection,
+ selector,
+ update,
+ opts,
+ reply,
+ error,
+ true /* is_multi */,
+ true /* is_update */);
+}
+
+bool
+mongoc_collection_replace_one (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *replacement,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ return _mongoc_collection_update_or_replace (collection,
+ selector,
+ replacement,
+ opts,
+ reply,
+ error,
+ false /* is_multi */,
+ false /* is_update */);
+}
+
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_save --
*
* Save @document to @collection.
*
* If the document has an _id field, it will be updated. Otherwise,
* the document will be inserted into the collection.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* @error is set upon failure if non-NULL.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_save (mongoc_collection_t *collection,
const bson_t *document,
const mongoc_write_concern_t *write_concern,
bson_error_t *error)
{
bson_iter_t iter;
bool ret;
bson_t selector;
BSON_ASSERT (collection);
BSON_ASSERT (document);
+ BEGIN_IGNORE_DEPRECATIONS
if (!bson_iter_init_find (&iter, document, "_id")) {
return mongoc_collection_insert (
collection, MONGOC_INSERT_NONE, document, write_concern, error);
}
bson_init (&selector);
if (!bson_append_iter (&selector, NULL, 0, &iter)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"Failed to append bson to create update.");
bson_destroy (&selector);
return false;
}
/* this document will be inserted, validate same as for inserts */
if (!_mongoc_validate_new_document (document, error)) {
return false;
}
ret = mongoc_collection_update (collection,
MONGOC_UPDATE_UPSERT |
MONGOC_UPDATE_NO_VALIDATE,
&selector,
document,
write_concern,
error);
+ END_IGNORE_DEPRECATIONS
bson_destroy (&selector);
return ret;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_remove --
*
* Delete one or more items from a collection. If you want to
* limit to a single delete, provided MONGOC_REMOVE_SINGLE_REMOVE
* for @flags.
*
+ * Superseded by mongoc_collection_delete_one/many.
+ *
* Parameters:
* @collection: A mongoc_collection_t.
* @flags: the delete flags or 0.
* @selector: A selector of documents to delete.
* @write_concern: A write concern or NULL. If NULL, the default
* write concern for the collection will be used.
* @error: A location for an error or NULL.
*
* Returns:
* true if successful; otherwise false and error is set.
*
* If the write concern does not dictate checking the result, this
* function may return true even if it failed.
*
* Side effects:
* @collection->gle is setup, depending on write_concern->w value.
* @error is setup upon failure.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_remove (mongoc_collection_t *collection,
mongoc_remove_flags_t flags,
const bson_t *selector,
const mongoc_write_concern_t *write_concern,
bson_error_t *error)
{
mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT;
mongoc_write_command_t command;
mongoc_write_result_t result;
bson_t opts;
bool ret;
ENTRY;
BSON_ASSERT (collection);
BSON_ASSERT (selector);
bson_clear (&collection->gle);
if (!write_concern) {
write_concern = collection->write_concern;
}
bson_init (&opts);
BSON_APPEND_INT32 (
&opts, "limit", flags & MONGOC_REMOVE_SINGLE_REMOVE ? 1 : 0);
_mongoc_write_result_init (&result);
++collection->client->cluster.operation_id;
_mongoc_write_command_init_delete (&command,
selector,
+ NULL,
&opts,
write_flags,
collection->client->cluster.operation_id);
bson_destroy (&opts);
_mongoc_collection_write_command_execute (
- &command, collection, write_concern, &result);
+ &command, collection, write_concern, NULL, &result);
collection->gle = bson_new ();
- ret = _mongoc_write_result_complete (&result,
- collection->client->error_api_version,
- write_concern,
- 0 /* no error domain override */,
- collection->gle,
- error);
+ ret = MONGOC_WRITE_RESULT_COMPLETE (&result,
+ collection->client->error_api_version,
+ write_concern,
+ 0 /* no error domain override */,
+ collection->gle,
+ error);
_mongoc_write_result_destroy (&result);
_mongoc_write_command_destroy (&command);
RETURN (ret);
}
bool
mongoc_collection_delete (mongoc_collection_t *collection,
mongoc_delete_flags_t flags,
const bson_t *selector,
const mongoc_write_concern_t *write_concern,
bson_error_t *error)
{
return mongoc_collection_remove (collection,
(mongoc_remove_flags_t) flags,
selector,
write_concern,
error);
}
+static bool
+_mongoc_delete_one_or_many (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error,
+ bool is_multi)
+{
+ mongoc_write_opts_parsed_t parsed;
+ mongoc_write_command_t command;
+ mongoc_write_result_t result;
+ bson_t limit_opt = BSON_INITIALIZER;
+ bool ret;
+
+ ENTRY;
+
+ BSON_ASSERT (collection);
+ BSON_ASSERT (selector);
+
+ _mongoc_bson_init_if_set (reply);
+
+ if (!_mongoc_write_opts_parse (opts, collection, &parsed, error)) {
+ _mongoc_write_opts_cleanup (&parsed);
+ return false;
+ }
+
+ /* limit of 0 or 1 is required for each delete operation */
+ bson_append_int32 (&limit_opt, "limit", 5, is_multi ? 0 : 1);
+
+ _mongoc_write_result_init (&result);
+
+ _mongoc_write_command_init_delete (
+ &command,
+ selector,
+ &parsed.copied_opts,
+ &limit_opt,
+ parsed.write_flags,
+ ++collection->client->cluster.operation_id);
+
+ _mongoc_collection_write_command_execute (&command,
+ collection,
+ parsed.write_concern,
+ parsed.client_session,
+ &result);
+
+ /* set field described in CRUD spec for the DeleteResult */
+ ret = MONGOC_WRITE_RESULT_COMPLETE (&result,
+ collection->client->error_api_version,
+ parsed.write_concern,
+ /* no error domain override */
+ (mongoc_error_domain_t) 0,
+ reply,
+ error,
+ "deletedCount");
+
+ _mongoc_write_result_destroy (&result);
+ _mongoc_write_command_destroy (&command);
+ _mongoc_write_opts_cleanup (&parsed);
+ bson_destroy (&limit_opt);
+
+ RETURN (ret);
+}
+
+
+bool
+mongoc_collection_delete_one (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ return _mongoc_delete_one_or_many (
+ collection, selector, opts, reply, error, false /* is_multi */);
+}
+
+
+bool
+mongoc_collection_delete_many (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ return _mongoc_delete_one_or_many (
+ collection, selector, opts, reply, error, true /* is_multi */);
+}
+
+
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_get_read_prefs --
*
* Fetch the default read preferences for the collection.
*
* Returns:
* A mongoc_read_prefs_t that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const mongoc_read_prefs_t *
mongoc_collection_get_read_prefs (const mongoc_collection_t *collection)
{
BSON_ASSERT (collection);
return collection->read_prefs;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_set_read_prefs --
*
* Sets the default read preferences for the collection instance.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_collection_set_read_prefs (mongoc_collection_t *collection,
const mongoc_read_prefs_t *read_prefs)
{
BSON_ASSERT (collection);
if (collection->read_prefs) {
mongoc_read_prefs_destroy (collection->read_prefs);
collection->read_prefs = NULL;
}
if (read_prefs) {
collection->read_prefs = mongoc_read_prefs_copy (read_prefs);
}
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_get_read_concern --
*
* Fetches the default read concern for the collection instance.
*
* Returns:
* A mongoc_read_concern_t that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const mongoc_read_concern_t *
mongoc_collection_get_read_concern (const mongoc_collection_t *collection)
{
BSON_ASSERT (collection);
return collection->read_concern;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_set_read_concern --
*
* Sets the default read concern for the collection instance.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_collection_set_read_concern (mongoc_collection_t *collection,
const mongoc_read_concern_t *read_concern)
{
BSON_ASSERT (collection);
if (collection->read_concern) {
mongoc_read_concern_destroy (collection->read_concern);
collection->read_concern = NULL;
}
if (read_concern) {
collection->read_concern = mongoc_read_concern_copy (read_concern);
}
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_get_write_concern --
*
* Fetches the default write concern for the collection instance.
*
* Returns:
* A mongoc_write_concern_t that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const mongoc_write_concern_t *
mongoc_collection_get_write_concern (const mongoc_collection_t *collection)
{
BSON_ASSERT (collection);
return collection->write_concern;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_set_write_concern --
*
* Sets the default write concern for the collection instance.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_collection_set_write_concern (
mongoc_collection_t *collection, const mongoc_write_concern_t *write_concern)
{
BSON_ASSERT (collection);
if (collection->write_concern) {
mongoc_write_concern_destroy (collection->write_concern);
collection->write_concern = NULL;
}
if (write_concern) {
collection->write_concern = mongoc_write_concern_copy (write_concern);
}
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_get_name --
*
* Returns the name of the collection, excluding the database name.
*
* Returns:
* A string which should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const char *
mongoc_collection_get_name (mongoc_collection_t *collection)
{
BSON_ASSERT (collection);
return collection->collection;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_get_last_error --
*
* Returns a bulk result.
*
* Returns:
* NULL or a bson_t that should not be modified or freed. This value
* is not guaranteed to be persistent between calls into the
* mongoc_collection_t instance, and therefore must be copied if
* you would like to keep it around.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const bson_t *
mongoc_collection_get_last_error (
const mongoc_collection_t *collection) /* IN */
{
BSON_ASSERT (collection);
return collection->gle;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_validate --
*
* Helper to call the validate command on the MongoDB server to
* validate the collection.
*
* Options may be additional options, or NULL.
* Currently supported options are:
*
* "full": Boolean
*
* If full is true, then perform a more resource intensive
* validation.
*
* The result is stored in reply.
*
* Returns:
* true if successful; otherwise false and @error is set.
*
* Side effects:
* @reply is set if successful.
* @error may be set.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_validate (mongoc_collection_t *collection, /* IN */
const bson_t *options, /* IN */
bson_t *reply, /* OUT */
bson_error_t *error) /* IN */
{
bson_iter_t iter;
bson_t cmd = BSON_INITIALIZER;
bool ret = false;
bool reply_initialized = false;
BSON_ASSERT (collection);
if (options && bson_iter_init_find (&iter, options, "full") &&
!BSON_ITER_HOLDS_BOOL (&iter)) {
bson_set_error (error,
MONGOC_ERROR_BSON,
MONGOC_ERROR_BSON_INVALID,
"'full' must be a boolean value.");
goto cleanup;
}
bson_append_utf8 (
&cmd, "validate", 8, collection->collection, collection->collectionlen);
if (options) {
bson_concat (&cmd, options);
}
ret =
mongoc_collection_command_simple (collection, &cmd, NULL, reply, error);
reply_initialized = true;
cleanup:
bson_destroy (&cmd);
if (reply && !reply_initialized) {
bson_init (reply);
}
return ret;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_rename --
*
* Rename the collection to @new_name.
*
* If @new_db is NULL, the same db will be used.
*
* If @drop_target_before_rename is true, then a collection named
* @new_name will be dropped before renaming @collection to
* @new_name.
*
* Returns:
* true on success; false on failure and @error is set.
*
* Side effects:
* @error is set on failure.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_rename (mongoc_collection_t *collection,
const char *new_db,
const char *new_name,
bool drop_target_before_rename,
bson_error_t *error)
{
return mongoc_collection_rename_with_opts (
collection, new_db, new_name, drop_target_before_rename, NULL, error);
}
bool
mongoc_collection_rename_with_opts (mongoc_collection_t *collection,
const char *new_db,
const char *new_name,
bool drop_target_before_rename,
const bson_t *opts,
bson_error_t *error)
{
bson_t cmd = BSON_INITIALIZER;
char newns[MONGOC_NAMESPACE_MAX + 1];
bool ret;
BSON_ASSERT (collection);
BSON_ASSERT (new_name);
if (strchr (new_name, '$')) {
bson_set_error (error,
MONGOC_ERROR_NAMESPACE,
MONGOC_ERROR_NAMESPACE_INVALID,
"\"%s\" is an invalid collection name.",
new_name);
return false;
}
bson_snprintf (
newns, sizeof newns, "%s.%s", new_db ? new_db : collection->db, new_name);
BSON_APPEND_UTF8 (&cmd, "renameCollection", collection->ns);
BSON_APPEND_UTF8 (&cmd, "to", newns);
if (drop_target_before_rename) {
BSON_APPEND_BOOL (&cmd, "dropTarget", true);
}
ret = _mongoc_client_command_with_opts (collection->client,
"admin",
&cmd,
MONGOC_CMD_WRITE,
opts,
MONGOC_QUERY_NONE,
collection->read_prefs,
collection->read_concern,
collection->write_concern,
NULL, /* reply */
error);
if (ret) {
if (new_db) {
bson_snprintf (collection->db, sizeof collection->db, "%s", new_db);
}
bson_snprintf (
collection->collection, sizeof collection->collection, "%s", new_name);
collection->collectionlen = (int) strlen (collection->collection);
bson_snprintf (collection->ns,
sizeof collection->ns,
"%s.%s",
collection->db,
new_name);
collection->nslen = (int) strlen (collection->ns);
}
bson_destroy (&cmd);
return ret;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_stats --
*
* Fetches statistics about the collection.
*
* The result is stored in @stats, which should NOT be an initialized
* bson_t or a leak will occur.
*
* @stats, @options, and @error are optional.
*
* Returns:
* true on success and @stats is set.
* false on failure and @error is set.
*
* Side effects:
* @stats and @error.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_stats (mongoc_collection_t *collection,
const bson_t *options,
bson_t *stats,
bson_error_t *error)
{
bson_iter_t iter;
bson_t cmd = BSON_INITIALIZER;
bool ret;
BSON_ASSERT (collection);
if (options && bson_iter_init_find (&iter, options, "scale") &&
!BSON_ITER_HOLDS_INT32 (&iter)) {
bson_set_error (error,
MONGOC_ERROR_BSON,
MONGOC_ERROR_BSON_INVALID,
"'scale' must be an int32 value.");
return false;
}
BSON_APPEND_UTF8 (&cmd, "collStats", collection->collection);
if (options) {
bson_concat (&cmd, options);
}
/* Server Selection Spec: "may-use-secondary" commands SHOULD take a read
* preference argument and otherwise MUST use the default read preference
* from client, database or collection configuration. */
ret = mongoc_collection_command_simple (
collection, &cmd, collection->read_prefs, stats, error);
bson_destroy (&cmd);
return ret;
}
mongoc_bulk_operation_t *
mongoc_collection_create_bulk_operation (
mongoc_collection_t *collection,
bool ordered,
const mongoc_write_concern_t *write_concern)
+{
+ bson_t opts = BSON_INITIALIZER;
+ mongoc_bulk_operation_t *bulk;
+ bool wc_ok = true;
+
+ bson_append_bool (&opts, "ordered", 7, ordered);
+ if (write_concern) {
+ wc_ok = mongoc_write_concern_append (
+ (mongoc_write_concern_t *) write_concern, &opts);
+ }
+
+ bulk = mongoc_collection_create_bulk_operation_with_opts (collection, &opts);
+
+ bson_destroy (&opts);
+
+ if (!wc_ok) {
+ bson_set_error (&bulk->result.error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "invalid writeConcern");
+ }
+
+ return bulk;
+}
+
+
+mongoc_bulk_operation_t *
+mongoc_collection_create_bulk_operation_with_opts (
+ mongoc_collection_t *collection, const bson_t *opts)
{
mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT;
+ bson_iter_t iter;
+ mongoc_write_concern_t *wc = NULL;
+ mongoc_bulk_operation_t *bulk;
+ bson_error_t wc_invalid = {0};
+
BSON_ASSERT (collection);
- if (!write_concern) {
- write_concern = collection->write_concern;
+ if (opts && bson_iter_init_find (&iter, opts, "writeConcern")) {
+ wc = _mongoc_write_concern_new_from_iter (&iter, &wc_invalid);
}
- write_flags.ordered = ordered;
+ write_flags.ordered = _mongoc_lookup_bool (opts, "ordered", true);
- return _mongoc_bulk_operation_new (collection->client,
+ bulk = _mongoc_bulk_operation_new (collection->client,
collection->db,
collection->collection,
write_flags,
- write_concern);
+ wc ? wc : collection->write_concern);
+
+ mongoc_write_concern_destroy (wc); /* NULL is ok */
+
+ if (opts && bson_iter_init_find (&iter, opts, "sessionId")) {
+ _mongoc_client_session_from_iter (
+ collection->client, &iter, &bulk->session, &bulk->result.error);
+ }
+
+ if (wc_invalid.domain) {
+ /* _mongoc_write_concern_new_from_iter failed, above */
+ memcpy (&bulk->result.error, &wc_invalid, sizeof (bson_error_t));
+ }
+
+ return bulk;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_find_and_modify_with_opts --
*
* Find a document in @collection matching @query, applying @opts.
*
* If @reply is not NULL, then the result document will be placed
* in reply and should be released with bson_destroy().
*
* See http://docs.mongodb.org/manual/reference/command/findAndModify/
* for more information.
*
* Returns:
* true on success; false on failure.
*
* Side effects:
* reply is initialized.
* error is set if false is returned.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_find_and_modify_with_opts (
mongoc_collection_t *collection,
const bson_t *query,
const mongoc_find_and_modify_opts_t *opts,
bson_t *reply,
bson_error_t *error)
{
mongoc_cluster_t *cluster;
mongoc_cmd_parts_t parts;
mongoc_server_stream_t *server_stream;
+ bool is_retryable;
bson_iter_t iter;
bson_iter_t inner;
const char *name;
bson_t reply_local;
bson_t *reply_ptr;
bool ret;
bson_t command = BSON_INITIALIZER;
+ mongoc_server_stream_t *retry_server_stream = NULL;
ENTRY;
BSON_ASSERT (collection);
BSON_ASSERT (query);
reply_ptr = reply ? reply : &reply_local;
bson_init (reply_ptr);
cluster = &collection->client->cluster;
server_stream = mongoc_cluster_stream_for_writes (cluster, error);
if (!server_stream) {
bson_destroy (&command);
RETURN (false);
}
name = mongoc_collection_get_name (collection);
BSON_APPEND_UTF8 (&command, "findAndModify", name);
BSON_APPEND_DOCUMENT (&command, "query", query);
if (opts->sort) {
BSON_APPEND_DOCUMENT (&command, "sort", opts->sort);
}
if (opts->update) {
BSON_APPEND_DOCUMENT (&command, "update", opts->update);
}
if (opts->fields) {
BSON_APPEND_DOCUMENT (&command, "fields", opts->fields);
}
if (opts->flags & MONGOC_FIND_AND_MODIFY_REMOVE) {
BSON_APPEND_BOOL (&command, "remove", true);
}
if (opts->flags & MONGOC_FIND_AND_MODIFY_UPSERT) {
BSON_APPEND_BOOL (&command, "upsert", true);
}
if (opts->flags & MONGOC_FIND_AND_MODIFY_RETURN_NEW) {
BSON_APPEND_BOOL (&command, "new", true);
}
if (opts->bypass_document_validation !=
MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
BSON_APPEND_BOOL (&command,
"bypassDocumentValidation",
!!opts->bypass_document_validation);
}
if (opts->max_time_ms > 0) {
BSON_APPEND_INT32 (&command, "maxTimeMS", opts->max_time_ms);
}
if (!bson_has_field (&opts->extra, "writeConcern")) {
if (server_stream->sd->max_wire_version >=
WIRE_VERSION_FAM_WRITE_CONCERN) {
if (!mongoc_write_concern_is_valid (collection->write_concern)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"The write concern is invalid.");
bson_destroy (&command);
mongoc_server_stream_cleanup (server_stream);
RETURN (false);
}
if (mongoc_write_concern_is_acknowledged (collection->write_concern)) {
- _BSON_APPEND_WRITE_CONCERN (&command, collection->write_concern);
+ BSON_APPEND_DOCUMENT (
+ &command,
+ "writeConcern",
+ _mongoc_write_concern_get_bson (collection->write_concern));
}
}
}
- mongoc_cmd_parts_init (&parts, collection->db, MONGOC_QUERY_NONE, &command);
+ mongoc_cmd_parts_init (
+ &parts, collection->client, collection->db, MONGOC_QUERY_NONE, &command);
+ parts.is_read_command = true;
parts.is_write_command = true;
if (bson_iter_init (&iter, &opts->extra)) {
bool ok = mongoc_cmd_parts_append_opts (
&parts, &iter, server_stream->sd->max_wire_version, error);
if (!ok) {
bson_destroy (&command);
mongoc_server_stream_cleanup (server_stream);
RETURN (false);
}
}
parts.assembled.operation_id = ++cluster->operation_id;
+ if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
+ bson_destroy (&command);
+ mongoc_cmd_parts_cleanup (&parts);
+ mongoc_server_stream_cleanup (server_stream);
+ RETURN (false);
+ }
+
+ is_retryable = parts.is_retryable_write;
+
+ /* increment the transaction number for the first attempt of each retryable
+ * write command */
+ if (is_retryable) {
+ bson_iter_t txn_number_iter;
+ BSON_ASSERT (bson_iter_init_find (
+ &txn_number_iter, parts.assembled.command, "txnNumber"));
+ bson_iter_overwrite_int64 (
+ &txn_number_iter,
+ ++parts.assembled.session->server_session->txn_number);
+ }
+retry:
ret = mongoc_cluster_run_command_monitored (
- cluster, &parts, server_stream, reply_ptr, error);
+ cluster, &parts.assembled, reply_ptr, error);
+
+ /* If a retryable error is encountered and the write is retryable, select
+ * a new writable stream and retry. If server selection fails or the selected
+ * server does not support retryable writes, fall through and allow the
+ * original error to be reported. */
+ if (!ret && is_retryable &&
+ (error->domain == MONGOC_ERROR_STREAM ||
+ mongoc_cluster_is_not_master_error (error))) {
+ bson_error_t ignored_error;
+
+ /* each write command may be retried at most once */
+ is_retryable = false;
+
+ if (retry_server_stream) {
+ mongoc_server_stream_cleanup (retry_server_stream);
+ }
+
+ retry_server_stream =
+ mongoc_cluster_stream_for_writes (cluster, &ignored_error);
+
+ if (retry_server_stream && retry_server_stream->sd->max_wire_version >=
+ WIRE_VERSION_RETRY_WRITES) {
+ parts.assembled.server_stream = retry_server_stream;
+ GOTO (retry);
+ }
+ }
if (bson_iter_init_find (&iter, reply_ptr, "writeConcernError") &&
BSON_ITER_HOLDS_DOCUMENT (&iter)) {
const char *errmsg = NULL;
int32_t code = 0;
bson_iter_recurse (&iter, &inner);
while (bson_iter_next (&inner)) {
if (BSON_ITER_IS_KEY (&inner, "code")) {
code = bson_iter_int32 (&inner);
} else if (BSON_ITER_IS_KEY (&inner, "errmsg")) {
errmsg = bson_iter_utf8 (&inner, NULL);
}
}
bson_set_error (error,
MONGOC_ERROR_WRITE_CONCERN,
code,
"Write Concern error: %s",
errmsg);
ret = false;
}
if (reply_ptr == &reply_local) {
bson_destroy (reply_ptr);
}
mongoc_cmd_parts_cleanup (&parts);
bson_destroy (&command);
mongoc_server_stream_cleanup (server_stream);
+ if (retry_server_stream) {
+ mongoc_server_stream_cleanup (retry_server_stream);
+ }
+
RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_collection_find_and_modify --
*
* Find a document in @collection matching @query and update it with
* the update document @update.
*
* If @reply is not NULL, then the result document will be placed
* in reply and should be released with bson_destroy().
*
* If @remove is true, then the matching documents will be removed.
*
* If @fields is not NULL, it will be used to select the desired
* resulting fields.
*
* If @_new is true, then the new version of the document is returned
* instead of the old document.
*
* See http://docs.mongodb.org/manual/reference/command/findAndModify/
* for more information.
*
* Returns:
* true on success; false on failure.
*
* Side effects:
* reply is initialized.
* error is set if false is returned.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_collection_find_and_modify (mongoc_collection_t *collection,
const bson_t *query,
const bson_t *sort,
const bson_t *update,
const bson_t *fields,
bool _remove,
bool upsert,
bool _new,
bson_t *reply,
bson_error_t *error)
{
mongoc_find_and_modify_opts_t *opts;
int flags = 0;
bool ret;
ENTRY;
BSON_ASSERT (collection);
BSON_ASSERT (query);
BSON_ASSERT (update || _remove);
if (_remove) {
flags |= MONGOC_FIND_AND_MODIFY_REMOVE;
}
if (upsert) {
flags |= MONGOC_FIND_AND_MODIFY_UPSERT;
}
if (_new) {
flags |= MONGOC_FIND_AND_MODIFY_RETURN_NEW;
}
opts = mongoc_find_and_modify_opts_new ();
mongoc_find_and_modify_opts_set_sort (opts, sort);
mongoc_find_and_modify_opts_set_update (opts, update);
mongoc_find_and_modify_opts_set_fields (opts, fields);
mongoc_find_and_modify_opts_set_flags (opts, flags);
ret = mongoc_collection_find_and_modify_with_opts (
collection, query, opts, reply, error);
mongoc_find_and_modify_opts_destroy (opts);
return ret;
}
+
+mongoc_change_stream_t *
+mongoc_collection_watch (const mongoc_collection_t *coll,
+ const bson_t *pipeline,
+ const bson_t *opts)
+{
+ return _mongoc_change_stream_new (coll, pipeline, opts);
+}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-collection.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-collection.h
similarity index 76%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-collection.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-collection.h
index aef11f34..9faf0f7c 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-collection.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-collection.h
@@ -1,275 +1,341 @@
/*
* Copyright 2013-2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_COLLECTION_H
#define MONGOC_COLLECTION_H
#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
+#include "mongoc-change-stream.h"
#include "mongoc-macros.h"
#include "mongoc-bulk-operation.h"
#include "mongoc-flags.h"
#include "mongoc-cursor.h"
#include "mongoc-index.h"
#include "mongoc-read-prefs.h"
#include "mongoc-read-concern.h"
#include "mongoc-write-concern.h"
#include "mongoc-find-and-modify.h"
-
BSON_BEGIN_DECLS
typedef struct _mongoc_collection_t mongoc_collection_t;
MONGOC_EXPORT (mongoc_cursor_t *)
mongoc_collection_aggregate (mongoc_collection_t *collection,
mongoc_query_flags_t flags,
const bson_t *pipeline,
const bson_t *opts,
const mongoc_read_prefs_t *read_prefs)
BSON_GNUC_WARN_UNUSED_RESULT;
MONGOC_EXPORT (void)
mongoc_collection_destroy (mongoc_collection_t *collection);
MONGOC_EXPORT (mongoc_collection_t *)
mongoc_collection_copy (mongoc_collection_t *collection);
MONGOC_EXPORT (mongoc_cursor_t *)
mongoc_collection_command (mongoc_collection_t *collection,
mongoc_query_flags_t flags,
uint32_t skip,
uint32_t limit,
uint32_t batch_size,
const bson_t *command,
const bson_t *fields,
const mongoc_read_prefs_t *read_prefs)
BSON_GNUC_WARN_UNUSED_RESULT;
MONGOC_EXPORT (bool)
mongoc_collection_read_command_with_opts (mongoc_collection_t *collection,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
const bson_t *opts,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_write_command_with_opts (mongoc_collection_t *collection,
const bson_t *command,
const bson_t *opts,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_read_write_command_with_opts (
mongoc_collection_t *collection,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs /* IGNORED */,
const bson_t *opts,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
+mongoc_collection_command_with_opts (mongoc_collection_t *collection,
+ const bson_t *command,
+ const mongoc_read_prefs_t *read_prefs,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+MONGOC_EXPORT (bool)
mongoc_collection_command_simple (mongoc_collection_t *collection,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (int64_t)
mongoc_collection_count (mongoc_collection_t *collection,
mongoc_query_flags_t flags,
const bson_t *query,
int64_t skip,
int64_t limit,
const mongoc_read_prefs_t *read_prefs,
bson_error_t *error);
MONGOC_EXPORT (int64_t)
mongoc_collection_count_with_opts (mongoc_collection_t *collection,
mongoc_query_flags_t flags,
const bson_t *query,
int64_t skip,
int64_t limit,
const bson_t *opts,
const mongoc_read_prefs_t *read_prefs,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_drop (mongoc_collection_t *collection, bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_drop_with_opts (mongoc_collection_t *collection,
const bson_t *opts,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_drop_index (mongoc_collection_t *collection,
const char *index_name,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_drop_index_with_opts (mongoc_collection_t *collection,
const char *index_name,
const bson_t *opts,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_create_index (mongoc_collection_t *collection,
const bson_t *keys,
const mongoc_index_opt_t *opt,
bson_error_t *error) BSON_GNUC_DEPRECATED;
MONGOC_EXPORT (bool)
mongoc_collection_create_index_with_opts (mongoc_collection_t *collection,
const bson_t *keys,
const mongoc_index_opt_t *opt,
const bson_t *opts,
bson_t *reply,
bson_error_t *error)
BSON_GNUC_DEPRECATED;
MONGOC_EXPORT (bool)
mongoc_collection_ensure_index (mongoc_collection_t *collection,
const bson_t *keys,
const mongoc_index_opt_t *opt,
- bson_error_t *error)
- BSON_GNUC_DEPRECATED;
+ bson_error_t *error) BSON_GNUC_DEPRECATED;
MONGOC_EXPORT (mongoc_cursor_t *)
mongoc_collection_find_indexes (mongoc_collection_t *collection,
- bson_error_t *error);
+ bson_error_t *error)
+ BSON_GNUC_DEPRECATED_FOR (mongoc_collection_find_indexes_with_opts);
+MONGOC_EXPORT (mongoc_cursor_t *)
+mongoc_collection_find_indexes_with_opts (mongoc_collection_t *collection,
+ const bson_t *opts);
MONGOC_EXPORT (mongoc_cursor_t *)
mongoc_collection_find (mongoc_collection_t *collection,
mongoc_query_flags_t flags,
uint32_t skip,
uint32_t limit,
uint32_t batch_size,
const bson_t *query,
const bson_t *fields,
const mongoc_read_prefs_t *read_prefs)
BSON_GNUC_DEPRECATED_FOR (mongoc_collection_find_with_opts)
BSON_GNUC_WARN_UNUSED_RESULT;
MONGOC_EXPORT (mongoc_cursor_t *)
mongoc_collection_find_with_opts (mongoc_collection_t *collection,
const bson_t *filter,
const bson_t *opts,
const mongoc_read_prefs_t *read_prefs)
BSON_GNUC_WARN_UNUSED_RESULT;
MONGOC_EXPORT (bool)
mongoc_collection_insert (mongoc_collection_t *collection,
mongoc_insert_flags_t flags,
const bson_t *document,
const mongoc_write_concern_t *write_concern,
bson_error_t *error);
MONGOC_EXPORT (bool)
+mongoc_collection_insert_one (mongoc_collection_t *collection,
+ const bson_t *document,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+MONGOC_EXPORT (bool)
+mongoc_collection_insert_many (mongoc_collection_t *collection,
+ const bson_t **documents,
+ size_t n_documents,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+MONGOC_EXPORT (bool)
mongoc_collection_insert_bulk (mongoc_collection_t *collection,
mongoc_insert_flags_t flags,
const bson_t **documents,
uint32_t n_documents,
const mongoc_write_concern_t *write_concern,
bson_error_t *error)
- BSON_GNUC_DEPRECATED_FOR (mongoc_collection_create_bulk_operation);
+ BSON_GNUC_DEPRECATED_FOR (mongoc_collection_insert_many);
MONGOC_EXPORT (bool)
mongoc_collection_update (mongoc_collection_t *collection,
mongoc_update_flags_t flags,
const bson_t *selector,
const bson_t *update,
const mongoc_write_concern_t *write_concern,
bson_error_t *error);
MONGOC_EXPORT (bool)
+mongoc_collection_update_one (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *update,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+MONGOC_EXPORT (bool)
+mongoc_collection_update_many (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *update,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+bool
+mongoc_collection_replace_one (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *replacement,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+MONGOC_EXPORT (bool)
mongoc_collection_delete (mongoc_collection_t *collection,
mongoc_delete_flags_t flags,
const bson_t *selector,
const mongoc_write_concern_t *write_concern,
bson_error_t *error)
- BSON_GNUC_DEPRECATED_FOR (mongoc_collection_remove);
+ BSON_GNUC_DEPRECATED_FOR (mongoc_collection_delete_one or
+ mongoc_collection_delete_many);
MONGOC_EXPORT (bool)
mongoc_collection_save (mongoc_collection_t *collection,
const bson_t *document,
const mongoc_write_concern_t *write_concern,
bson_error_t *error)
- BSON_GNUC_DEPRECATED_FOR (mongoc_collection_insert or
- mongoc_collection_update);
+ BSON_GNUC_DEPRECATED_FOR (mongoc_collection_insert_one or
+ mongoc_collection_replace_one);
MONGOC_EXPORT (bool)
mongoc_collection_remove (mongoc_collection_t *collection,
mongoc_remove_flags_t flags,
const bson_t *selector,
const mongoc_write_concern_t *write_concern,
bson_error_t *error);
MONGOC_EXPORT (bool)
+mongoc_collection_delete_one (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+MONGOC_EXPORT (bool)
+mongoc_collection_delete_many (mongoc_collection_t *collection,
+ const bson_t *selector,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+MONGOC_EXPORT (bool)
mongoc_collection_rename (mongoc_collection_t *collection,
const char *new_db,
const char *new_name,
bool drop_target_before_rename,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_rename_with_opts (mongoc_collection_t *collection,
const char *new_db,
const char *new_name,
bool drop_target_before_rename,
const bson_t *opts,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_find_and_modify_with_opts (
mongoc_collection_t *collection,
const bson_t *query,
const mongoc_find_and_modify_opts_t *opts,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_find_and_modify (mongoc_collection_t *collection,
const bson_t *query,
const bson_t *sort,
const bson_t *update,
const bson_t *fields,
bool _remove,
bool upsert,
bool _new,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_collection_stats (mongoc_collection_t *collection,
const bson_t *options,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (mongoc_bulk_operation_t *)
mongoc_collection_create_bulk_operation (
mongoc_collection_t *collection,
bool ordered,
- const mongoc_write_concern_t *write_concern) BSON_GNUC_WARN_UNUSED_RESULT;
+ const mongoc_write_concern_t *write_concern) BSON_GNUC_WARN_UNUSED_RESULT
+ BSON_GNUC_DEPRECATED_FOR (mongoc_collection_create_bulk_operation_with_opts);
+MONGOC_EXPORT (mongoc_bulk_operation_t *)
+mongoc_collection_create_bulk_operation_with_opts (
+ mongoc_collection_t *collection,
+ const bson_t *opts) BSON_GNUC_WARN_UNUSED_RESULT;
MONGOC_EXPORT (const mongoc_read_prefs_t *)
mongoc_collection_get_read_prefs (const mongoc_collection_t *collection);
MONGOC_EXPORT (void)
mongoc_collection_set_read_prefs (mongoc_collection_t *collection,
const mongoc_read_prefs_t *read_prefs);
MONGOC_EXPORT (const mongoc_read_concern_t *)
mongoc_collection_get_read_concern (const mongoc_collection_t *collection);
MONGOC_EXPORT (void)
mongoc_collection_set_read_concern (mongoc_collection_t *collection,
const mongoc_read_concern_t *read_concern);
MONGOC_EXPORT (const mongoc_write_concern_t *)
mongoc_collection_get_write_concern (const mongoc_collection_t *collection);
MONGOC_EXPORT (void)
mongoc_collection_set_write_concern (
mongoc_collection_t *collection,
const mongoc_write_concern_t *write_concern);
MONGOC_EXPORT (const char *)
mongoc_collection_get_name (mongoc_collection_t *collection);
MONGOC_EXPORT (const bson_t *)
-mongoc_collection_get_last_error (const mongoc_collection_t *collection);
+mongoc_collection_get_last_error (const mongoc_collection_t *collection)
+ BSON_GNUC_DEPRECATED;
MONGOC_EXPORT (char *)
mongoc_collection_keys_to_index_string (const bson_t *keys);
MONGOC_EXPORT (bool)
mongoc_collection_validate (mongoc_collection_t *collection,
const bson_t *options,
bson_t *reply,
bson_error_t *error);
-
+MONGOC_EXPORT (mongoc_change_stream_t *)
+mongoc_collection_watch (const mongoc_collection_t *coll,
+ const bson_t *pipeline,
+ const bson_t *opts);
BSON_END_DECLS
#endif /* MONGOC_COLLECTION_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-compression-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-compression-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-compression-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-compression-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-compression.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-compression.c
similarity index 84%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-compression.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-compression.c
index a892636a..ae08c5ec 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-compression.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-compression.c
@@ -1,208 +1,225 @@
/*
* Copyright 2017 MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#include "mongoc-compression-private.h"
#include "mongoc-trace-private.h"
#include "mongoc-util-private.h"
#ifdef MONGOC_ENABLE_COMPRESSION
#ifdef MONGOC_ENABLE_COMPRESSION_ZLIB
#include <zlib.h>
#endif
#ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY
#include <snappy-c.h>
#endif
#endif
size_t
-mongoc_compressor_max_compressed_length (int32_t compressor_id, size_t size)
+mongoc_compressor_max_compressed_length (int32_t compressor_id, size_t len)
{
+ TRACE ("Getting compression length for '%s' (%d)",
+ mongoc_compressor_id_to_name (compressor_id),
+ compressor_id);
switch (compressor_id) {
#ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY
case MONGOC_COMPRESSOR_SNAPPY_ID:
- return snappy_max_compressed_length (size);
+ return snappy_max_compressed_length (len);
break;
#endif
#ifdef MONGOC_ENABLE_COMPRESSION_ZLIB
case MONGOC_COMPRESSOR_ZLIB_ID:
- return compressBound (size);
+ return compressBound (len);
break;
#endif
case MONGOC_COMPRESSOR_NOOP_ID:
- return size;
+ return len;
break;
default:
return 0;
}
}
bool
mongoc_compressor_supported (const char *compressor)
{
#ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY
if (!strcasecmp (compressor, MONGOC_COMPRESSOR_SNAPPY_STR)) {
return true;
}
#endif
#ifdef MONGOC_ENABLE_COMPRESSION_ZLIB
if (!strcasecmp (compressor, MONGOC_COMPRESSOR_ZLIB_STR)) {
return true;
}
#endif
if (!strcasecmp (compressor, MONGOC_COMPRESSOR_NOOP_STR)) {
return true;
}
return false;
}
const char *
mongoc_compressor_id_to_name (int32_t compressor_id)
{
switch (compressor_id) {
case MONGOC_COMPRESSOR_SNAPPY_ID:
return MONGOC_COMPRESSOR_SNAPPY_STR;
case MONGOC_COMPRESSOR_ZLIB_ID:
return MONGOC_COMPRESSOR_ZLIB_STR;
case MONGOC_COMPRESSOR_NOOP_ID:
return MONGOC_COMPRESSOR_NOOP_STR;
default:
return "unknown";
}
}
int
mongoc_compressor_name_to_id (const char *compressor)
{
#ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY
if (strcasecmp (MONGOC_COMPRESSOR_SNAPPY_STR, compressor) == 0) {
return MONGOC_COMPRESSOR_SNAPPY_ID;
}
#endif
#ifdef MONGOC_ENABLE_COMPRESSION_ZLIB
if (strcasecmp (MONGOC_COMPRESSOR_ZLIB_STR, compressor) == 0) {
return MONGOC_COMPRESSOR_ZLIB_ID;
}
#endif
if (strcasecmp (MONGOC_COMPRESSOR_NOOP_STR, compressor) == 0) {
return MONGOC_COMPRESSOR_NOOP_ID;
}
return -1;
}
bool
mongoc_uncompress (int32_t compressor_id,
const uint8_t *compressed,
size_t compressed_len,
uint8_t *uncompressed,
- size_t *uncompressed_size)
+ size_t *uncompressed_len)
{
+ TRACE ("Uncompressing with '%s' (%d)",
+ mongoc_compressor_id_to_name (compressor_id),
+ compressor_id);
switch (compressor_id) {
case MONGOC_COMPRESSOR_SNAPPY_ID: {
#ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY
snappy_status status;
status = snappy_uncompress ((const char *) compressed,
compressed_len,
(char *) uncompressed,
- uncompressed_size);
+ uncompressed_len);
return status == SNAPPY_OK;
#else
MONGOC_WARNING ("Received snappy compressed opcode, but snappy "
"compression is not compiled in");
return false;
#endif
break;
}
case MONGOC_COMPRESSOR_ZLIB_ID: {
#ifdef MONGOC_ENABLE_COMPRESSION_ZLIB
int ok;
ok = uncompress (uncompressed,
- (unsigned long *) uncompressed_size,
+ (unsigned long *) uncompressed_len,
compressed,
compressed_len);
return ok == Z_OK;
#else
MONGOC_WARNING ("Received zlib compressed opcode, but zlib "
"compression is not compiled in");
return false;
#endif
break;
}
+ case MONGOC_COMPRESSOR_NOOP_ID:
+ memcpy (uncompressed, compressed, compressed_len);
+ *uncompressed_len = compressed_len;
+ return true;
default:
MONGOC_WARNING ("Unknown compressor ID %d", compressor_id);
}
return false;
}
bool
mongoc_compress (int32_t compressor_id,
int32_t compression_level,
char *uncompressed,
size_t uncompressed_len,
char *compressed,
size_t *compressed_len)
{
+ TRACE ("Compressing with '%s' (%d)",
+ mongoc_compressor_id_to_name (compressor_id),
+ compressor_id);
switch (compressor_id) {
case MONGOC_COMPRESSOR_SNAPPY_ID:
#ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY
/* No compression_level option for snappy */
return snappy_compress (
uncompressed, uncompressed_len, compressed, compressed_len) ==
SNAPPY_OK;
break;
#else
MONGOC_ERROR ("Client attempting to use compress with snappy, but snappy "
"compression is not compiled in");
return false;
#endif
case MONGOC_COMPRESSOR_ZLIB_ID:
#ifdef MONGOC_ENABLE_COMPRESSION_ZLIB
return compress2 ((unsigned char *) compressed,
(unsigned long *) compressed_len,
(unsigned char *) uncompressed,
uncompressed_len,
compression_level) == Z_OK;
break;
#else
MONGOC_ERROR ("Client attempting to use compress with zlib, but zlib "
"compression is not compiled in");
return false;
#endif
+ case MONGOC_COMPRESSOR_NOOP_ID:
+ memcpy (compressed, uncompressed, uncompressed_len);
+ *compressed_len = uncompressed_len;
+ return true;
default:
return false;
}
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-config.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-config.h
similarity index 84%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-config.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-config.h
index 944a8b7d..b0cbc5d2 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-config.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-config.h
@@ -1,287 +1,343 @@
/*
* Copyright 2013 MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CONFIG_H
#define MONGOC_CONFIG_H
/* MONGOC_USER_SET_CFLAGS is set from config based on what compiler flags were
* used to compile mongoc */
#define MONGOC_USER_SET_CFLAGS "-g -O0"
#define MONGOC_USER_SET_LDFLAGS ""
/* MONGOC_CC is used to determine what C compiler was used to compile mongoc */
#define MONGOC_CC "cc"
/*
* MONGOC_ENABLE_SSL_SECURE_CHANNEL is set from configure to determine if we are
* compiled with Native SSL support on Windows
*/
#define MONGOC_ENABLE_SSL_SECURE_CHANNEL 0
#if MONGOC_ENABLE_SSL_SECURE_CHANNEL != 1
# undef MONGOC_ENABLE_SSL_SECURE_CHANNEL
#endif
/*
* MONGOC_ENABLE_CRYPTO_CNG is set from configure to determine if we are
* compiled with Native Crypto support on Windows
*/
#define MONGOC_ENABLE_CRYPTO_CNG 0
#if MONGOC_ENABLE_CRYPTO_CNG != 1
# undef MONGOC_ENABLE_CRYPTO_CNG
#endif
/*
* MONGOC_ENABLE_SSL_SECURE_TRANSPORT is set from configure to determine if we are
* compiled with Native SSL support on Darwin
*/
#define MONGOC_ENABLE_SSL_SECURE_TRANSPORT 0
#if MONGOC_ENABLE_SSL_SECURE_TRANSPORT != 1
# undef MONGOC_ENABLE_SSL_SECURE_TRANSPORT
#endif
/*
* MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO is set from configure to determine if we are
* compiled with Native Crypto support on Darwin
*/
#define MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO 0
#if MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO != 1
# undef MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO
#endif
/*
* MONGOC_ENABLE_SSL_LIBRESSL is set from configure to determine if we are
* compiled with LibreSSL support.
*/
#define MONGOC_ENABLE_SSL_LIBRESSL 0
#if MONGOC_ENABLE_SSL_LIBRESSL != 1
# undef MONGOC_ENABLE_SSL_LIBRESSL
#endif
/*
* MONGOC_ENABLE_SSL_OPENSSL is set from configure to determine if we are
* compiled with OpenSSL support.
*/
#define MONGOC_ENABLE_SSL_OPENSSL 1
#if MONGOC_ENABLE_SSL_OPENSSL != 1
# undef MONGOC_ENABLE_SSL_OPENSSL
#endif
/*
* MONGOC_ENABLE_CRYPTO_LIBCRYPTO is set from configure to determine if we are
* compiled with OpenSSL support.
*/
#define MONGOC_ENABLE_CRYPTO_LIBCRYPTO 1
#if MONGOC_ENABLE_CRYPTO_LIBCRYPTO != 1
# undef MONGOC_ENABLE_CRYPTO_LIBCRYPTO
#endif
/*
* MONGOC_ENABLE_SSL is set from configure to determine if we are
* compiled with any SSL support.
*/
#define MONGOC_ENABLE_SSL 1
#if MONGOC_ENABLE_SSL != 1
# undef MONGOC_ENABLE_SSL
#endif
/*
* MONGOC_ENABLE_CRYPTO is set from configure to determine if we are
* compiled with any crypto support.
*/
#define MONGOC_ENABLE_CRYPTO 1
#if MONGOC_ENABLE_CRYPTO != 1
# undef MONGOC_ENABLE_CRYPTO
#endif
/*
* Use system crypto profile
*/
#define MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE 0
#if MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE != 1
# undef MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE
#endif
/*
* Use ASN1_STRING_get0_data () rather than the deprecated ASN1_STRING_data
*/
#define MONGOC_HAVE_ASN1_STRING_GET0_DATA 0
#if MONGOC_HAVE_ASN1_STRING_GET0_DATA != 1
# undef MONGOC_HAVE_ASN1_STRING_GET0_DATA
#endif
/*
* MONGOC_ENABLE_SASL is set from configure to determine if we are
* compiled with SASL support.
*/
#define MONGOC_ENABLE_SASL 1
#if MONGOC_ENABLE_SASL != 1
# undef MONGOC_ENABLE_SASL
#endif
/*
* MONGOC_ENABLE_SASL_CYRUS is set from configure to determine if we are
* compiled with Cyrus SASL support.
*/
#define MONGOC_ENABLE_SASL_CYRUS 1
#if MONGOC_ENABLE_SASL_CYRUS != 1
# undef MONGOC_ENABLE_SASL_CYRUS
#endif
/*
* MONGOC_ENABLE_SASL_SSPI is set from configure to determine if we are
* compiled with SSPI support.
*/
#define MONGOC_ENABLE_SASL_SSPI 0
#if MONGOC_ENABLE_SASL_SSPI != 1
# undef MONGOC_ENABLE_SASL_SSPI
#endif
/*
* MONGOC_ENABLE_SASL_GSSAPI is set from configure to determine if we are
* compiled with GSSAPI support.
*/
#define MONGOC_ENABLE_SASL_GSSAPI 0
#if MONGOC_ENABLE_SASL_GSSAPI != 1
# undef MONGOC_ENABLE_SASL_GSSAPI
#endif
/*
* MONGOC_HAVE_SASL_CLIENT_DONE is set from configure to determine if we
* have SASL and its version is new enough to use sasl_client_done (),
* which supersedes sasl_done ().
*/
#define MONGOC_HAVE_SASL_CLIENT_DONE 1
#if MONGOC_HAVE_SASL_CLIENT_DONE != 1
# undef MONGOC_HAVE_SASL_CLIENT_DONE
#endif
/*
* MONGOC_HAVE_WEAK_SYMBOLS is set from configure to determine if the
* compiler supports the (weak) annotation. We use it to prevent
* Link-Time-Optimization (LTO) in our constant-time mongoc_memcmp()
* This is known to work with GNU GCC and Solaris Studio
*/
#define MONGOC_HAVE_WEAK_SYMBOLS 1
#if MONGOC_HAVE_WEAK_SYMBOLS != 1
# undef MONGOC_HAVE_WEAK_SYMBOLS
#endif
/*
* Disable automatic calls to mongoc_init() and mongoc_cleanup()
* before main() is called, and after exit() (respectively).
*/
#define MONGOC_NO_AUTOMATIC_GLOBALS 1
#if MONGOC_NO_AUTOMATIC_GLOBALS != 1
# undef MONGOC_NO_AUTOMATIC_GLOBALS
#endif
/*
* MONGOC_HAVE_SOCKLEN is set from configure to determine if we
* need to emulate the type.
*/
#define MONGOC_HAVE_SOCKLEN 1
#if MONGOC_HAVE_SOCKLEN != 1
# undef MONGOC_HAVE_SOCKLEN
#endif
+/*
+ * MONGOC_HAVE_DNSAPI is set from configure to determine if we should use the
+ * Windows dnsapi for SRV record lookups.
+ */
+#define MONGOC_HAVE_DNSAPI 0
+
+#if MONGOC_HAVE_DNSAPI != 1
+# undef MONGOC_HAVE_DNSAPI
+#endif
+
+
+/*
+ * MONGOC_HAVE_RES_NSEARCH is set from configure to determine if we
+ * have thread-safe res_nsearch().
+ */
+#define MONGOC_HAVE_RES_NSEARCH 1
+
+#if MONGOC_HAVE_RES_NSEARCH != 1
+# undef MONGOC_HAVE_RES_NSEARCH
+#endif
+
+
+/*
+ * MONGOC_HAVE_RES_NDESTROY is set from configure to determine if we
+ * have BSD / Darwin's res_ndestroy().
+ */
+#define MONGOC_HAVE_RES_NDESTROY 0
+
+#if MONGOC_HAVE_RES_NDESTROY != 1
+# undef MONGOC_HAVE_RES_NDESTROY
+#endif
+
+
+/*
+ * MONGOC_HAVE_RES_NCLOSE is set from configure to determine if we
+ * have Linux's res_nclose().
+ */
+#define MONGOC_HAVE_RES_NCLOSE 1
+
+#if MONGOC_HAVE_RES_NCLOSE != 1
+# undef MONGOC_HAVE_RES_NCLOSE
+#endif
+
+
+/*
+ * MONGOC_HAVE_RES_SEARCH is set from configure to determine if we
+ * have thread-unsafe res_search(). It's unset if we have the preferred
+ * res_nsearch().
+ */
+#define MONGOC_HAVE_RES_SEARCH 0
+
+#if MONGOC_HAVE_RES_SEARCH != 1
+# undef MONGOC_HAVE_RES_SEARCH
+#endif
+
+
/*
* Set from configure, see
* https://curl.haxx.se/mail/lib-2009-04/0287.html
*/
#define MONGOC_SOCKET_ARG2 struct sockaddr
#define MONGOC_SOCKET_ARG3 socklen_t
/*
* Enable wire protocol compression negotiation
*
*/
-#define MONGOC_ENABLE_COMPRESSION 0
+#define MONGOC_ENABLE_COMPRESSION 1
#if MONGOC_ENABLE_COMPRESSION != 1
# undef MONGOC_ENABLE_COMPRESSION
#endif
/*
* Set if we have snappy compression support
*
*/
#define MONGOC_ENABLE_COMPRESSION_SNAPPY 0
#if MONGOC_ENABLE_COMPRESSION_SNAPPY != 1
# undef MONGOC_ENABLE_COMPRESSION_SNAPPY
#endif
/*
* Set if we have zlib compression support
*
*/
-#define MONGOC_ENABLE_COMPRESSION_ZLIB 0
+#define MONGOC_ENABLE_COMPRESSION_ZLIB 1
#if MONGOC_ENABLE_COMPRESSION_ZLIB != 1
# undef MONGOC_ENABLE_COMPRESSION_ZLIB
#endif
/*
* NOTICE:
* If you're about to update this file and add a config flag, make sure to
* update:
* o The bitfield in mongoc-handshake-private.h
* o _get_config_bitfield() in mongoc-handshake.c
* o examples/parse_handshake_cfg.py
*/
#endif /* MONGOC_CONFIG_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-config.h.in b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-config.h.in
similarity index 85%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-config.h.in
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-config.h.in
index f62b65e9..15432b82 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-config.h.in
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-config.h.in
@@ -1,287 +1,343 @@
/*
* Copyright 2013 MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CONFIG_H
#define MONGOC_CONFIG_H
/* MONGOC_USER_SET_CFLAGS is set from config based on what compiler flags were
* used to compile mongoc */
#define MONGOC_USER_SET_CFLAGS "@MONGOC_USER_SET_CFLAGS@"
#define MONGOC_USER_SET_LDFLAGS "@MONGOC_USER_SET_LDFLAGS@"
/* MONGOC_CC is used to determine what C compiler was used to compile mongoc */
#define MONGOC_CC "@MONGOC_CC@"
/*
* MONGOC_ENABLE_SSL_SECURE_CHANNEL is set from configure to determine if we are
* compiled with Native SSL support on Windows
*/
#define MONGOC_ENABLE_SSL_SECURE_CHANNEL @MONGOC_ENABLE_SSL_SECURE_CHANNEL@
#if MONGOC_ENABLE_SSL_SECURE_CHANNEL != 1
# undef MONGOC_ENABLE_SSL_SECURE_CHANNEL
#endif
/*
* MONGOC_ENABLE_CRYPTO_CNG is set from configure to determine if we are
* compiled with Native Crypto support on Windows
*/
#define MONGOC_ENABLE_CRYPTO_CNG @MONGOC_ENABLE_CRYPTO_CNG@
#if MONGOC_ENABLE_CRYPTO_CNG != 1
# undef MONGOC_ENABLE_CRYPTO_CNG
#endif
/*
* MONGOC_ENABLE_SSL_SECURE_TRANSPORT is set from configure to determine if we are
* compiled with Native SSL support on Darwin
*/
#define MONGOC_ENABLE_SSL_SECURE_TRANSPORT @MONGOC_ENABLE_SSL_SECURE_TRANSPORT@
#if MONGOC_ENABLE_SSL_SECURE_TRANSPORT != 1
# undef MONGOC_ENABLE_SSL_SECURE_TRANSPORT
#endif
/*
* MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO is set from configure to determine if we are
* compiled with Native Crypto support on Darwin
*/
#define MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO @MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO@
#if MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO != 1
# undef MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO
#endif
/*
* MONGOC_ENABLE_SSL_LIBRESSL is set from configure to determine if we are
* compiled with LibreSSL support.
*/
#define MONGOC_ENABLE_SSL_LIBRESSL @MONGOC_ENABLE_SSL_LIBRESSL@
#if MONGOC_ENABLE_SSL_LIBRESSL != 1
# undef MONGOC_ENABLE_SSL_LIBRESSL
#endif
/*
* MONGOC_ENABLE_SSL_OPENSSL is set from configure to determine if we are
* compiled with OpenSSL support.
*/
#define MONGOC_ENABLE_SSL_OPENSSL @MONGOC_ENABLE_SSL_OPENSSL@
#if MONGOC_ENABLE_SSL_OPENSSL != 1
# undef MONGOC_ENABLE_SSL_OPENSSL
#endif
/*
* MONGOC_ENABLE_CRYPTO_LIBCRYPTO is set from configure to determine if we are
* compiled with OpenSSL support.
*/
#define MONGOC_ENABLE_CRYPTO_LIBCRYPTO @MONGOC_ENABLE_CRYPTO_LIBCRYPTO@
#if MONGOC_ENABLE_CRYPTO_LIBCRYPTO != 1
# undef MONGOC_ENABLE_CRYPTO_LIBCRYPTO
#endif
/*
* MONGOC_ENABLE_SSL is set from configure to determine if we are
* compiled with any SSL support.
*/
#define MONGOC_ENABLE_SSL @MONGOC_ENABLE_SSL@
#if MONGOC_ENABLE_SSL != 1
# undef MONGOC_ENABLE_SSL
#endif
/*
* MONGOC_ENABLE_CRYPTO is set from configure to determine if we are
* compiled with any crypto support.
*/
#define MONGOC_ENABLE_CRYPTO @MONGOC_ENABLE_CRYPTO@
#if MONGOC_ENABLE_CRYPTO != 1
# undef MONGOC_ENABLE_CRYPTO
#endif
/*
* Use system crypto profile
*/
#define MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE @MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE@
#if MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE != 1
# undef MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE
#endif
/*
* Use ASN1_STRING_get0_data () rather than the deprecated ASN1_STRING_data
*/
#define MONGOC_HAVE_ASN1_STRING_GET0_DATA @MONGOC_HAVE_ASN1_STRING_GET0_DATA@
#if MONGOC_HAVE_ASN1_STRING_GET0_DATA != 1
# undef MONGOC_HAVE_ASN1_STRING_GET0_DATA
#endif
/*
* MONGOC_ENABLE_SASL is set from configure to determine if we are
* compiled with SASL support.
*/
#define MONGOC_ENABLE_SASL @MONGOC_ENABLE_SASL@
#if MONGOC_ENABLE_SASL != 1
# undef MONGOC_ENABLE_SASL
#endif
/*
* MONGOC_ENABLE_SASL_CYRUS is set from configure to determine if we are
* compiled with Cyrus SASL support.
*/
#define MONGOC_ENABLE_SASL_CYRUS @MONGOC_ENABLE_SASL_CYRUS@
#if MONGOC_ENABLE_SASL_CYRUS != 1
# undef MONGOC_ENABLE_SASL_CYRUS
#endif
/*
* MONGOC_ENABLE_SASL_SSPI is set from configure to determine if we are
* compiled with SSPI support.
*/
#define MONGOC_ENABLE_SASL_SSPI @MONGOC_ENABLE_SASL_SSPI@
#if MONGOC_ENABLE_SASL_SSPI != 1
# undef MONGOC_ENABLE_SASL_SSPI
#endif
/*
* MONGOC_ENABLE_SASL_GSSAPI is set from configure to determine if we are
* compiled with GSSAPI support.
*/
#define MONGOC_ENABLE_SASL_GSSAPI @MONGOC_ENABLE_SASL_GSSAPI@
#if MONGOC_ENABLE_SASL_GSSAPI != 1
# undef MONGOC_ENABLE_SASL_GSSAPI
#endif
/*
* MONGOC_HAVE_SASL_CLIENT_DONE is set from configure to determine if we
* have SASL and its version is new enough to use sasl_client_done (),
* which supersedes sasl_done ().
*/
#define MONGOC_HAVE_SASL_CLIENT_DONE @MONGOC_HAVE_SASL_CLIENT_DONE@
#if MONGOC_HAVE_SASL_CLIENT_DONE != 1
# undef MONGOC_HAVE_SASL_CLIENT_DONE
#endif
/*
* MONGOC_HAVE_WEAK_SYMBOLS is set from configure to determine if the
* compiler supports the (weak) annotation. We use it to prevent
* Link-Time-Optimization (LTO) in our constant-time mongoc_memcmp()
* This is known to work with GNU GCC and Solaris Studio
*/
#define MONGOC_HAVE_WEAK_SYMBOLS @MONGOC_HAVE_WEAK_SYMBOLS@
#if MONGOC_HAVE_WEAK_SYMBOLS != 1
# undef MONGOC_HAVE_WEAK_SYMBOLS
#endif
/*
* Disable automatic calls to mongoc_init() and mongoc_cleanup()
* before main() is called, and after exit() (respectively).
*/
#define MONGOC_NO_AUTOMATIC_GLOBALS @MONGOC_NO_AUTOMATIC_GLOBALS@
#if MONGOC_NO_AUTOMATIC_GLOBALS != 1
# undef MONGOC_NO_AUTOMATIC_GLOBALS
#endif
/*
* MONGOC_HAVE_SOCKLEN is set from configure to determine if we
* need to emulate the type.
*/
#define MONGOC_HAVE_SOCKLEN @MONGOC_HAVE_SOCKLEN@
#if MONGOC_HAVE_SOCKLEN != 1
# undef MONGOC_HAVE_SOCKLEN
#endif
+/*
+ * MONGOC_HAVE_DNSAPI is set from configure to determine if we should use the
+ * Windows dnsapi for SRV record lookups.
+ */
+#define MONGOC_HAVE_DNSAPI @MONGOC_HAVE_DNSAPI@
+
+#if MONGOC_HAVE_DNSAPI != 1
+# undef MONGOC_HAVE_DNSAPI
+#endif
+
+
+/*
+ * MONGOC_HAVE_RES_NSEARCH is set from configure to determine if we
+ * have thread-safe res_nsearch().
+ */
+#define MONGOC_HAVE_RES_NSEARCH @MONGOC_HAVE_RES_NSEARCH@
+
+#if MONGOC_HAVE_RES_NSEARCH != 1
+# undef MONGOC_HAVE_RES_NSEARCH
+#endif
+
+
+/*
+ * MONGOC_HAVE_RES_NDESTROY is set from configure to determine if we
+ * have BSD / Darwin's res_ndestroy().
+ */
+#define MONGOC_HAVE_RES_NDESTROY @MONGOC_HAVE_RES_NDESTROY@
+
+#if MONGOC_HAVE_RES_NDESTROY != 1
+# undef MONGOC_HAVE_RES_NDESTROY
+#endif
+
+
+/*
+ * MONGOC_HAVE_RES_NCLOSE is set from configure to determine if we
+ * have Linux's res_nclose().
+ */
+#define MONGOC_HAVE_RES_NCLOSE @MONGOC_HAVE_RES_NCLOSE@
+
+#if MONGOC_HAVE_RES_NCLOSE != 1
+# undef MONGOC_HAVE_RES_NCLOSE
+#endif
+
+
+/*
+ * MONGOC_HAVE_RES_SEARCH is set from configure to determine if we
+ * have thread-unsafe res_search(). It's unset if we have the preferred
+ * res_nsearch().
+ */
+#define MONGOC_HAVE_RES_SEARCH @MONGOC_HAVE_RES_SEARCH@
+
+#if MONGOC_HAVE_RES_SEARCH != 1
+# undef MONGOC_HAVE_RES_SEARCH
+#endif
+
+
/*
* Set from configure, see
* https://curl.haxx.se/mail/lib-2009-04/0287.html
*/
#define MONGOC_SOCKET_ARG2 @MONGOC_SOCKET_ARG2@
#define MONGOC_SOCKET_ARG3 @MONGOC_SOCKET_ARG3@
/*
* Enable wire protocol compression negotiation
*
*/
#define MONGOC_ENABLE_COMPRESSION @MONGOC_ENABLE_COMPRESSION@
#if MONGOC_ENABLE_COMPRESSION != 1
# undef MONGOC_ENABLE_COMPRESSION
#endif
/*
* Set if we have snappy compression support
*
*/
#define MONGOC_ENABLE_COMPRESSION_SNAPPY @MONGOC_ENABLE_COMPRESSION_SNAPPY@
#if MONGOC_ENABLE_COMPRESSION_SNAPPY != 1
# undef MONGOC_ENABLE_COMPRESSION_SNAPPY
#endif
/*
* Set if we have zlib compression support
*
*/
#define MONGOC_ENABLE_COMPRESSION_ZLIB @MONGOC_ENABLE_COMPRESSION_ZLIB@
#if MONGOC_ENABLE_COMPRESSION_ZLIB != 1
# undef MONGOC_ENABLE_COMPRESSION_ZLIB
#endif
/*
* NOTICE:
* If you're about to update this file and add a config flag, make sure to
* update:
* o The bitfield in mongoc-handshake-private.h
* o _get_config_bitfield() in mongoc-handshake.c
* o examples/parse_handshake_cfg.py
*/
#endif /* MONGOC_CONFIG_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-counters-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-counters-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-counters-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-counters-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-counters.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-counters.c
similarity index 98%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-counters.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-counters.c
index a5d171a5..d81fe6f9 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-counters.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-counters.c
@@ -1,317 +1,317 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bson.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#ifdef BSON_OS_UNIX
#include <sys/mman.h>
#include <sys/shm.h>
#endif
#ifdef _MSC_VER
#include <windows.h>
#endif
#include "mongoc-counters-private.h"
#include "mongoc-log.h"
#pragma pack(1)
typedef struct {
uint32_t offset;
uint32_t slot;
char category[24];
char name[32];
char description[64];
} mongoc_counter_info_t;
#pragma pack()
-BSON_STATIC_ASSERT (sizeof (mongoc_counter_info_t) == 128);
+BSON_STATIC_ASSERT2 (counter_info_t, sizeof (mongoc_counter_info_t) == 128);
#pragma pack(1)
typedef struct {
uint32_t size;
uint32_t n_cpu;
uint32_t n_counters;
uint32_t infos_offset;
uint32_t values_offset;
uint8_t padding[44];
} mongoc_counters_t;
#pragma pack()
-BSON_STATIC_ASSERT (sizeof (mongoc_counters_t) == 64);
+BSON_STATIC_ASSERT2 (counters_t, sizeof (mongoc_counters_t) == 64);
static void *gCounterFallback = NULL;
#define COUNTER(ident, Category, Name, Description) \
mongoc_counter_t __mongoc_counter_##ident;
#include "mongoc-counters.defs"
#undef COUNTER
/**
* mongoc_counters_use_shm:
*
* Checks to see if counters should be exported over a shared memory segment.
*
* Returns: true if SHM is to be used.
*/
#if defined(BSON_OS_UNIX) && defined(MONGOC_ENABLE_SHM_COUNTERS)
static bool
mongoc_counters_use_shm (void)
{
return !getenv ("MONGOC_DISABLE_SHM");
}
#endif
/**
* mongoc_counters_calc_size:
*
* Returns the number of bytes required for the shared memory segment of
* the process. This segment contains the various statistical counters for
* the process.
*
* Returns: The number of bytes required.
*/
static size_t
mongoc_counters_calc_size (void)
{
size_t n_cpu;
size_t n_groups;
size_t size;
n_cpu = _mongoc_get_cpu_count ();
n_groups = (LAST_COUNTER / SLOTS_PER_CACHELINE) + 1;
size = (sizeof (mongoc_counters_t) +
(LAST_COUNTER * sizeof (mongoc_counter_info_t)) +
(n_cpu * n_groups * sizeof (mongoc_counter_slots_t)));
#ifdef BSON_OS_UNIX
return BSON_MAX (getpagesize (), size);
#else
return size;
#endif
}
/**
* mongoc_counters_destroy:
*
* Removes the shared memory segment for the current processes counters.
*/
void
_mongoc_counters_cleanup (void)
{
if (gCounterFallback) {
bson_free (gCounterFallback);
gCounterFallback = NULL;
#if defined(BSON_OS_UNIX) && defined(MONGOC_ENABLE_SHM_COUNTERS)
} else {
char name[32];
int pid;
pid = getpid ();
bson_snprintf (name, sizeof name, "/mongoc-%u", pid);
shm_unlink (name);
#endif
}
}
/**
* mongoc_counters_alloc:
* @size: The size of the shared memory segment.
*
* This function allocates the shared memory segment for use by counters
* within the process.
*
* Returns: A shared memory segment, or malloc'd memory on failure.
*/
static void *
mongoc_counters_alloc (size_t size)
{
#if defined(BSON_OS_UNIX) && defined(MONGOC_ENABLE_SHM_COUNTERS)
void *mem;
char name[32];
int pid;
int fd;
if (!mongoc_counters_use_shm ()) {
goto skip_shm;
}
pid = getpid ();
bson_snprintf (name, sizeof name, "/mongoc-%u", pid);
#ifndef O_NOFOLLOW
#define O_NOFOLLOW 0
#endif
if (-1 == (fd = shm_open (name,
O_CREAT | O_EXCL | O_RDWR,
S_IRUSR | S_IWUSR | O_NOFOLLOW))) {
goto fail_noclean;
}
/*
* NOTE:
*
* ftruncate() will cause reads to be zero. Therefore, we don't need to
* do write() of zeroes to initialize the shared memory area.
*/
if (-1 == ftruncate (fd, size)) {
goto fail_cleanup;
}
mem = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (mem == MAP_FAILED) {
goto fail_cleanup;
}
close (fd);
memset (mem, 0, size);
return mem;
fail_cleanup:
shm_unlink (name);
close (fd);
fail_noclean:
MONGOC_WARNING ("Falling back to malloc for counters.");
skip_shm:
#endif
gCounterFallback = (void *) bson_malloc0 (size);
return gCounterFallback;
}
/**
* mongoc_counters_register:
* @counters: A mongoc_counter_t.
* @num: The counter number.
* @category: The counter category.
* @name: THe counter name.
* @description The counter description.
*
* Registers a new counter in the memory segment for counters. If the counters
* are exported over shared memory, it will be made available.
*
* Returns: The offset to the data for the counters values.
*/
static size_t
mongoc_counters_register (mongoc_counters_t *counters,
uint32_t num,
const char *category,
const char *name,
const char *description)
{
mongoc_counter_info_t *infos;
char *segment;
int n_cpu;
BSON_ASSERT (counters);
BSON_ASSERT (category);
BSON_ASSERT (name);
BSON_ASSERT (description);
/*
* Implementation Note:
*
* The memory barrier is required so that all of the above has been
* completed. Then increment the n_counters so that a reading application
* only knows about the counter after we have initialized it.
*/
n_cpu = _mongoc_get_cpu_count ();
segment = (char *) counters;
infos = (mongoc_counter_info_t *) (segment + counters->infos_offset);
infos = &infos[counters->n_counters];
infos->slot = num % SLOTS_PER_CACHELINE;
infos->offset =
(counters->values_offset +
((num / SLOTS_PER_CACHELINE) * n_cpu * sizeof (mongoc_counter_slots_t)));
bson_strncpy (infos->category, category, sizeof infos->category);
bson_strncpy (infos->name, name, sizeof infos->name);
bson_strncpy (infos->description, description, sizeof infos->description);
bson_memory_barrier ();
counters->n_counters++;
return infos->offset;
}
/**
* mongoc_counters_init:
*
* Initializes the mongoc counters system. This should be run on library
* initialization using the GCC constructor attribute.
*/
void
_mongoc_counters_init (void)
{
mongoc_counter_info_t *info;
mongoc_counters_t *counters;
size_t infos_size;
size_t off;
size_t size;
char *segment;
size = mongoc_counters_calc_size ();
segment = (char *) mongoc_counters_alloc (size);
infos_size = LAST_COUNTER * sizeof *info;
counters = (mongoc_counters_t *) segment;
counters->n_cpu = _mongoc_get_cpu_count ();
counters->n_counters = 0;
counters->infos_offset = sizeof *counters;
counters->values_offset = (uint32_t) (counters->infos_offset + infos_size);
BSON_ASSERT ((counters->values_offset % 64) == 0);
#define COUNTER(ident, Category, Name, Desc) \
off = mongoc_counters_register ( \
counters, COUNTER_##ident, Category, Name, Desc); \
__mongoc_counter_##ident.cpus = (mongoc_counter_slots_t *) (segment + off);
#include "mongoc-counters.defs"
#undef COUNTER
/*
* NOTE:
*
* Only update the size of the shared memory area for the client after
* we have initialized the rest of the counters. Don't forget our memory
* barrier to prevent compiler reordering.
*/
bson_memory_barrier ();
counters->size = (uint32_t) size;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-counters.defs b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-counters.defs
similarity index 77%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-counters.defs
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-counters.defs
index d38241e9..7b779edc 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-counters.defs
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-counters.defs
@@ -1,67 +1,61 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
COUNTER(op_egress_total, "Operations", "Egress Total", "The number of sent operations.")
COUNTER(op_ingress_total, "Operations", "Ingress Total", "The number of received operations.")
+COUNTER(op_egress_msg, "Operations", "Egress Messages", "The number of sent messages operations.")
+COUNTER(op_ingress_msg, "Operations", "Ingress Messages", "The number of received messages operations.")
+COUNTER(op_egress_compressed, "Operations", "Egress Compressed", "The number of sent compressed operations.")
+COUNTER(op_ingress_compressed, "Operations", "Ingress Compressed", "The number of received compressed operations.")
COUNTER(op_egress_query, "Operations", "Egress Queries", "The number of sent Query operations.")
-COUNTER(op_ingress_query, "Operations", "Ingress Queries", "The number of received Query operations.")
+COUNTER(op_ingress_reply, "Operations", "Ingress Reply", "The number of received Reply operations.")
COUNTER(op_egress_getmore, "Operations", "Egress GetMore", "The number of sent GetMore operations.")
-COUNTER(op_ingress_getmore, "Operations", "Ingress GetMore", "The number of received GetMore operations.")
COUNTER(op_egress_insert, "Operations", "Egress Insert", "The number of sent Insert operations.")
-COUNTER(op_ingress_insert, "Operations", "Ingress Insert", "The number of received Insert operations.")
COUNTER(op_egress_delete, "Operations", "Egress Delete", "The number of sent Delete operations.")
-COUNTER(op_ingress_delete, "Operations", "Ingress Delete", "The number of received Delete operations.")
COUNTER(op_egress_update, "Operations", "Egress Update", "The number of sent Update operations.")
-COUNTER(op_ingress_update, "Operations", "Ingress Update", "The number of received Update operations.")
COUNTER(op_egress_killcursors, "Operations", "Egress KillCursors", "The number of sent KillCursors operations.")
-COUNTER(op_ingress_killcursors, "Operations", "Ingress KillCursors", "The number of received KillCursors operations.")
-COUNTER(op_egress_msg, "Operations", "Egress Msg", "The number of sent Msg operations.")
-COUNTER(op_ingress_msg, "Operations", "Ingress Msg", "The number of received Msg operations.")
-COUNTER(op_egress_reply, "Operations", "Egress Reply", "The number of sent Reply operations.")
-COUNTER(op_ingress_reply, "Operations", "Ingress Reply", "The number of received Reply operations.")
-COUNTER(op_egress_compressed, "Operations", "Egress Compressed", "The number of sent compressed operations.")
-COUNTER(op_ingress_compressed, "Operations", "Ingress Compressed", "The number of received compressed operations.")
COUNTER(cursors_active, "Cursors", "Active", "The number of active cursors.")
COUNTER(cursors_disposed, "Cursors", "Disposed", "The number of disposed cursors.")
COUNTER(clients_active, "Clients", "Active", "The number of active clients.")
COUNTER(clients_disposed, "Clients", "Disposed", "The number of disposed clients.")
COUNTER(streams_active, "Streams", "Active", "The number of active streams.")
COUNTER(streams_disposed, "Streams", "Disposed", "The number of disposed streams.")
COUNTER(streams_egress, "Streams", "Egress Bytes", "The number of bytes sent.")
COUNTER(streams_ingress, "Streams", "Ingress Bytes", "The number of bytes received.")
COUNTER(streams_timeout, "Streams", "N Socket Timeouts", "The number of socket timeouts.")
COUNTER(client_pools_active, "Client Pools", "Active", "The number of active client pools.")
COUNTER(client_pools_disposed, "Client Pools", "Disposed", "The number of disposed client pools.")
COUNTER(protocol_ingress_error, "Protocol", "Ingress Errors", "The number of protocol errors on ingress.")
COUNTER(auth_failure, "Auth", "Failures", "The number of failed authentication requests.")
COUNTER(auth_success, "Auth", "Success", "The number of successful authentication requests.")
COUNTER(dns_failure, "DNS", "Failure", "The number of failed DNS requests.")
COUNTER(dns_success, "DNS", "Success", "The number of successful DNS requests.")
+
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-cng-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-cng-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-cng-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-cng-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-cng.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-cng.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-cng.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-cng.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-cng.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-cng.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-cng.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-cng.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-common-crypto-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-common-crypto-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-common-crypto-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-common-crypto-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-common-crypto.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-common-crypto.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-common-crypto.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-common-crypto.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-openssl-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-openssl-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-openssl-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-openssl-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-openssl.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-openssl.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-openssl.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-openssl.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-crypto.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-crypto.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-array-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-array-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-array-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-array-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-array.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-array.c
similarity index 98%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-array.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-array.c
index 4785a185..fca3a9a2 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-array.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-array.c
@@ -1,244 +1,245 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-cursor.h"
#include "mongoc-cursor-array-private.h"
#include "mongoc-cursor-private.h"
#include "mongoc-client-private.h"
#include "mongoc-counters-private.h"
#include "mongoc-error.h"
#include "mongoc-log.h"
#include "mongoc-opcode.h"
#include "mongoc-trace-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "cursor-array"
typedef struct {
bson_t array;
bool has_array;
bool has_synthetic_bson;
bson_iter_t iter;
bson_t bson; /* current document */
const char *field_name;
} mongoc_cursor_array_t;
static void *
_mongoc_cursor_array_new (const char *field_name)
{
mongoc_cursor_array_t *arr;
ENTRY;
arr = (mongoc_cursor_array_t *) bson_malloc0 (sizeof *arr);
arr->has_array = false;
arr->has_synthetic_bson = false;
arr->field_name = field_name;
RETURN (arr);
}
static void
_mongoc_cursor_array_destroy (mongoc_cursor_t *cursor)
{
mongoc_cursor_array_t *arr;
ENTRY;
arr = (mongoc_cursor_array_t *) cursor->iface_data;
if (arr->has_array) {
bson_destroy (&arr->array);
}
if (arr->has_synthetic_bson) {
bson_destroy (&arr->bson);
}
bson_free (cursor->iface_data);
_mongoc_cursor_destroy (cursor);
EXIT;
}
bool
_mongoc_cursor_array_prime (mongoc_cursor_t *cursor)
{
mongoc_cursor_array_t *arr;
bson_iter_t iter;
ENTRY;
arr = (mongoc_cursor_array_t *) cursor->iface_data;
BSON_ASSERT (arr);
- if (_mongoc_cursor_run_command (cursor, &cursor->filter, &arr->array) &&
+ if (_mongoc_cursor_run_command (
+ cursor, &cursor->filter, &cursor->opts, &arr->array) &&
bson_iter_init_find (&iter, &arr->array, arr->field_name) &&
BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &arr->iter)) {
arr->has_array = true;
return true;
}
return false;
}
static bool
_mongoc_cursor_array_next (mongoc_cursor_t *cursor, const bson_t **bson)
{
bool ret = true;
mongoc_cursor_array_t *arr;
uint32_t document_len;
const uint8_t *document;
ENTRY;
arr = (mongoc_cursor_array_t *) cursor->iface_data;
*bson = NULL;
if (!arr->has_array && !arr->has_synthetic_bson) {
ret = _mongoc_cursor_array_prime (cursor);
}
if (ret) {
ret = bson_iter_next (&arr->iter);
}
if (ret) {
bson_iter_document (&arr->iter, &document_len, &document);
bson_init_static (&arr->bson, document, document_len);
*bson = &arr->bson;
}
RETURN (ret);
}
static mongoc_cursor_t *
_mongoc_cursor_array_clone (const mongoc_cursor_t *cursor)
{
mongoc_cursor_array_t *arr;
mongoc_cursor_t *clone_;
ENTRY;
arr = (mongoc_cursor_array_t *) cursor->iface_data;
clone_ = _mongoc_cursor_clone (cursor);
_mongoc_cursor_array_init (clone_, &cursor->filter, arr->field_name);
RETURN (clone_);
}
static bool
_mongoc_cursor_array_more (mongoc_cursor_t *cursor)
{
bool ret;
mongoc_cursor_array_t *arr;
bson_iter_t iter;
ENTRY;
arr = (mongoc_cursor_array_t *) cursor->iface_data;
if (arr->has_array || arr->has_synthetic_bson) {
memcpy (&iter, &arr->iter, sizeof iter);
ret = bson_iter_next (&iter);
} else {
ret = true;
}
RETURN (ret);
}
static bool
_mongoc_cursor_array_error_document (mongoc_cursor_t *cursor,
bson_error_t *error,
const bson_t **doc)
{
mongoc_cursor_array_t *arr;
ENTRY;
arr = (mongoc_cursor_array_t *) cursor->iface_data;
if (arr->has_synthetic_bson) {
if (doc) {
*doc = NULL;
}
return false;
}
return _mongoc_cursor_error_document (cursor, error, doc);
}
static mongoc_cursor_interface_t gMongocCursorArray = {
_mongoc_cursor_array_clone,
_mongoc_cursor_array_destroy,
_mongoc_cursor_array_more,
_mongoc_cursor_array_next,
_mongoc_cursor_array_error_document,
};
void
_mongoc_cursor_array_init (mongoc_cursor_t *cursor,
const bson_t *command,
const char *field_name)
{
ENTRY;
if (command) {
bson_destroy (&cursor->filter);
bson_copy_to (command, &cursor->filter);
}
cursor->iface_data = _mongoc_cursor_array_new (field_name);
memcpy (
&cursor->iface, &gMongocCursorArray, sizeof (mongoc_cursor_interface_t));
EXIT;
}
void
_mongoc_cursor_array_set_bson (mongoc_cursor_t *cursor, const bson_t *bson)
{
mongoc_cursor_array_t *arr;
ENTRY;
arr = (mongoc_cursor_array_t *) cursor->iface_data;
bson_copy_to (bson, &arr->bson);
arr->has_synthetic_bson = true;
bson_iter_init (&arr->iter, &arr->bson);
EXIT;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-cursorid-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-cursorid-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-cursorid-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-cursorid-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-cursorid.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-cursorid.c
similarity index 94%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-cursorid.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-cursorid.c
index 7b34527b..c893f071 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-cursorid.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-cursorid.c
@@ -1,395 +1,403 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-cursor.h"
#include "mongoc-cursor-private.h"
#include "mongoc-cursor-cursorid-private.h"
#include "mongoc-log.h"
#include "mongoc-trace-private.h"
#include "mongoc-error.h"
#include "mongoc-util-private.h"
#include "mongoc-client-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "cursor-cursorid"
static void *
_mongoc_cursor_cursorid_new (void)
{
mongoc_cursor_cursorid_t *cid;
ENTRY;
cid = (mongoc_cursor_cursorid_t *) bson_malloc0 (sizeof *cid);
bson_init (&cid->array);
cid->in_batch = false;
cid->in_reader = false;
RETURN (cid);
}
static void
_mongoc_cursor_cursorid_destroy (mongoc_cursor_t *cursor)
{
mongoc_cursor_cursorid_t *cid;
ENTRY;
cid = (mongoc_cursor_cursorid_t *) cursor->iface_data;
BSON_ASSERT (cid);
bson_destroy (&cid->array);
bson_free (cid);
_mongoc_cursor_destroy (cursor);
EXIT;
}
/*
* Start iterating the reply to an "aggregate", "find", "getMore" etc. command:
*
* {cursor: {id: 1234, ns: "db.collection", firstBatch: [...]}}
*/
bool
_mongoc_cursor_cursorid_start_batch (mongoc_cursor_t *cursor)
{
mongoc_cursor_cursorid_t *cid;
bson_iter_t iter;
bson_iter_t child;
const char *ns;
uint32_t nslen;
cid = (mongoc_cursor_cursorid_t *) cursor->iface_data;
BSON_ASSERT (cid);
if (bson_iter_init_find (&iter, &cid->array, "cursor") &&
BSON_ITER_HOLDS_DOCUMENT (&iter) && bson_iter_recurse (&iter, &child)) {
while (bson_iter_next (&child)) {
if (BSON_ITER_IS_KEY (&child, "id")) {
cursor->rpc.reply.cursor_id = bson_iter_as_int64 (&child);
} else if (BSON_ITER_IS_KEY (&child, "ns")) {
ns = bson_iter_utf8 (&child, &nslen);
_mongoc_set_cursor_ns (cursor, ns, nslen);
} else if (BSON_ITER_IS_KEY (&child, "firstBatch") ||
BSON_ITER_IS_KEY (&child, "nextBatch")) {
if (BSON_ITER_HOLDS_ARRAY (&child) &&
bson_iter_recurse (&child, &cid->batch_iter)) {
cid->in_batch = true;
}
}
}
}
return cid->in_batch;
}
static bool
_mongoc_cursor_cursorid_refresh_from_command (mongoc_cursor_t *cursor,
- const bson_t *command)
+ const bson_t *command,
+ const bson_t *opts)
{
mongoc_cursor_cursorid_t *cid;
ENTRY;
cid = (mongoc_cursor_cursorid_t *) cursor->iface_data;
BSON_ASSERT (cid);
bson_destroy (&cid->array);
/* server replies to find / aggregate with {cursor: {id: N, firstBatch: []}},
* to getMore command with {cursor: {id: N, nextBatch: []}}. */
- if (_mongoc_cursor_run_command (cursor, command, &cid->array) &&
+ if (_mongoc_cursor_run_command (cursor, command, opts, &cid->array) &&
_mongoc_cursor_cursorid_start_batch (cursor)) {
RETURN (true);
}
- bson_destroy (&cursor->error_doc);
- bson_copy_to (&cid->array, &cursor->error_doc);
+ bson_destroy (&cursor->reply);
+ bson_copy_to (&cid->array, &cursor->reply);
if (!cursor->error.domain) {
bson_set_error (&cursor->error,
MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Invalid reply to %s command.",
_mongoc_get_command_name (command));
}
RETURN (false);
}
static void
_mongoc_cursor_cursorid_read_from_batch (mongoc_cursor_t *cursor,
const bson_t **bson)
{
mongoc_cursor_cursorid_t *cid;
const uint8_t *data = NULL;
uint32_t data_len = 0;
ENTRY;
cid = (mongoc_cursor_cursorid_t *) cursor->iface_data;
BSON_ASSERT (cid);
if (bson_iter_next (&cid->batch_iter) &&
BSON_ITER_HOLDS_DOCUMENT (&cid->batch_iter)) {
bson_iter_document (&cid->batch_iter, &data_len, &data);
/* bson_iter_next guarantees valid BSON, so this must succeed */
bson_init_static (&cid->current_doc, data, data_len);
*bson = &cid->current_doc;
cursor->end_of_event = false;
} else {
cursor->end_of_event = true;
}
}
bool
_mongoc_cursor_cursorid_prime (mongoc_cursor_t *cursor)
{
+ if (cursor->error.domain != 0) {
+ return false;
+ }
+
cursor->sent = true;
cursor->operation_id = ++cursor->client->cluster.operation_id;
- return _mongoc_cursor_cursorid_refresh_from_command (cursor,
- &cursor->filter);
+ return _mongoc_cursor_cursorid_refresh_from_command (
+ cursor, &cursor->filter, &cursor->opts);
}
bool
_mongoc_cursor_prepare_getmore_command (mongoc_cursor_t *cursor,
bson_t *command)
{
const char *collection;
int collection_len;
int64_t batch_size;
bool await_data;
int32_t max_await_time_ms;
ENTRY;
_mongoc_cursor_collection (cursor, &collection, &collection_len);
bson_init (command);
bson_append_int64 (command, "getMore", 7, mongoc_cursor_get_id (cursor));
bson_append_utf8 (command, "collection", 10, collection, collection_len);
batch_size = mongoc_cursor_get_batch_size (cursor);
/* See find, getMore, and killCursors Spec for batchSize rules */
if (batch_size) {
bson_append_int64 (command,
MONGOC_CURSOR_BATCH_SIZE,
MONGOC_CURSOR_BATCH_SIZE_LEN,
- abs (_mongoc_n_return (cursor)));
+ abs (_mongoc_n_return (false, cursor)));
}
/* Find, getMore And killCursors Commands Spec: "In the case of a tailable
cursor with awaitData == true the driver MUST provide a Cursor level
option named maxAwaitTimeMS (See CRUD specification for details). The
maxTimeMS option on the getMore command MUST be set to the value of the
option maxAwaitTimeMS. If no maxAwaitTimeMS is specified, the driver
SHOULD not set maxTimeMS on the getMore command."
*/
await_data = _mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_TAILABLE) &&
_mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_AWAIT_DATA);
if (await_data) {
max_await_time_ms =
(int32_t) mongoc_cursor_get_max_await_time_ms (cursor);
if (max_await_time_ms) {
bson_append_int32 (command,
MONGOC_CURSOR_MAX_TIME_MS,
MONGOC_CURSOR_MAX_TIME_MS_LEN,
max_await_time_ms);
}
}
RETURN (true);
}
static bool
_mongoc_cursor_cursorid_get_more (mongoc_cursor_t *cursor)
{
mongoc_cursor_cursorid_t *cid;
mongoc_server_stream_t *server_stream;
bson_t command;
bool ret;
ENTRY;
cid = (mongoc_cursor_cursorid_t *) cursor->iface_data;
BSON_ASSERT (cid);
server_stream = _mongoc_cursor_fetch_stream (cursor);
if (!server_stream) {
RETURN (false);
}
if (_use_getmore_command (cursor, server_stream)) {
if (!_mongoc_cursor_prepare_getmore_command (cursor, &command)) {
mongoc_server_stream_cleanup (server_stream);
RETURN (false);
}
- ret = _mongoc_cursor_cursorid_refresh_from_command (cursor, &command);
+ /* don't pass cursor->opts to getMore */
+ ret = _mongoc_cursor_cursorid_refresh_from_command (
+ cursor, &command, NULL /* opts */);
+
bson_destroy (&command);
} else {
ret = _mongoc_cursor_op_getmore (cursor, server_stream);
cid->in_reader = ret;
}
mongoc_server_stream_cleanup (server_stream);
RETURN (ret);
}
bool
_mongoc_cursor_cursorid_next (mongoc_cursor_t *cursor, const bson_t **bson)
{
mongoc_cursor_cursorid_t *cid;
bool refreshed = false;
ENTRY;
*bson = NULL;
cid = (mongoc_cursor_cursorid_t *) cursor->iface_data;
BSON_ASSERT (cid);
if (!cursor->sent) {
if (!_mongoc_cursor_cursorid_prime (cursor)) {
GOTO (done);
}
}
again:
/* Two paths:
* - Mongo 3.2+, sent "getMore" cmd, we're reading reply's "nextBatch" array
* - Mongo 2.6 to 3, after "aggregate" or similar command we sent OP_GETMORE,
* we're reading the raw reply
*/
if (cid->in_batch) {
_mongoc_cursor_cursorid_read_from_batch (cursor, bson);
if (*bson) {
GOTO (done);
}
cid->in_batch = false;
} else if (cid->in_reader) {
_mongoc_read_from_buffer (cursor, bson);
if (*bson) {
GOTO (done);
}
cid->in_reader = false;
}
if (!refreshed && mongoc_cursor_get_id (cursor)) {
if (!_mongoc_cursor_cursorid_get_more (cursor)) {
GOTO (done);
}
refreshed = true;
GOTO (again);
}
done:
if (!*bson && mongoc_cursor_get_id (cursor) == 0) {
cursor->done = 1;
}
RETURN (*bson != NULL);
}
static mongoc_cursor_t *
_mongoc_cursor_cursorid_clone (const mongoc_cursor_t *cursor)
{
mongoc_cursor_t *clone_;
ENTRY;
clone_ = _mongoc_cursor_clone (cursor);
_mongoc_cursor_cursorid_init (clone_, &cursor->filter);
RETURN (clone_);
}
static mongoc_cursor_interface_t gMongocCursorCursorid = {
_mongoc_cursor_cursorid_clone,
_mongoc_cursor_cursorid_destroy,
NULL,
_mongoc_cursor_cursorid_next,
};
void
_mongoc_cursor_cursorid_init (mongoc_cursor_t *cursor, const bson_t *command)
{
ENTRY;
bson_destroy (&cursor->filter);
bson_copy_to (command, &cursor->filter);
cursor->iface_data = _mongoc_cursor_cursorid_new ();
memcpy (&cursor->iface,
&gMongocCursorCursorid,
sizeof (mongoc_cursor_interface_t));
EXIT;
}
void
_mongoc_cursor_cursorid_init_with_reply (mongoc_cursor_t *cursor,
bson_t *reply,
uint32_t server_id)
{
mongoc_cursor_cursorid_t *cid;
cursor->sent = true;
cursor->server_id = server_id;
cid = (mongoc_cursor_cursorid_t *) cursor->iface_data;
BSON_ASSERT (cid);
bson_destroy (&cid->array);
if (!bson_steal (&cid->array, reply)) {
bson_steal (&cid->array, bson_copy (reply));
}
if (!_mongoc_cursor_cursorid_start_batch (cursor)) {
bson_set_error (&cursor->error,
MONGOC_ERROR_CURSOR,
MONGOC_ERROR_CURSOR_INVALID_CURSOR,
"Couldn't parse cursor document");
}
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-private.h
similarity index 94%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-private.h
index d2e50ca9..43d69f2a 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-private.h
@@ -1,217 +1,223 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CURSOR_PRIVATE_H
#define MONGOC_CURSOR_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-client.h"
#include "mongoc-buffer-private.h"
#include "mongoc-rpc-private.h"
#include "mongoc-server-stream-private.h"
BSON_BEGIN_DECLS
typedef struct _mongoc_cursor_interface_t mongoc_cursor_interface_t;
struct _mongoc_cursor_interface_t {
mongoc_cursor_t *(*clone) (const mongoc_cursor_t *cursor);
void (*destroy) (mongoc_cursor_t *cursor);
bool (*more) (mongoc_cursor_t *cursor);
bool (*next) (mongoc_cursor_t *cursor, const bson_t **bson);
bool (*error_document) (mongoc_cursor_t *cursor,
bson_error_t *error,
const bson_t **doc);
void (*get_host) (mongoc_cursor_t *cursor, mongoc_host_list_t *host);
};
#define MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS "allowPartialResults"
#define MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS_LEN 19
#define MONGOC_CURSOR_AWAIT_DATA "awaitData"
#define MONGOC_CURSOR_AWAIT_DATA_LEN 9
#define MONGOC_CURSOR_BATCH_SIZE "batchSize"
#define MONGOC_CURSOR_BATCH_SIZE_LEN 9
#define MONGOC_CURSOR_COLLATION "collation"
#define MONGOC_CURSOR_COLLATION_LEN 9
#define MONGOC_CURSOR_COMMENT "comment"
#define MONGOC_CURSOR_COMMENT_LEN 7
#define MONGOC_CURSOR_EXHAUST "exhaust"
#define MONGOC_CURSOR_EXHAUST_LEN 7
#define MONGOC_CURSOR_FILTER "filter"
#define MONGOC_CURSOR_FILTER_LEN 6
#define MONGOC_CURSOR_FIND "find"
#define MONGOC_CURSOR_FIND_LEN 4
#define MONGOC_CURSOR_HINT "hint"
#define MONGOC_CURSOR_HINT_LEN 4
#define MONGOC_CURSOR_LIMIT "limit"
#define MONGOC_CURSOR_LIMIT_LEN 5
#define MONGOC_CURSOR_MAX "max"
#define MONGOC_CURSOR_MAX_LEN 3
#define MONGOC_CURSOR_MAX_AWAIT_TIME_MS "maxAwaitTimeMS"
#define MONGOC_CURSOR_MAX_AWAIT_TIME_MS_LEN 14
#define MONGOC_CURSOR_MAX_SCAN "maxScan"
#define MONGOC_CURSOR_MAX_SCAN_LEN 7
#define MONGOC_CURSOR_MAX_TIME_MS "maxTimeMS"
#define MONGOC_CURSOR_MAX_TIME_MS_LEN 9
#define MONGOC_CURSOR_MIN "min"
#define MONGOC_CURSOR_MIN_LEN 3
#define MONGOC_CURSOR_NO_CURSOR_TIMEOUT "noCursorTimeout"
#define MONGOC_CURSOR_NO_CURSOR_TIMEOUT_LEN 15
#define MONGOC_CURSOR_OPLOG_REPLAY "oplogReplay"
#define MONGOC_CURSOR_OPLOG_REPLAY_LEN 11
#define MONGOC_CURSOR_ORDERBY "orderby"
#define MONGOC_CURSOR_ORDERBY_LEN 7
#define MONGOC_CURSOR_PROJECTION "projection"
#define MONGOC_CURSOR_PROJECTION_LEN 10
#define MONGOC_CURSOR_QUERY "query"
#define MONGOC_CURSOR_QUERY_LEN 5
#define MONGOC_CURSOR_READ_CONCERN "readConcern"
#define MONGOC_CURSOR_READ_CONCERN_LEN 11
#define MONGOC_CURSOR_RETURN_KEY "returnKey"
#define MONGOC_CURSOR_RETURN_KEY_LEN 9
#define MONGOC_CURSOR_SHOW_DISK_LOC "showDiskLoc"
#define MONGOC_CURSOR_SHOW_DISK_LOC_LEN 11
#define MONGOC_CURSOR_SHOW_RECORD_ID "showRecordId"
#define MONGOC_CURSOR_SHOW_RECORD_ID_LEN 12
#define MONGOC_CURSOR_SINGLE_BATCH "singleBatch"
#define MONGOC_CURSOR_SINGLE_BATCH_LEN 11
#define MONGOC_CURSOR_SKIP "skip"
#define MONGOC_CURSOR_SKIP_LEN 4
#define MONGOC_CURSOR_SNAPSHOT "snapshot"
#define MONGOC_CURSOR_SNAPSHOT_LEN 8
#define MONGOC_CURSOR_SORT "sort"
#define MONGOC_CURSOR_SORT_LEN 4
#define MONGOC_CURSOR_TAILABLE "tailable"
#define MONGOC_CURSOR_TAILABLE_LEN 8
struct _mongoc_cursor_t {
mongoc_client_t *client;
uint32_t server_id;
bool server_id_set;
bool slave_ok;
- unsigned is_command : 1;
+ unsigned is_find : 1;
unsigned sent : 1;
unsigned done : 1;
unsigned end_of_event : 1;
unsigned has_fields : 1;
unsigned in_exhaust : 1;
+ unsigned explicit_session : 1;
bson_t filter;
bson_t opts;
+ bson_t reply;
mongoc_read_concern_t *read_concern;
mongoc_read_prefs_t *read_prefs;
-
mongoc_write_concern_t *write_concern;
+ mongoc_client_session_t *client_session;
uint32_t count;
char ns[140];
uint32_t nslen;
uint32_t dblen;
bson_error_t error;
- bson_t error_doc;
/* for OP_QUERY and OP_GETMORE replies*/
mongoc_rpc_t rpc;
mongoc_buffer_t buffer;
bson_reader_t *reader;
const bson_t *current;
mongoc_cursor_interface_t iface;
void *iface_data;
int64_t operation_id;
};
int32_t
-_mongoc_n_return (mongoc_cursor_t *cursor);
+_mongoc_n_return (bool is_initial_request, mongoc_cursor_t *cursor);
void
_mongoc_set_cursor_ns (mongoc_cursor_t *cursor, const char *ns, uint32_t nslen);
bool
_mongoc_cursor_get_opt_bool (const mongoc_cursor_t *cursor, const char *option);
mongoc_cursor_t *
_mongoc_cursor_new_with_opts (mongoc_client_t *client,
const char *db_and_collection,
- bool is_command,
+ bool is_find,
const bson_t *filter,
const bson_t *opts,
const mongoc_read_prefs_t *read_prefs,
const mongoc_read_concern_t *read_concern);
mongoc_cursor_t *
_mongoc_cursor_new (mongoc_client_t *client,
const char *db_and_collection,
mongoc_query_flags_t flags,
uint32_t skip,
int32_t limit,
uint32_t batch_size,
bool is_command,
const bson_t *query,
const bson_t *fields,
const mongoc_read_prefs_t *read_prefs,
const mongoc_read_concern_t *read_concern);
mongoc_cursor_t *
_mongoc_cursor_clone (const mongoc_cursor_t *cursor);
void
_mongoc_cursor_destroy (mongoc_cursor_t *cursor);
bool
_mongoc_read_from_buffer (mongoc_cursor_t *cursor, const bson_t **bson);
bool
_use_find_command (const mongoc_cursor_t *cursor,
const mongoc_server_stream_t *server_stream);
bool
_use_getmore_command (const mongoc_cursor_t *cursor,
const mongoc_server_stream_t *server_stream);
mongoc_server_stream_t *
_mongoc_cursor_fetch_stream (mongoc_cursor_t *cursor);
void
_mongoc_cursor_collection (const mongoc_cursor_t *cursor,
const char **collection,
int *collection_len);
bool
_mongoc_cursor_op_getmore (mongoc_cursor_t *cursor,
mongoc_server_stream_t *server_stream);
bool
_mongoc_cursor_run_command (mongoc_cursor_t *cursor,
const bson_t *command,
+ const bson_t *opts,
bson_t *reply);
bool
_mongoc_cursor_more (mongoc_cursor_t *cursor);
bool
_mongoc_cursor_next (mongoc_cursor_t *cursor, const bson_t **bson);
bool
_mongoc_cursor_error_document (mongoc_cursor_t *cursor,
bson_error_t *error,
const bson_t **doc);
void
_mongoc_cursor_get_host (mongoc_cursor_t *cursor, mongoc_host_list_t *host);
+bool
+_mongoc_cursor_set_opt_int64 (mongoc_cursor_t *cursor,
+ const char *option,
+ int64_t value);
BSON_END_DECLS
#endif /* MONGOC_CURSOR_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-transform-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-transform-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-transform-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-transform-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-transform.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-transform.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor-transform.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor-transform.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor.c
similarity index 87%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor.c
index db8deac8..14d76158 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor.c
@@ -1,2179 +1,2265 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-cursor.h"
#include "mongoc-cursor-private.h"
#include "mongoc-client-private.h"
#include "mongoc-counters-private.h"
#include "mongoc-error.h"
#include "mongoc-log.h"
#include "mongoc-trace-private.h"
#include "mongoc-cursor-cursorid-private.h"
#include "mongoc-read-concern-private.h"
#include "mongoc-util-private.h"
#include "mongoc-write-concern-private.h"
+#include "mongoc-read-prefs-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "cursor"
#define CURSOR_FAILED(cursor_) ((cursor_)->error.domain != 0)
static bool
_translate_query_opt (const char *query_field,
const char **cmd_field,
int *len);
static const bson_t *
_mongoc_cursor_op_query (mongoc_cursor_t *cursor,
mongoc_server_stream_t *server_stream);
static bool
-_mongoc_cursor_prepare_find_command (mongoc_cursor_t *cursor,
- bson_t *command,
- mongoc_server_stream_t *server_stream);
+_mongoc_cursor_prepare_find_command (mongoc_cursor_t *cursor, bson_t *command);
static const bson_t *
_mongoc_cursor_find_command (mongoc_cursor_t *cursor,
mongoc_server_stream_t *server_stream);
-static bool
+bool
_mongoc_cursor_set_opt_int64 (mongoc_cursor_t *cursor,
const char *option,
int64_t value)
{
bson_iter_t iter;
if (bson_iter_init_find (&iter, &cursor->opts, option)) {
if (!BSON_ITER_HOLDS_INT64 (&iter)) {
return false;
}
bson_iter_overwrite_int64 (&iter, value);
return true;
}
return BSON_APPEND_INT64 (&cursor->opts, option, value);
}
static int64_t
_mongoc_cursor_get_opt_int64 (const mongoc_cursor_t *cursor,
const char *option,
int64_t default_value)
{
bson_iter_t iter;
if (bson_iter_init_find (&iter, &cursor->opts, option)) {
return bson_iter_as_int64 (&iter);
}
return default_value;
}
static bool
_mongoc_cursor_set_opt_bool (mongoc_cursor_t *cursor,
const char *option,
bool value)
{
bson_iter_t iter;
if (bson_iter_init_find (&iter, &cursor->opts, option)) {
if (!BSON_ITER_HOLDS_BOOL (&iter)) {
return false;
}
bson_iter_overwrite_bool (&iter, value);
return true;
}
return BSON_APPEND_BOOL (&cursor->opts, option, value);
}
bool
_mongoc_cursor_get_opt_bool (const mongoc_cursor_t *cursor, const char *option)
{
bson_iter_t iter;
if (bson_iter_init_find (&iter, &cursor->opts, option)) {
return bson_iter_as_bool (&iter);
}
return false;
}
int32_t
-_mongoc_n_return (mongoc_cursor_t *cursor)
+_mongoc_n_return (bool is_initial_message, mongoc_cursor_t *cursor)
{
int64_t limit;
int64_t batch_size;
int64_t n_return;
- if (cursor->is_command) {
+ if (!cursor->is_find && is_initial_message) {
/* commands always have n_return of 1 */
return 1;
}
limit = mongoc_cursor_get_limit (cursor);
batch_size = mongoc_cursor_get_batch_size (cursor);
if (limit < 0) {
n_return = limit;
} else if (limit) {
int64_t remaining = limit - cursor->count;
BSON_ASSERT (remaining > 0);
if (batch_size) {
n_return = BSON_MIN (batch_size, remaining);
} else {
/* batch_size 0 means accept the default */
n_return = remaining;
}
} else {
n_return = batch_size;
}
if (n_return < INT32_MIN) {
return INT32_MIN;
} else if (n_return > INT32_MAX) {
return INT32_MAX;
} else {
return (int32_t) n_return;
}
}
void
_mongoc_set_cursor_ns (mongoc_cursor_t *cursor, const char *ns, uint32_t nslen)
{
const char *dot;
bson_strncpy (cursor->ns, ns, sizeof cursor->ns);
cursor->nslen = BSON_MIN (nslen, sizeof cursor->ns);
dot = strstr (cursor->ns, ".");
if (dot) {
cursor->dblen = (uint32_t) (dot - cursor->ns);
} else {
/* a database name with no collection name */
cursor->dblen = cursor->nslen;
}
}
/* return first key beginning with $, or NULL. precondition: bson is valid. */
static const char *
_first_dollar_field (const bson_t *bson)
{
bson_iter_t iter;
const char *key;
BSON_ASSERT (bson_iter_init (&iter, bson));
while (bson_iter_next (&iter)) {
key = bson_iter_key (&iter);
if (key[0] == '$') {
return key;
}
}
return NULL;
}
#define MARK_FAILED(c) \
do { \
(c)->done = true; \
(c)->end_of_event = true; \
(c)->sent = true; \
} while (0)
mongoc_cursor_t *
_mongoc_cursor_new_with_opts (mongoc_client_t *client,
const char *db_and_collection,
- bool is_command,
+ bool is_find,
const bson_t *filter,
const bson_t *opts,
const mongoc_read_prefs_t *read_prefs,
const mongoc_read_concern_t *read_concern)
{
mongoc_cursor_t *cursor;
mongoc_topology_description_type_t td_type;
uint32_t server_id;
bson_error_t validate_err;
const char *dollar_field;
+ bson_iter_t iter;
ENTRY;
BSON_ASSERT (client);
cursor = (mongoc_cursor_t *) bson_malloc0 (sizeof *cursor);
cursor->client = client;
- cursor->is_command = is_command ? 1 : 0;
+ cursor->is_find = is_find ? 1 : 0;
bson_init (&cursor->filter);
bson_init (&cursor->opts);
- bson_init (&cursor->error_doc);
+ bson_init (&cursor->reply);
if (filter) {
if (!bson_validate_with_error (
filter, BSON_VALIDATE_EMPTY_KEYS, &validate_err)) {
MARK_FAILED (cursor);
bson_set_error (&cursor->error,
MONGOC_ERROR_CURSOR,
MONGOC_ERROR_CURSOR_INVALID_CURSOR,
"Invalid filter: %s",
validate_err.message);
GOTO (finish);
}
bson_destroy (&cursor->filter);
bson_copy_to (filter, &cursor->filter);
}
if (opts) {
if (!bson_validate_with_error (
opts, BSON_VALIDATE_EMPTY_KEYS, &validate_err)) {
MARK_FAILED (cursor);
bson_set_error (&cursor->error,
MONGOC_ERROR_CURSOR,
MONGOC_ERROR_CURSOR_INVALID_CURSOR,
"Invalid opts: %s",
validate_err.message);
GOTO (finish);
}
dollar_field = _first_dollar_field (opts);
if (dollar_field) {
MARK_FAILED (cursor);
bson_set_error (&cursor->error,
MONGOC_ERROR_CURSOR,
MONGOC_ERROR_CURSOR_INVALID_CURSOR,
"Cannot use $-modifiers in opts: \"%s\"",
dollar_field);
GOTO (finish);
}
- bson_copy_to_excluding_noinit (opts, &cursor->opts, "serverId", NULL);
+ if (bson_iter_init_find (&iter, opts, "sessionId")) {
+ if (!_mongoc_client_session_from_iter (
+ client, &iter, &cursor->client_session, &cursor->error)) {
+ MARK_FAILED (cursor);
+ GOTO (finish);
+ }
+
+ cursor->explicit_session = 1;
+ }
/* true if there's a valid serverId or no serverId, false on err */
if (!_mongoc_get_server_id_from_opts (opts,
MONGOC_ERROR_CURSOR,
MONGOC_ERROR_CURSOR_INVALID_CURSOR,
&server_id,
&cursor->error)) {
MARK_FAILED (cursor);
GOTO (finish);
}
if (server_id) {
mongoc_cursor_set_hint (cursor, server_id);
}
+
+ bson_copy_to_excluding_noinit (
+ opts, &cursor->opts, "serverId", "sessionId", NULL);
}
cursor->read_prefs = read_prefs
? mongoc_read_prefs_copy (read_prefs)
: mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
cursor->read_concern = read_concern ? mongoc_read_concern_copy (read_concern)
: mongoc_read_concern_new ();
if (db_and_collection) {
_mongoc_set_cursor_ns (
cursor, db_and_collection, (uint32_t) strlen (db_and_collection));
}
if (_mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_EXHAUST)) {
if (_mongoc_cursor_get_opt_int64 (cursor, MONGOC_CURSOR_LIMIT, 0)) {
bson_set_error (&cursor->error,
MONGOC_ERROR_CURSOR,
MONGOC_ERROR_CURSOR_INVALID_CURSOR,
"Cannot specify both 'exhaust' and 'limit'.");
MARK_FAILED (cursor);
GOTO (finish);
}
td_type = _mongoc_topology_get_type (client->topology);
if (td_type == MONGOC_TOPOLOGY_SHARDED) {
bson_set_error (&cursor->error,
MONGOC_ERROR_CURSOR,
MONGOC_ERROR_CURSOR_INVALID_CURSOR,
"Cannot use exhaust cursor with sharded cluster.");
MARK_FAILED (cursor);
GOTO (finish);
}
}
_mongoc_buffer_init (&cursor->buffer, NULL, 0, NULL, NULL);
_mongoc_read_prefs_validate (read_prefs, &cursor->error);
finish:
mongoc_counter_cursors_active_inc ();
RETURN (cursor);
}
mongoc_cursor_t *
_mongoc_cursor_new (mongoc_client_t *client,
const char *db_and_collection,
mongoc_query_flags_t qflags,
uint32_t skip,
int32_t limit,
uint32_t batch_size,
- bool is_command,
+ bool is_find,
const bson_t *query,
const bson_t *fields,
const mongoc_read_prefs_t *read_prefs,
const mongoc_read_concern_t *read_concern)
{
bson_t filter;
bool has_filter = false;
bson_t opts = BSON_INITIALIZER;
bool slave_ok = false;
const char *key;
bson_iter_t iter;
const char *opt_key;
int len;
uint32_t data_len;
const uint8_t *data;
mongoc_cursor_t *cursor;
bson_error_t error = {0};
ENTRY;
BSON_ASSERT (client);
if (query) {
if (bson_has_field (query, "$query")) {
/* like "{$query: {a: 1}, $orderby: {b: 1}, $otherModifier: true}" */
bson_iter_init (&iter, query);
while (bson_iter_next (&iter)) {
key = bson_iter_key (&iter);
if (key[0] != '$') {
bson_set_error (&error,
MONGOC_ERROR_CURSOR,
MONGOC_ERROR_CURSOR_INVALID_CURSOR,
"Cannot mix $query with non-dollar field '%s'",
key);
GOTO (done);
}
if (!strcmp (key, "$query")) {
/* set "filter" to the incoming document's "$query" */
bson_iter_document (&iter, &data_len, &data);
bson_init_static (&filter, data, (size_t) data_len);
has_filter = true;
} else if (_translate_query_opt (key, &opt_key, &len)) {
/* "$orderby" becomes "sort", etc., "$unknown" -> "unknown" */
bson_append_iter (&opts, opt_key, len, &iter);
} else {
/* strip leading "$" */
bson_append_iter (&opts, key + 1, -1, &iter);
}
}
}
}
if (!bson_empty0 (fields)) {
bson_append_document (
&opts, MONGOC_CURSOR_PROJECTION, MONGOC_CURSOR_PROJECTION_LEN, fields);
}
if (skip) {
bson_append_int64 (
&opts, MONGOC_CURSOR_SKIP, MONGOC_CURSOR_SKIP_LEN, skip);
}
if (limit) {
bson_append_int64 (
&opts, MONGOC_CURSOR_LIMIT, MONGOC_CURSOR_LIMIT_LEN, llabs (limit));
if (limit < 0) {
bson_append_bool (&opts,
MONGOC_CURSOR_SINGLE_BATCH,
MONGOC_CURSOR_SINGLE_BATCH_LEN,
true);
}
}
if (batch_size) {
bson_append_int64 (&opts,
MONGOC_CURSOR_BATCH_SIZE,
MONGOC_CURSOR_BATCH_SIZE_LEN,
batch_size);
}
if (qflags & MONGOC_QUERY_SLAVE_OK) {
slave_ok = true;
}
if (qflags & MONGOC_QUERY_TAILABLE_CURSOR) {
bson_append_bool (
&opts, MONGOC_CURSOR_TAILABLE, MONGOC_CURSOR_TAILABLE_LEN, true);
}
if (qflags & MONGOC_QUERY_OPLOG_REPLAY) {
bson_append_bool (&opts,
MONGOC_CURSOR_OPLOG_REPLAY,
MONGOC_CURSOR_OPLOG_REPLAY_LEN,
true);
}
if (qflags & MONGOC_QUERY_NO_CURSOR_TIMEOUT) {
bson_append_bool (&opts,
MONGOC_CURSOR_NO_CURSOR_TIMEOUT,
MONGOC_CURSOR_NO_CURSOR_TIMEOUT_LEN,
true);
}
if (qflags & MONGOC_QUERY_AWAIT_DATA) {
bson_append_bool (
&opts, MONGOC_CURSOR_AWAIT_DATA, MONGOC_CURSOR_AWAIT_DATA_LEN, true);
}
if (qflags & MONGOC_QUERY_EXHAUST) {
bson_append_bool (
&opts, MONGOC_CURSOR_EXHAUST, MONGOC_CURSOR_EXHAUST_LEN, true);
}
if (qflags & MONGOC_QUERY_PARTIAL) {
bson_append_bool (&opts,
MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS,
MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS_LEN,
true);
}
done:
if (error.domain != 0) {
cursor = _mongoc_cursor_new_with_opts (
- client, db_and_collection, is_command, NULL, NULL, NULL, NULL);
+ client, db_and_collection, is_find, NULL, NULL, NULL, NULL);
MARK_FAILED (cursor);
memcpy (&cursor->error, &error, sizeof (bson_error_t));
} else {
cursor = _mongoc_cursor_new_with_opts (client,
db_and_collection,
- is_command,
+ is_find,
has_filter ? &filter : query,
&opts,
read_prefs,
read_concern);
if (slave_ok) {
cursor->slave_ok = true;
}
}
if (has_filter) {
bson_destroy (&filter);
}
bson_destroy (&opts);
RETURN (cursor);
}
void
mongoc_cursor_destroy (mongoc_cursor_t *cursor)
{
ENTRY;
BSON_ASSERT (cursor);
if (cursor->iface.destroy) {
cursor->iface.destroy (cursor);
} else {
_mongoc_cursor_destroy (cursor);
}
EXIT;
}
void
_mongoc_cursor_destroy (mongoc_cursor_t *cursor)
{
char db[MONGOC_NAMESPACE_MAX];
ENTRY;
BSON_ASSERT (cursor);
if (cursor->in_exhaust) {
cursor->client->in_exhaust = false;
if (!cursor->done) {
/* The only way to stop an exhaust cursor is to kill the connection */
- mongoc_cluster_disconnect_node (&cursor->client->cluster,
- cursor->server_id, false, NULL);
+ mongoc_cluster_disconnect_node (
+ &cursor->client->cluster, cursor->server_id, false, NULL);
}
} else if (cursor->rpc.reply.cursor_id) {
bson_strncpy (db, cursor->ns, cursor->dblen + 1);
_mongoc_client_kill_cursor (cursor->client,
cursor->server_id,
cursor->rpc.reply.cursor_id,
cursor->operation_id,
db,
- cursor->ns + cursor->dblen + 1);
+ cursor->ns + cursor->dblen + 1,
+ cursor->client_session);
}
if (cursor->reader) {
bson_reader_destroy (cursor->reader);
cursor->reader = NULL;
}
+ if (cursor->client_session && !cursor->explicit_session) {
+ mongoc_client_session_destroy (cursor->client_session);
+ }
+
_mongoc_buffer_destroy (&cursor->buffer);
mongoc_read_prefs_destroy (cursor->read_prefs);
mongoc_read_concern_destroy (cursor->read_concern);
mongoc_write_concern_destroy (cursor->write_concern);
bson_destroy (&cursor->filter);
bson_destroy (&cursor->opts);
- bson_destroy (&cursor->error_doc);
+ bson_destroy (&cursor->reply);
bson_free (cursor);
mongoc_counter_cursors_active_dec ();
mongoc_counter_cursors_disposed_inc ();
EXIT;
}
mongoc_server_stream_t *
_mongoc_cursor_fetch_stream (mongoc_cursor_t *cursor)
{
mongoc_server_stream_t *server_stream;
ENTRY;
if (cursor->server_id) {
server_stream =
mongoc_cluster_stream_for_server (&cursor->client->cluster,
cursor->server_id,
true /* reconnect_ok */,
&cursor->error);
} else {
server_stream = mongoc_cluster_stream_for_reads (
&cursor->client->cluster, cursor->read_prefs, &cursor->error);
if (server_stream) {
cursor->server_id = server_stream->sd->id;
}
}
RETURN (server_stream);
}
bool
_use_find_command (const mongoc_cursor_t *cursor,
const mongoc_server_stream_t *server_stream)
{
/* Find, getMore And killCursors Commands Spec: "the find command cannot be
* used to execute other commands" and "the find command does not support the
* exhaust flag."
*/
return server_stream->sd->max_wire_version >= WIRE_VERSION_FIND_CMD &&
- !cursor->is_command &&
!_mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_EXHAUST);
}
bool
_use_getmore_command (const mongoc_cursor_t *cursor,
const mongoc_server_stream_t *server_stream)
{
return server_stream->sd->max_wire_version >= WIRE_VERSION_FIND_CMD &&
!_mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_EXHAUST);
}
static const bson_t *
_mongoc_cursor_initial_query (mongoc_cursor_t *cursor)
{
mongoc_server_stream_t *server_stream;
const bson_t *b = NULL;
ENTRY;
BSON_ASSERT (cursor);
server_stream = _mongoc_cursor_fetch_stream (cursor);
if (!server_stream) {
GOTO (done);
}
- if (_use_find_command (cursor, server_stream)) {
+ if (!cursor->is_find) {
+ /* cursor created with deprecated mongoc_client_command() */
+ bson_destroy (&cursor->reply);
+
+ if (_mongoc_cursor_run_command (
+ cursor, &cursor->filter, &cursor->opts, &cursor->reply)) {
+ b = &cursor->reply;
+ }
+
+ cursor->sent = true;
+ } else if (_use_find_command (cursor, server_stream)) {
b = _mongoc_cursor_find_command (cursor, server_stream);
} else {
/* When the user explicitly provides a readConcern -- but the server
* doesn't support readConcern, we must error:
* https://github.com/mongodb/specifications/blob/master/source/read-write-concern/read-write-concern.rst#errors-1
*/
if (cursor->read_concern->level != NULL &&
server_stream->sd->max_wire_version < WIRE_VERSION_READ_CONCERN) {
bson_set_error (&cursor->error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
"The selected server does not support readConcern");
} else {
b = _mongoc_cursor_op_query (cursor, server_stream);
}
}
done:
/* no-op if server_stream is NULL */
mongoc_server_stream_cleanup (server_stream);
if (!b) {
cursor->done = true;
}
RETURN (b);
}
static bool
-_mongoc_cursor_monitor_legacy_query (mongoc_cursor_t *cursor,
- mongoc_server_stream_t *server_stream,
- const char *cmd_name)
+_mongoc_cursor_monitor_command (mongoc_cursor_t *cursor,
+ mongoc_server_stream_t *server_stream,
+ const bson_t *cmd,
+ const char *cmd_name)
{
- bson_t doc;
mongoc_client_t *client;
mongoc_apm_command_started_t event;
char db[MONGOC_NAMESPACE_MAX];
ENTRY;
client = cursor->client;
if (!client->apm_callbacks.started) {
/* successful */
RETURN (true);
}
- bson_init (&doc);
bson_strncpy (db, cursor->ns, cursor->dblen + 1);
- if (!cursor->is_command) {
- /* simulate a MongoDB 3.2+ "find" command */
- if (!_mongoc_cursor_prepare_find_command (cursor, &doc, server_stream)) {
- /* cursor->error is set */
- bson_destroy (&doc);
- RETURN (false);
- }
- }
-
mongoc_apm_command_started_init (&event,
- cursor->is_command ? &cursor->filter : &doc,
+ cmd,
db,
cmd_name,
client->cluster.request_id,
cursor->operation_id,
&server_stream->sd->host,
server_stream->sd->id,
client->apm_context);
client->apm_callbacks.started (&event);
mongoc_apm_command_started_cleanup (&event);
- bson_destroy (&doc);
RETURN (true);
}
+static bool
+_mongoc_cursor_monitor_legacy_query (mongoc_cursor_t *cursor,
+ mongoc_server_stream_t *server_stream)
+{
+ bson_t doc;
+ mongoc_client_t *client;
+ char db[MONGOC_NAMESPACE_MAX];
+ bool r;
+
+ ENTRY;
+
+ client = cursor->client;
+ if (!client->apm_callbacks.started) {
+ /* successful */
+ RETURN (true);
+ }
+
+ bson_init (&doc);
+ bson_strncpy (db, cursor->ns, cursor->dblen + 1);
+
+ /* simulate a MongoDB 3.2+ "find" command */
+ if (!_mongoc_cursor_prepare_find_command (cursor, &doc)) {
+ /* cursor->error is set */
+ bson_destroy (&doc);
+ RETURN (false);
+ }
+
+ bson_copy_to_excluding_noinit (
+ &cursor->opts, &doc, "serverId", "maxAwaitTimeMS", "sessionId", NULL);
+
+ r = _mongoc_cursor_monitor_command (cursor, server_stream, &doc, "find");
+
+ bson_destroy (&doc);
+
+ RETURN (r);
+}
+
+
/* append array of docs from current cursor batch */
static void
_mongoc_cursor_append_docs_array (mongoc_cursor_t *cursor, bson_t *docs)
{
bool eof = false;
char str[16];
const char *key;
uint32_t i = 0;
size_t keylen;
const bson_t *doc;
while ((doc = bson_reader_read (cursor->reader, &eof))) {
keylen = bson_uint32_to_string (i, &key, str, sizeof str);
bson_append_document (docs, key, (int) keylen, doc);
}
bson_reader_reset (cursor->reader);
}
static void
_mongoc_cursor_monitor_succeeded (mongoc_cursor_t *cursor,
int64_t duration,
bool first_batch,
mongoc_server_stream_t *stream,
const char *cmd_name)
{
+ bson_t docs_array;
mongoc_apm_command_succeeded_t event;
mongoc_client_t *client;
bson_t reply;
bson_t reply_cursor;
ENTRY;
client = cursor->client;
if (!client->apm_callbacks.succeeded) {
EXIT;
}
- if (cursor->is_command) {
- /* cursor is from mongoc_client_command. we're in mongoc_cursor_next. */
- if (!_mongoc_rpc_reply_get_first (&cursor->rpc.reply, &reply)) {
- MONGOC_ERROR ("_mongoc_cursor_monitor_succeeded can't parse reply");
- EXIT;
- }
- } else {
- bson_t docs_array;
-
- /* fake reply to find/getMore command:
- * {ok: 1, cursor: {id: 17, ns: "...", first/nextBatch: [ ... docs ... ]}}
- */
- bson_init (&docs_array);
- _mongoc_cursor_append_docs_array (cursor, &docs_array);
-
- bson_init (&reply);
- bson_append_int32 (&reply, "ok", 2, 1);
- bson_append_document_begin (&reply, "cursor", 6, &reply_cursor);
- bson_append_int64 (&reply_cursor, "id", 2, mongoc_cursor_get_id (cursor));
- bson_append_utf8 (&reply_cursor, "ns", 2, cursor->ns, cursor->nslen);
- bson_append_array (&reply_cursor,
- first_batch ? "firstBatch" : "nextBatch",
- first_batch ? 10 : 9,
- &docs_array);
- bson_append_document_end (&reply, &reply_cursor);
- bson_destroy (&docs_array);
- }
+ /* we sent OP_QUERY/OP_GETMORE, fake a reply to find/getMore command:
+ * {ok: 1, cursor: {id: 17, ns: "...", first/nextBatch: [ ... docs ... ]}}
+ */
+ bson_init (&docs_array);
+ _mongoc_cursor_append_docs_array (cursor, &docs_array);
+
+ bson_init (&reply);
+ bson_append_int32 (&reply, "ok", 2, 1);
+ bson_append_document_begin (&reply, "cursor", 6, &reply_cursor);
+ bson_append_int64 (&reply_cursor, "id", 2, mongoc_cursor_get_id (cursor));
+ bson_append_utf8 (&reply_cursor, "ns", 2, cursor->ns, cursor->nslen);
+ bson_append_array (&reply_cursor,
+ first_batch ? "firstBatch" : "nextBatch",
+ first_batch ? 10 : 9,
+ &docs_array);
+ bson_append_document_end (&reply, &reply_cursor);
+ bson_destroy (&docs_array);
mongoc_apm_command_succeeded_init (&event,
duration,
&reply,
cmd_name,
client->cluster.request_id,
cursor->operation_id,
&stream->sd->host,
stream->sd->id,
client->apm_context);
client->apm_callbacks.succeeded (&event);
mongoc_apm_command_succeeded_cleanup (&event);
bson_destroy (&reply);
EXIT;
}
static void
_mongoc_cursor_monitor_failed (mongoc_cursor_t *cursor,
int64_t duration,
mongoc_server_stream_t *stream,
const char *cmd_name)
{
mongoc_apm_command_failed_t event;
mongoc_client_t *client;
ENTRY;
client = cursor->client;
if (!client->apm_callbacks.failed) {
EXIT;
}
mongoc_apm_command_failed_init (&event,
duration,
cmd_name,
&cursor->error,
client->cluster.request_id,
cursor->operation_id,
&stream->sd->host,
stream->sd->id,
client->apm_context);
client->apm_callbacks.failed (&event);
mongoc_apm_command_failed_cleanup (&event);
EXIT;
}
#define OPT_CHECK(_type) \
do { \
if (!BSON_ITER_HOLDS_##_type (&iter)) { \
bson_set_error (&cursor->error, \
MONGOC_ERROR_COMMAND, \
MONGOC_ERROR_COMMAND_INVALID_ARG, \
"invalid option %s, should be type %s", \
key, \
#_type); \
return NULL; \
} \
} while (false)
#define OPT_CHECK_INT() \
do { \
if (!BSON_ITER_HOLDS_INT (&iter)) { \
bson_set_error (&cursor->error, \
MONGOC_ERROR_COMMAND, \
MONGOC_ERROR_COMMAND_INVALID_ARG, \
"invalid option %s, should be integer", \
key); \
return NULL; \
} \
} while (false)
#define OPT_ERR(_msg) \
do { \
bson_set_error (&cursor->error, \
MONGOC_ERROR_COMMAND, \
MONGOC_ERROR_COMMAND_INVALID_ARG, \
_msg); \
return NULL; \
} while (false)
#define OPT_BSON_ERR(_msg) \
do { \
bson_set_error ( \
&cursor->error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, _msg); \
return NULL; \
} while (false)
#define OPT_FLAG(_flag) \
do { \
OPT_CHECK (BOOL); \
if (bson_iter_as_bool (&iter)) { \
*flags |= _flag; \
} \
} while (false)
#define PUSH_DOLLAR_QUERY() \
do { \
if (!pushed_dollar_query) { \
pushed_dollar_query = true; \
bson_append_document (query, "$query", 6, &cursor->filter); \
} \
} while (false)
#define OPT_SUBDOCUMENT(_opt_name, _legacy_name) \
do { \
OPT_CHECK (DOCUMENT); \
bson_iter_document (&iter, &len, &data); \
if (!bson_init_static (&subdocument, data, (size_t) len)) { \
OPT_BSON_ERR ("Invalid '" #_opt_name "' subdocument in 'opts'."); \
} \
BSON_APPEND_DOCUMENT (query, "$" #_legacy_name, &subdocument); \
} while (false)
#define ADD_FLAG(_flags, _value) \
do { \
if (!BSON_ITER_HOLDS_BOOL (&iter)) { \
bson_set_error (&cursor->error, \
MONGOC_ERROR_COMMAND, \
MONGOC_ERROR_COMMAND_INVALID_ARG, \
"invalid option %s, should be type bool", \
key); \
return false; \
} \
if (bson_iter_as_bool (&iter)) { \
*_flags |= _value; \
} \
} while (false);
static bool
_mongoc_cursor_flags (mongoc_cursor_t *cursor,
mongoc_server_stream_t *stream,
mongoc_query_flags_t *flags /* OUT */)
{
bson_iter_t iter;
const char *key;
*flags = MONGOC_QUERY_NONE;
if (!bson_iter_init (&iter, &cursor->opts)) {
bson_set_error (&cursor->error,
MONGOC_ERROR_BSON,
MONGOC_ERROR_BSON_INVALID,
"Invalid 'opts' parameter.");
return false;
}
while (bson_iter_next (&iter)) {
key = bson_iter_key (&iter);
if (!strcmp (key, MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS)) {
ADD_FLAG (flags, MONGOC_QUERY_PARTIAL);
} else if (!strcmp (key, MONGOC_CURSOR_AWAIT_DATA)) {
ADD_FLAG (flags, MONGOC_QUERY_AWAIT_DATA);
} else if (!strcmp (key, MONGOC_CURSOR_EXHAUST)) {
ADD_FLAG (flags, MONGOC_QUERY_EXHAUST);
} else if (!strcmp (key, MONGOC_CURSOR_NO_CURSOR_TIMEOUT)) {
ADD_FLAG (flags, MONGOC_QUERY_NO_CURSOR_TIMEOUT);
} else if (!strcmp (key, MONGOC_CURSOR_OPLOG_REPLAY)) {
ADD_FLAG (flags, MONGOC_QUERY_OPLOG_REPLAY);
} else if (!strcmp (key, MONGOC_CURSOR_TAILABLE)) {
ADD_FLAG (flags, MONGOC_QUERY_TAILABLE_CURSOR);
}
}
if (cursor->slave_ok) {
*flags |= MONGOC_QUERY_SLAVE_OK;
} else if (cursor->server_id_set &&
(stream->topology_type == MONGOC_TOPOLOGY_RS_WITH_PRIMARY ||
stream->topology_type == MONGOC_TOPOLOGY_RS_NO_PRIMARY) &&
stream->sd->type != MONGOC_SERVER_RS_PRIMARY) {
*flags |= MONGOC_QUERY_SLAVE_OK;
}
return true;
}
/* _mongoc_cursor_parse_opts_for_op_query --
 * Translate cursor->opts into a legacy OP_QUERY message: known options
 * become "$"-prefixed query modifiers (pushing the filter under "$query"
 * on first such option), "projection" is copied into "fields", "skip" is
 * extracted, and remaining unrecognized options are forwarded with a "$"
 * prefix.  Returns the query document to send, or NULL on error with
 * cursor->error set.
 * NOTE(review): this region is a patch hunk; lines beginning with "-" or
 * "+" are diff markers from the surrounding patch, left untouched. */
static bson_t *
_mongoc_cursor_parse_opts_for_op_query (mongoc_cursor_t *cursor,
mongoc_server_stream_t *stream,
bson_t *query /* OUT */,
bson_t *fields /* OUT */,
mongoc_query_flags_t *flags /* OUT */,
int32_t *skip /* OUT */)
{
bool pushed_dollar_query;
bson_iter_t iter;
uint32_t len;
const uint8_t *data;
bson_t subdocument;
const char *key;
char *dollar_modifier;
*flags = MONGOC_QUERY_NONE;
*skip = 0;
/* assume we'll send filter straight to server, like "{a: 1}". if we find an
 * opt we must add, like "sort", we push the query like "$query: {a: 1}",
 * then add a query modifier for the option, in this example "$orderby".
 */
pushed_dollar_query = false;
if (!bson_iter_init (&iter, &cursor->opts)) {
OPT_BSON_ERR ("Invalid 'opts' parameter.");
}
while (bson_iter_next (&iter)) {
key = bson_iter_key (&iter);
/* most common options first */
if (!strcmp (key, MONGOC_CURSOR_PROJECTION)) {
OPT_CHECK (DOCUMENT);
bson_iter_document (&iter, &len, &data);
if (!bson_init_static (&subdocument, data, (size_t) len)) {
OPT_BSON_ERR ("Invalid 'projection' subdocument in 'opts'.");
}
bson_copy_to (&subdocument, fields);
} else if (!strcmp (key, MONGOC_CURSOR_SORT)) {
PUSH_DOLLAR_QUERY ();
OPT_SUBDOCUMENT (sort, orderby);
} else if (!strcmp (key, MONGOC_CURSOR_SKIP)) {
OPT_CHECK_INT ();
*skip = (int32_t) bson_iter_as_int64 (&iter);
}
/* the rest of the options, alphabetically */
else if (!strcmp (key, MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS)) {
OPT_FLAG (MONGOC_QUERY_PARTIAL);
} else if (!strcmp (key, MONGOC_CURSOR_AWAIT_DATA)) {
OPT_FLAG (MONGOC_QUERY_AWAIT_DATA);
} else if (!strcmp (key, MONGOC_CURSOR_COMMENT)) {
OPT_CHECK (UTF8);
PUSH_DOLLAR_QUERY ();
BSON_APPEND_UTF8 (query, "$comment", bson_iter_utf8 (&iter, NULL));
} else if (!strcmp (key, MONGOC_CURSOR_HINT)) {
/* hint may be either an index name (string) or an index spec (doc) */
if (BSON_ITER_HOLDS_UTF8 (&iter)) {
PUSH_DOLLAR_QUERY ();
BSON_APPEND_UTF8 (query, "$hint", bson_iter_utf8 (&iter, NULL));
} else if (BSON_ITER_HOLDS_DOCUMENT (&iter)) {
PUSH_DOLLAR_QUERY ();
OPT_SUBDOCUMENT (hint, hint);
} else {
OPT_ERR ("Wrong type for 'hint' field in 'opts'.");
}
} else if (!strcmp (key, MONGOC_CURSOR_MAX)) {
PUSH_DOLLAR_QUERY ();
OPT_SUBDOCUMENT (max, max);
} else if (!strcmp (key, MONGOC_CURSOR_MAX_SCAN)) {
OPT_CHECK_INT ();
PUSH_DOLLAR_QUERY ();
BSON_APPEND_INT64 (query, "$maxScan", bson_iter_as_int64 (&iter));
} else if (!strcmp (key, MONGOC_CURSOR_MAX_TIME_MS)) {
OPT_CHECK_INT ();
PUSH_DOLLAR_QUERY ();
BSON_APPEND_INT64 (query, "$maxTimeMS", bson_iter_as_int64 (&iter));
} else if (!strcmp (key, MONGOC_CURSOR_MIN)) {
PUSH_DOLLAR_QUERY ();
OPT_SUBDOCUMENT (min, min);
} else if (!strcmp (key, MONGOC_CURSOR_READ_CONCERN)) {
OPT_ERR ("Set readConcern on client, database, or collection,"
" not in a query.");
} else if (!strcmp (key, MONGOC_CURSOR_RETURN_KEY)) {
OPT_CHECK (BOOL);
PUSH_DOLLAR_QUERY ();
BSON_APPEND_BOOL (query, "$returnKey", bson_iter_as_bool (&iter));
} else if (!strcmp (key, MONGOC_CURSOR_SHOW_RECORD_ID)) {
OPT_CHECK (BOOL);
PUSH_DOLLAR_QUERY ();
BSON_APPEND_BOOL (query, "$showDiskLoc", bson_iter_as_bool (&iter));
} else if (!strcmp (key, MONGOC_CURSOR_SNAPSHOT)) {
OPT_CHECK (BOOL);
PUSH_DOLLAR_QUERY ();
BSON_APPEND_BOOL (query, "$snapshot", bson_iter_as_bool (&iter));
} else if (!strcmp (key, MONGOC_CURSOR_COLLATION)) {
/* OP_QUERY cannot express collation; reject rather than ignore */
bson_set_error (&cursor->error,
- MONGOC_ERROR_CURSOR,
+ MONGOC_ERROR_COMMAND,
MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
- "Collation is not supported by this server");
+ "The selected server does not support collation");
return NULL;
}
/* singleBatch limit and batchSize are handled in _mongoc_n_return,
 * exhaust noCursorTimeout oplogReplay tailable in _mongoc_cursor_flags
 * maxAwaitTimeMS is handled in _mongoc_cursor_prepare_getmore_command
+ * sessionId is used to retrieve the mongoc_client_session_t
 */
else if (strcmp (key, MONGOC_CURSOR_SINGLE_BATCH) &&
strcmp (key, MONGOC_CURSOR_LIMIT) &&
strcmp (key, MONGOC_CURSOR_BATCH_SIZE) &&
strcmp (key, MONGOC_CURSOR_EXHAUST) &&
strcmp (key, MONGOC_CURSOR_NO_CURSOR_TIMEOUT) &&
strcmp (key, MONGOC_CURSOR_OPLOG_REPLAY) &&
strcmp (key, MONGOC_CURSOR_TAILABLE) &&
strcmp (key, MONGOC_CURSOR_MAX_AWAIT_TIME_MS)) {
/* pass unrecognized options to server, prefixed with $ */
PUSH_DOLLAR_QUERY ();
dollar_modifier = bson_strdup_printf ("$%s", key);
bson_append_iter (query, dollar_modifier, -1, &iter);
bson_free (dollar_modifier);
}
}
if (!_mongoc_cursor_flags (cursor, stream, flags)) {
/* cursor->error is set */
return NULL;
}
return pushed_dollar_query ? query : &cursor->filter;
}
#undef OPT_CHECK
#undef OPT_ERR
#undef OPT_BSON_ERR
#undef OPT_FLAG
#undef OPT_SUBDOCUMENT
/* _mongoc_cursor_op_query --
 * Send the cursor's initial query as a legacy OP_QUERY message, emit
 * command-monitoring events, and read the server's OP_REPLY into
 * cursor->buffer.  Returns the first document of the reply batch, or
 * NULL on failure (cursor->error is set and cursor->done becomes true).
 * NOTE(review): this region is a patch hunk; lines beginning with "-" or
 * "+" are diff markers from the surrounding patch, left untouched. */
static const bson_t *
_mongoc_cursor_op_query (mongoc_cursor_t *cursor,
mongoc_server_stream_t *server_stream)
{
int64_t started;
uint32_t request_id;
mongoc_rpc_t rpc;
- const char *cmd_name; /* for command monitoring */
const bson_t *query_ptr;
bson_t query = BSON_INITIALIZER;
bson_t fields = BSON_INITIALIZER;
mongoc_query_flags_t flags;
- mongoc_apply_read_prefs_result_t result = READ_PREFS_RESULT_INIT;
+ mongoc_assemble_query_result_t result = ASSEMBLE_QUERY_RESULT_INIT;
const bson_t *ret = NULL;
bool succeeded = false;
ENTRY;
+ /* cursors created by mongoc_client_command don't use this function */
+ BSON_ASSERT (cursor->is_find);
+
started = bson_get_monotonic_time ();
cursor->sent = true;
cursor->operation_id = ++cursor->client->cluster.operation_id;
request_id = ++cursor->client->cluster.request_id;
/* build the OP_QUERY header and message fields */
rpc.header.msg_len = 0;
rpc.header.request_id = request_id;
rpc.header.response_to = 0;
rpc.header.opcode = MONGOC_OPCODE_QUERY;
rpc.query.flags = MONGOC_QUERY_NONE;
rpc.query.collection = cursor->ns;
rpc.query.skip = 0;
rpc.query.n_return = 0;
rpc.query.fields = NULL;
- if (cursor->is_command) {
- /* "filter" isn't a query, it's like {commandName: ... }*/
- cmd_name = _mongoc_get_command_name (&cursor->filter);
- BSON_ASSERT (cmd_name);
- } else {
- cmd_name = "find";
- }
-
query_ptr = _mongoc_cursor_parse_opts_for_op_query (
cursor, server_stream, &query, &fields, &flags, &rpc.query.skip);
if (!query_ptr) {
/* invalid opts. cursor->error is set */
GOTO (done);
}
- apply_read_preferences (
+ assemble_query (
cursor->read_prefs, server_stream, query_ptr, flags, &result);
- rpc.query.query = bson_get_data (result.query_with_read_prefs);
+ rpc.query.query = bson_get_data (result.assembled_query);
rpc.query.flags = result.flags;
- rpc.query.n_return = _mongoc_n_return (cursor);
+ rpc.query.n_return = _mongoc_n_return (true, cursor);
if (!bson_empty (&fields)) {
rpc.query.fields = bson_get_data (&fields);
}
- if (!_mongoc_cursor_monitor_legacy_query (cursor, server_stream, cmd_name)) {
+ /* cursor from mongoc_collection_find[_with_opts] is about to send its
+ * initial OP_QUERY to pre-3.2 MongoDB */
+ if (!_mongoc_cursor_monitor_legacy_query (cursor, server_stream)) {
GOTO (done);
}
- if (!mongoc_cluster_sendv_to_server (&cursor->client->cluster,
- &rpc,
- server_stream,
- NULL,
- &cursor->error)) {
+ if (!mongoc_cluster_legacy_rpc_sendv_to_server (
+ &cursor->client->cluster, &rpc, server_stream, &cursor->error)) {
GOTO (done);
}
_mongoc_buffer_clear (&cursor->buffer, false);
if (!_mongoc_client_recv (cursor->client,
&cursor->rpc,
&cursor->buffer,
server_stream,
&cursor->error)) {
GOTO (done);
}
/* validate that the reply matches our request before trusting it */
if (cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) {
bson_set_error (&cursor->error,
MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Invalid opcode. Expected %d, got %d.",
MONGOC_OPCODE_REPLY,
cursor->rpc.header.opcode);
GOTO (done);
}
if (cursor->rpc.header.response_to != request_id) {
bson_set_error (&cursor->error,
MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Invalid response_to for query. Expected %d, got %d.",
request_id,
cursor->rpc.header.response_to);
GOTO (done);
}
if (!_mongoc_rpc_check_ok (&cursor->rpc,
- (bool) cursor->is_command,
cursor->client->error_api_version,
&cursor->error,
- &cursor->error_doc)) {
+ &cursor->reply)) {
GOTO (done);
}
if (cursor->reader) {
bson_reader_destroy (cursor->reader);
}
cursor->reader = bson_reader_new_from_data (
cursor->rpc.reply.documents, (size_t) cursor->rpc.reply.documents_len);
if (_mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_EXHAUST)) {
cursor->in_exhaust = true;
cursor->client->in_exhaust = true;
}
_mongoc_cursor_monitor_succeeded (cursor,
bson_get_monotonic_time () - started,
true, /* first_batch */
server_stream,
- cmd_name);
+ "find");
cursor->done = false;
cursor->end_of_event = false;
succeeded = true;
_mongoc_read_from_buffer (cursor, &ret);
done:
if (!succeeded) {
_mongoc_cursor_monitor_failed (
- cursor, bson_get_monotonic_time () - started, server_stream, cmd_name);
+ cursor, bson_get_monotonic_time () - started, server_stream, "find");
}
- apply_read_prefs_result_cleanup (&result);
+ assemble_query_result_cleanup (&result);
bson_destroy (&query);
bson_destroy (&fields);
if (!ret) {
cursor->done = true;
}
RETURN (ret);
}
/* _mongoc_cursor_run_command --
 * Execute "command" on the cursor's selected server via the command
 * protocol, applying opts, session handling, read concern, read
 * preference, and write concern; on success with a write concern set,
 * also parses the reply for writeConcernError.  Returns true on success;
 * on failure cursor->error is set.
 * NOTE(review): this region is a patch hunk; lines beginning with "-" or
 * "+" are diff markers from the surrounding patch, left untouched. */
bool
_mongoc_cursor_run_command (mongoc_cursor_t *cursor,
const bson_t *command,
+ const bson_t *opts,
bson_t *reply)
{
mongoc_cluster_t *cluster;
mongoc_server_stream_t *server_stream;
+ bson_iter_t iter;
mongoc_cmd_parts_t parts;
+ const char *cmd_name;
+ bool is_primary;
+ mongoc_read_prefs_t *prefs = NULL;
char db[MONGOC_NAMESPACE_MAX];
+ mongoc_session_opt_t *session_opts;
bool ret = false;
ENTRY;
cluster = &cursor->client->cluster;
- mongoc_cmd_parts_init (&parts, db, MONGOC_QUERY_NONE, command);
+ mongoc_cmd_parts_init (
+ &parts, cursor->client, db, MONGOC_QUERY_NONE, command);
+ parts.is_read_command = true;
parts.read_prefs = cursor->read_prefs;
parts.assembled.operation_id = cursor->operation_id;
server_stream = _mongoc_cursor_fetch_stream (cursor);
if (!server_stream) {
GOTO (done);
}
+ if (opts) {
+ bson_iter_init (&iter, opts);
+ if (!mongoc_cmd_parts_append_opts (&parts,
+ &iter,
+ server_stream->sd->max_wire_version,
+ &cursor->error)) {
+ GOTO (done);
+ }
+ }
+
+ if (parts.assembled.session) {
+ /* initial query/aggregate/etc, and opts contains "sessionId" */
+ BSON_ASSERT (!cursor->client_session);
+ BSON_ASSERT (!cursor->explicit_session);
+ cursor->client_session = parts.assembled.session;
+ cursor->explicit_session = 1;
+ } else if (cursor->client_session) {
+ /* a getMore with implicit or explicit session already acquired */
+ mongoc_cmd_parts_set_session (&parts, cursor->client_session);
+ } else {
+ /* try to create an implicit session. not causally consistent. we keep
+ * the session but leave cursor->explicit_session as 0, so we use the
+ * same lsid for getMores but destroy the session when the cursor dies.
+ */
+ session_opts = mongoc_session_opts_new ();
+ mongoc_session_opts_set_causal_consistency (session_opts, false);
+ /* returns NULL if sessions aren't supported. ignore errors. */
+ cursor->client_session =
+ mongoc_client_start_session (cursor->client, session_opts, NULL);
+ mongoc_cmd_parts_set_session (&parts, cursor->client_session);
+ mongoc_session_opts_destroy (session_opts);
+ }
+
+ if (cursor->read_concern->level) {
+ bson_concat (&parts.read_concern_document,
+ _mongoc_read_concern_get_bson (cursor->read_concern));
+ }
+
bson_strncpy (db, cursor->ns, cursor->dblen + 1);
parts.assembled.db_name = db;
if (!_mongoc_cursor_flags (cursor, server_stream, &parts.user_query_flags)) {
GOTO (done);
}
+ /* we might use mongoc_cursor_set_hint to target a secondary but have no
+ * read preference, so the secondary rejects the read. same if we have a
+ * direct connection to a secondary (topology type "single"). with
+ * OP_QUERY we handle this by setting slaveOk. here we use $readPreference.
+ */
+ cmd_name = _mongoc_get_command_name (command);
+ is_primary =
+ !cursor->read_prefs || cursor->read_prefs->mode == MONGOC_READ_PRIMARY;
+
+ if (strcmp (cmd_name, "getMore") != 0 &&
+ server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG &&
+ is_primary && parts.user_query_flags & MONGOC_QUERY_SLAVE_OK) {
+ parts.read_prefs = prefs =
+ mongoc_read_prefs_new (MONGOC_READ_PRIMARY_PREFERRED);
+ } else {
+ parts.read_prefs = cursor->read_prefs;
+ }
+
if (cursor->write_concern &&
!mongoc_write_concern_is_default (cursor->write_concern) &&
server_stream->sd->max_wire_version >= WIRE_VERSION_CMD_WRITE_CONCERN) {
mongoc_write_concern_append (cursor->write_concern, &parts.extra);
}
+ if (!mongoc_cmd_parts_assemble (&parts, server_stream, &cursor->error)) {
+ _mongoc_bson_init_if_set (reply);
+ GOTO (done);
+ }
+
ret = mongoc_cluster_run_command_monitored (
- cluster, &parts, server_stream, reply, &cursor->error);
+ cluster, &parts.assembled, reply, &cursor->error);
/* Read and Write Concern Spec: "Drivers SHOULD parse server replies for a
 * "writeConcernError" field and report the error only in command-specific
 * helper methods that take a separate write concern parameter or an options
 * parameter that may contain a write concern option.
 *
 * Only command helpers with names like "_with_write_concern" can create
 * cursors with a non-NULL write_concern field.
 */
if (ret && cursor->write_concern) {
ret = !_mongoc_parse_wc_err (reply, &cursor->error);
}
done:
mongoc_server_stream_cleanup (server_stream);
mongoc_cmd_parts_cleanup (&parts);
+ mongoc_read_prefs_destroy (prefs);
return ret;
}
/* Translate a legacy "$"-prefixed OP_QUERY modifier name into the
 * corresponding find-command field name.  A field without a leading '$'
 * passes through unchanged with *len = -1.  Returns false when the field
 * is not a recognized modifier (i.e. it is a query operator like $or). */
static bool
_translate_query_opt (const char *query_field, const char **cmd_field, int *len)
{
   /* modifier name (after the '$') -> find command field name and length */
   static const struct {
      const char *query_name;
      const char *cmd_name;
      int cmd_len;
   } translations[] = {
      {MONGOC_CURSOR_ORDERBY, MONGOC_CURSOR_SORT, MONGOC_CURSOR_SORT_LEN},
      /* <= MongoDb 3.0 */
      {MONGOC_CURSOR_SHOW_DISK_LOC,
       MONGOC_CURSOR_SHOW_RECORD_ID,
       MONGOC_CURSOR_SHOW_RECORD_ID_LEN},
      {MONGOC_CURSOR_HINT, MONGOC_CURSOR_HINT, MONGOC_CURSOR_HINT_LEN},
      {MONGOC_CURSOR_COMMENT, MONGOC_CURSOR_COMMENT, MONGOC_CURSOR_COMMENT_LEN},
      {MONGOC_CURSOR_MAX_SCAN,
       MONGOC_CURSOR_MAX_SCAN,
       MONGOC_CURSOR_MAX_SCAN_LEN},
      {MONGOC_CURSOR_MAX_TIME_MS,
       MONGOC_CURSOR_MAX_TIME_MS,
       MONGOC_CURSOR_MAX_TIME_MS_LEN},
      {MONGOC_CURSOR_MAX, MONGOC_CURSOR_MAX, MONGOC_CURSOR_MAX_LEN},
      {MONGOC_CURSOR_MIN, MONGOC_CURSOR_MIN, MONGOC_CURSOR_MIN_LEN},
      {MONGOC_CURSOR_RETURN_KEY,
       MONGOC_CURSOR_RETURN_KEY,
       MONGOC_CURSOR_RETURN_KEY_LEN},
      {MONGOC_CURSOR_SNAPSHOT,
       MONGOC_CURSOR_SNAPSHOT,
       MONGOC_CURSOR_SNAPSHOT_LEN},
   };
   size_t i;

   if (query_field[0] != '$') {
      *cmd_field = query_field;
      *len = -1;
      return true;
   }

   /* strip the leading '$' */
   query_field++;

   for (i = 0; i < sizeof translations / sizeof translations[0]; i++) {
      if (!strcmp (translations[i].query_name, query_field)) {
         *cmd_field = translations[i].cmd_name;
         *len = translations[i].cmd_len;
         return true;
      }
   }

   /* not a special command field, must be a query operator like $or */
   return false;
}
/* Extract the collection name (pointer and length, not NUL-terminated)
 * from the cursor's namespace string "db.collection". */
void
_mongoc_cursor_collection (const mongoc_cursor_t *cursor,
                           const char **collection,
                           int *collection_len)
{
   /* the collection name begins just past the "." separator */
   const char *coll_start = cursor->ns + cursor->dblen + 1;

   *collection = coll_start;
   /* total ns length minus the db name and the "." */
   *collection_len = cursor->nslen - cursor->dblen - 1;

   BSON_ASSERT (*collection_len > 0);
}
/* _mongoc_cursor_prepare_find_command --
 * Build the skeleton of a "find" command in "command": the collection
 * name and the cursor's filter document.  Returns true on success.
 * NOTE(review): this region is a patch hunk; lines beginning with "-" or
 * "+" are diff markers from the surrounding patch, left untouched. */
static bool
-_mongoc_cursor_prepare_find_command (mongoc_cursor_t *cursor,
- bson_t *command,
- mongoc_server_stream_t *server_stream)
+_mongoc_cursor_prepare_find_command (mongoc_cursor_t *cursor, bson_t *command)
{
const char *collection;
int collection_len;
- bson_iter_t iter;
_mongoc_cursor_collection (cursor, &collection, &collection_len);
bson_append_utf8 (command,
MONGOC_CURSOR_FIND,
MONGOC_CURSOR_FIND_LEN,
collection,
collection_len);
bson_append_document (
command, MONGOC_CURSOR_FILTER, MONGOC_CURSOR_FILTER_LEN, &cursor->filter);
- bson_iter_init (&iter, &cursor->opts);
-
- while (bson_iter_next (&iter)) {
- /* don't append "maxAwaitTimeMS" */
- if (!strcmp (bson_iter_key (&iter), MONGOC_CURSOR_COLLATION) &&
- server_stream->sd->max_wire_version < WIRE_VERSION_COLLATION) {
- bson_set_error (&cursor->error,
- MONGOC_ERROR_CURSOR,
- MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
- "Collation is not supported by this server");
- MARK_FAILED (cursor);
- return false;
- } else if (strcmp (bson_iter_key (&iter),
- MONGOC_CURSOR_MAX_AWAIT_TIME_MS)) {
- if (!bson_append_iter (command, bson_iter_key (&iter), -1, &iter)) {
- bson_set_error (&cursor->error,
- MONGOC_ERROR_BSON,
- MONGOC_ERROR_BSON_INVALID,
- "Cursor opts too large");
- MARK_FAILED (cursor);
- return false;
- }
- }
- }
-
- if (cursor->read_concern->level != NULL) {
- const bson_t *read_concern_bson;
-
- read_concern_bson = _mongoc_read_concern_get_bson (cursor->read_concern);
- bson_append_document (command,
- MONGOC_CURSOR_READ_CONCERN,
- MONGOC_CURSOR_READ_CONCERN_LEN,
- read_concern_bson);
- }
return true;
}
/* _mongoc_cursor_find_command --
 * Run the cursor's initial query as a "find" command via the cursorid
 * interface, and return the first result document (or NULL).
 * NOTE(review): this region is a patch hunk; lines beginning with "-" or
 * "+" are diff markers from the surrounding patch, left untouched. */
static const bson_t *
_mongoc_cursor_find_command (mongoc_cursor_t *cursor,
mongoc_server_stream_t *server_stream)
{
bson_t command = BSON_INITIALIZER;
const bson_t *bson = NULL;
ENTRY;
- if (!_mongoc_cursor_prepare_find_command (cursor, &command, server_stream)) {
+ /* cursors created by mongoc_client_command don't use this function */
+ BSON_ASSERT (cursor->is_find);
+
+ if (!_mongoc_cursor_prepare_find_command (cursor, &command)) {
RETURN (NULL);
}
_mongoc_cursor_cursorid_init (cursor, &command);
bson_destroy (&command);
BSON_ASSERT (cursor->iface.next);
_mongoc_cursor_cursorid_next (cursor, &bson);
RETURN (bson);
}
/* Fetch the next batch for an already-started cursor via a getmore, and
 * return the first document of that batch (NULL on error or empty batch;
 * on error cursor->done is set and cursor->error describes the failure). */
static const bson_t *
_mongoc_cursor_get_more (mongoc_cursor_t *cursor)
{
   mongoc_server_stream_t *server_stream;
   const bson_t *doc = NULL;

   ENTRY;

   BSON_ASSERT (cursor);

   server_stream = _mongoc_cursor_fetch_stream (cursor);
   if (!server_stream) {
      GOTO (failure);
   }

   /* a getmore requires a live server cursor id unless in exhaust mode */
   if (!cursor->in_exhaust && !cursor->rpc.reply.cursor_id) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                      "No valid cursor was provided.");
      GOTO (failure);
   }

   if (!_mongoc_cursor_op_getmore (cursor, server_stream)) {
      GOTO (failure);
   }

   mongoc_server_stream_cleanup (server_stream);

   if (cursor->reader) {
      _mongoc_read_from_buffer (cursor, &doc);
   }

   RETURN (doc);

failure:
   cursor->done = true;

   mongoc_server_stream_cleanup (server_stream);

   RETURN (NULL);
}
/* Emit a command-started APM event for a legacy OP_GET_MORE, rendering it
 * as an equivalent "getMore" command document.  Returns true on success
 * (including when no callback is registered), false if the getMore
 * command document could not be prepared. */
static bool
_mongoc_cursor_monitor_legacy_get_more (mongoc_cursor_t *cursor,
                                        mongoc_server_stream_t *server_stream)
{
   bson_t cmd;
   char db[MONGOC_NAMESPACE_MAX];
   mongoc_client_t *client;
   mongoc_apm_command_started_t event;

   ENTRY;

   client = cursor->client;

   /* nothing to do unless the app registered a command-started callback */
   if (!client->apm_callbacks.started) {
      RETURN (true);
   }

   bson_init (&cmd);
   if (!_mongoc_cursor_prepare_getmore_command (cursor, &cmd)) {
      bson_destroy (&cmd);
      RETURN (false);
   }

   bson_strncpy (db, cursor->ns, cursor->dblen + 1);
   mongoc_apm_command_started_init (&event,
                                    &cmd,
                                    db,
                                    "getMore",
                                    client->cluster.request_id,
                                    cursor->operation_id,
                                    &server_stream->sd->host,
                                    server_stream->sd->id,
                                    client->apm_context);

   client->apm_callbacks.started (&event);

   mongoc_apm_command_started_cleanup (&event);
   bson_destroy (&cmd);

   RETURN (true);
}
/* _mongoc_cursor_op_getmore --
 * Send a legacy OP_GET_MORE (or, in exhaust mode, just read the next
 * reply) and load the resulting batch into cursor->reader.  Emits APM
 * monitoring events.  Returns true on success; on failure cursor->error
 * is set.
 * NOTE(review): this region is a patch hunk; lines beginning with "-" or
 * "+" are diff markers from the surrounding patch, left untouched. */
bool
_mongoc_cursor_op_getmore (mongoc_cursor_t *cursor,
mongoc_server_stream_t *server_stream)
{
int64_t started;
mongoc_rpc_t rpc;
uint32_t request_id;
mongoc_cluster_t *cluster;
mongoc_query_flags_t flags;
ENTRY;
started = bson_get_monotonic_time ();
cluster = &cursor->client->cluster;
if (!_mongoc_cursor_flags (cursor, server_stream, &flags)) {
GOTO (fail);
}
if (cursor->in_exhaust) {
/* exhaust mode: server streams replies without further requests */
request_id = (uint32_t) cursor->rpc.header.request_id;
} else {
request_id = ++cluster->request_id;
rpc.get_more.cursor_id = cursor->rpc.reply.cursor_id;
rpc.header.msg_len = 0;
rpc.header.request_id = request_id;
rpc.header.response_to = 0;
rpc.header.opcode = MONGOC_OPCODE_GET_MORE;
rpc.get_more.zero = 0;
rpc.get_more.collection = cursor->ns;
if (flags & MONGOC_QUERY_TAILABLE_CURSOR) {
rpc.get_more.n_return = 0;
} else {
- rpc.get_more.n_return = _mongoc_n_return (cursor);
+ rpc.get_more.n_return = _mongoc_n_return (false, cursor);
}
if (!_mongoc_cursor_monitor_legacy_get_more (cursor, server_stream)) {
GOTO (fail);
}
- if (!mongoc_cluster_sendv_to_server (
- cluster, &rpc, server_stream, NULL, &cursor->error)) {
+ if (!mongoc_cluster_legacy_rpc_sendv_to_server (
+ cluster, &rpc, server_stream, &cursor->error)) {
GOTO (fail);
}
}
_mongoc_buffer_clear (&cursor->buffer, false);
if (!_mongoc_client_recv (cursor->client,
&cursor->rpc,
&cursor->buffer,
server_stream,
&cursor->error)) {
GOTO (fail);
}
/* validate that the reply matches our request before trusting it */
if (cursor->rpc.header.opcode != MONGOC_OPCODE_REPLY) {
bson_set_error (&cursor->error,
MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Invalid opcode. Expected %d, got %d.",
MONGOC_OPCODE_REPLY,
cursor->rpc.header.opcode);
GOTO (fail);
}
if (cursor->rpc.header.response_to != request_id) {
bson_set_error (&cursor->error,
MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Invalid response_to for getmore. Expected %d, got %d.",
request_id,
cursor->rpc.header.response_to);
GOTO (fail);
}
if (!_mongoc_rpc_check_ok (&cursor->rpc,
- false /* is_command */,
cursor->client->error_api_version,
&cursor->error,
- &cursor->error_doc)) {
+ &cursor->reply)) {
GOTO (fail);
}
if (cursor->reader) {
bson_reader_destroy (cursor->reader);
}
cursor->reader = bson_reader_new_from_data (
cursor->rpc.reply.documents, (size_t) cursor->rpc.reply.documents_len);
_mongoc_cursor_monitor_succeeded (cursor,
bson_get_monotonic_time () - started,
false, /* not first batch */
server_stream,
"getMore");
RETURN (true);
fail:
_mongoc_cursor_monitor_failed (
cursor, bson_get_monotonic_time () - started, server_stream, "getMore");
RETURN (false);
}
/* Report whether the cursor is in an error state, filling "error" if so.
 * Convenience wrapper that discards the server error document. */
bool
mongoc_cursor_error (mongoc_cursor_t *cursor, bson_error_t *error)
{
   bool failed;

   ENTRY;

   /* NULL doc pointer: caller does not want the error document */
   failed = mongoc_cursor_error_document (cursor, error, NULL);

   RETURN (failed);
}
/* Report whether the cursor is in an error state, filling "error" and
 * "doc" (if non-NULL) from the failure.  Dispatches to an installed
 * cursor interface override when one exists. */
bool
mongoc_cursor_error_document (mongoc_cursor_t *cursor,
                              bson_error_t *error,
                              const bson_t **doc)
{
   bool failed;

   ENTRY;

   BSON_ASSERT (cursor);

   failed = cursor->iface.error_document
               ? cursor->iface.error_document (cursor, error, doc)
               : _mongoc_cursor_error_document (cursor, error, doc);

   RETURN (failed);
}
/* _mongoc_cursor_error_document --
 * Default implementation: if the cursor has failed, copy cursor->error
 * into "error", point "doc" (if non-NULL) at the stored server reply,
 * and return true; otherwise clear "doc" and return false.
 * NOTE(review): this region is a patch hunk; lines beginning with "-" or
 * "+" are diff markers from the surrounding patch, left untouched. */
bool
_mongoc_cursor_error_document (mongoc_cursor_t *cursor,
bson_error_t *error,
const bson_t **doc)
{
ENTRY;
BSON_ASSERT (cursor);
if (BSON_UNLIKELY (CURSOR_FAILED (cursor))) {
bson_set_error (error,
cursor->error.domain,
cursor->error.code,
"%s",
cursor->error.message);
if (doc) {
- *doc = &cursor->error_doc;
+ *doc = &cursor->reply;
}
RETURN (true);
}
if (doc) {
*doc = NULL;
}
RETURN (false);
}
/* Advance the cursor and store the next document in *bson (NULL when no
 * document is available).  Returns true if a document was produced; on
 * false, check mongoc_cursor_error to distinguish exhaustion from failure.
 *
 * Fix: the two early exits used bare "return false;" while every other
 * exit in this function (and in this file's traced functions) uses the
 * RETURN macro paired with ENTRY, so the trace exit was skipped on those
 * paths.  They now use RETURN for consistency. */
bool
mongoc_cursor_next (mongoc_cursor_t *cursor, const bson_t **bson)
{
   bool ret;

   ENTRY;

   BSON_ASSERT (cursor);
   BSON_ASSERT (bson);

   TRACE ("cursor_id(%" PRId64 ")", cursor->rpc.reply.cursor_id);

   if (bson) {
      *bson = NULL;
   }

   if (CURSOR_FAILED (cursor)) {
      RETURN (false);
   }

   if (cursor->done) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_CURSOR,
                      MONGOC_ERROR_CURSOR_INVALID_CURSOR,
                      "Cannot advance a completed or failed cursor.");
      RETURN (false);
   }

   /*
    * We cannot proceed if another cursor is receiving results in exhaust mode.
    */
   if (cursor->client->in_exhaust && !cursor->in_exhaust) {
      bson_set_error (&cursor->error,
                      MONGOC_ERROR_CLIENT,
                      MONGOC_ERROR_CLIENT_IN_EXHAUST,
                      "Another cursor derived from this client is in exhaust.");
      RETURN (false);
   }

   /* dispatch to an installed cursor interface override when one exists */
   if (cursor->iface.next) {
      ret = cursor->iface.next (cursor, bson);
   } else {
      ret = _mongoc_cursor_next (cursor, bson);
   }

   cursor->current = *bson;

   cursor->count++;

   RETURN (ret);
}
/* Read the next document from the cursor's buffered reply batch.  Sets
 * cursor->end_of_event when the batch is exhausted.  Returns true if a
 * document was read into *bson. */
bool
_mongoc_read_from_buffer (mongoc_cursor_t *cursor, const bson_t **bson)
{
   bool eof = false;

   BSON_ASSERT (cursor->reader);

   *bson = bson_reader_read (cursor->reader, &eof);

   /* remember whether this batch is exhausted */
   cursor->end_of_event = eof ? 1 : 0;

   return *bson != NULL;
}
/* _mongoc_cursor_next --
 * Default advance implementation: honor the limit, drain the buffered
 * batch, and issue the initial query or a getmore as needed.  Updates
 * cursor->done based on tailability, exhaust mode, and cursor id.
 * NOTE(review): this region is a patch hunk; lines beginning with "-" or
 * "+" are diff markers from the surrounding patch, left untouched. */
bool
_mongoc_cursor_next (mongoc_cursor_t *cursor, const bson_t **bson)
{
int64_t limit;
const bson_t *b = NULL;
bool tailable;
ENTRY;
BSON_ASSERT (cursor);
if (bson) {
*bson = NULL;
}
/*
* If we reached our limit, make sure we mark this as done and do not try to
* make further progress. We also set end_of_event so that
* mongoc_cursor_more will be false.
*/
- limit = mongoc_cursor_get_limit (cursor);
+ limit = cursor->is_find ? mongoc_cursor_get_limit (cursor) : 1;
if (limit && cursor->count >= llabs (limit)) {
cursor->done = true;
cursor->end_of_event = true;
RETURN (false);
}
/*
* Try to read the next document from the reader if it exists, we might
* get NULL back and EOF, in which case we need to submit a getmore.
*/
if (cursor->reader) {
_mongoc_read_from_buffer (cursor, &b);
if (b) {
GOTO (complete);
}
}
/*
* Check to see if we need to send a GET_MORE for more results.
*/
if (!cursor->sent) {
b = _mongoc_cursor_initial_query (cursor);
} else if (BSON_UNLIKELY (cursor->end_of_event) &&
cursor->rpc.reply.cursor_id) {
b = _mongoc_cursor_get_more (cursor);
}
complete:
tailable = _mongoc_cursor_get_opt_bool (cursor, "tailable");
cursor->done = (cursor->end_of_event &&
((cursor->in_exhaust && !cursor->rpc.reply.cursor_id) ||
(!b && !tailable)));
if (bson) {
*bson = b;
}
RETURN (!!b);
}
/* Report whether more documents may be available from this cursor.
 * Dispatches to an installed cursor interface override when one exists. */
bool
mongoc_cursor_more (mongoc_cursor_t *cursor)
{
   bool more;

   ENTRY;

   BSON_ASSERT (cursor);

   more = cursor->iface.more ? cursor->iface.more (cursor)
                             : _mongoc_cursor_more (cursor);

   RETURN (more);
}
/* Default "more" implementation: false once the cursor has failed, or
 * once it was sent, completed, and its final batch fully consumed. */
bool
_mongoc_cursor_more (mongoc_cursor_t *cursor)
{
   BSON_ASSERT (cursor);

   if (CURSOR_FAILED (cursor)) {
      return false;
   }

   if (cursor->sent && cursor->done && cursor->end_of_event) {
      return false;
   }

   return true;
}
/* Fill "host" with the address of the server this cursor is using.
 * Dispatches to an installed cursor interface override when one exists. */
void
mongoc_cursor_get_host (mongoc_cursor_t *cursor, mongoc_host_list_t *host)
{
   BSON_ASSERT (cursor);
   BSON_ASSERT (host);

   if (!cursor->iface.get_host) {
      _mongoc_cursor_get_host (cursor, host);
   } else {
      cursor->iface.get_host (cursor, host);
   }

   EXIT;
}
/* Default get_host implementation: zero "host", then copy the host of the
 * server this cursor targets.  Warns and leaves "host" zeroed if the
 * query has not been sent yet; leaves it zeroed on lookup failure too
 * (cursor->error is set by the lookup). */
void
_mongoc_cursor_get_host (mongoc_cursor_t *cursor, mongoc_host_list_t *host)
{
   mongoc_server_description_t *description;

   BSON_ASSERT (cursor);
   BSON_ASSERT (host);

   memset (host, 0, sizeof *host);

   if (!cursor->server_id) {
      MONGOC_WARNING ("%s(): Must send query before fetching peer.", BSON_FUNC);
      return;
   }

   description = mongoc_topology_server_by_id (
      cursor->client->topology, cursor->server_id, &cursor->error);
   if (description) {
      *host = description->host;
      mongoc_server_description_destroy (description);
   }
}
/* Create an unsent copy of this cursor.  Dispatches to an installed
 * cursor interface override when one exists. */
mongoc_cursor_t *
mongoc_cursor_clone (const mongoc_cursor_t *cursor)
{
   mongoc_cursor_t *copy;

   BSON_ASSERT (cursor);

   copy = cursor->iface.clone ? cursor->iface.clone (cursor)
                              : _mongoc_cursor_clone (cursor);

   RETURN (copy);
}
/* _mongoc_cursor_clone --
 * Default clone implementation: allocate a fresh cursor and copy the
 * query definition (filter, opts, ns, read prefs/concern, flags) but not
 * the in-flight state, so the clone starts from the initial query.
 * NOTE(review): this region is a patch hunk; lines beginning with "-" or
 * "+" are diff markers from the surrounding patch, left untouched. */
mongoc_cursor_t *
_mongoc_cursor_clone (const mongoc_cursor_t *cursor)
{
mongoc_cursor_t *_clone;
ENTRY;
BSON_ASSERT (cursor);
_clone = (mongoc_cursor_t *) bson_malloc0 (sizeof *_clone);
_clone->client = cursor->client;
- _clone->is_command = cursor->is_command;
+ _clone->is_find = cursor->is_find;
_clone->nslen = cursor->nslen;
_clone->dblen = cursor->dblen;
_clone->has_fields = cursor->has_fields;
+ _clone->explicit_session = cursor->explicit_session;
if (cursor->read_prefs) {
_clone->read_prefs = mongoc_read_prefs_copy (cursor->read_prefs);
}
if (cursor->read_concern) {
_clone->read_concern = mongoc_read_concern_copy (cursor->read_concern);
}
+ if (cursor->explicit_session) {
+ _clone->client_session = cursor->client_session;
+ }
bson_copy_to (&cursor->filter, &_clone->filter);
bson_copy_to (&cursor->opts, &_clone->opts);
- bson_copy_to (&cursor->error_doc, &_clone->error_doc);
+ bson_copy_to (&cursor->reply, &_clone->reply);
bson_strncpy (_clone->ns, cursor->ns, sizeof _clone->ns);
_mongoc_buffer_init (&_clone->buffer, NULL, 0, NULL, NULL);
mongoc_counter_cursors_active_inc ();
RETURN (_clone);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_cursor_is_alive --
 *
 *       Reports whether the cursor may still yield documents; primarily
 *       useful with tailable cursors.
 *
 * Returns:
 *       true unless the cursor has completed or failed.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
bool
mongoc_cursor_is_alive (const mongoc_cursor_t *cursor) /* IN */
{
   BSON_ASSERT (cursor);

   return cursor->done ? false : true;
}
/* Return the document most recently produced by mongoc_cursor_next
 * (NULL before the first successful advance). */
const bson_t *
mongoc_cursor_current (const mongoc_cursor_t *cursor) /* IN */
{
   BSON_ASSERT (cursor);

   return cursor->current;
}
/* Store the batch size in the cursor's opts under "batchSize". */
void
mongoc_cursor_set_batch_size (mongoc_cursor_t *cursor, uint32_t batch_size)
{
   int64_t size64 = (int64_t) batch_size;

   BSON_ASSERT (cursor);

   _mongoc_cursor_set_opt_int64 (cursor, MONGOC_CURSOR_BATCH_SIZE, size64);
}
/* Return the "batchSize" stored in the cursor's opts (0 when unset). */
uint32_t
mongoc_cursor_get_batch_size (const mongoc_cursor_t *cursor)
{
   int64_t size64;

   BSON_ASSERT (cursor);

   size64 = _mongoc_cursor_get_opt_int64 (cursor, MONGOC_CURSOR_BATCH_SIZE, 0);

   return (uint32_t) size64;
}
/* Set the cursor's limit before it is sent.  A negative limit is stored
 * as its absolute value plus "singleBatch": true.  Returns false if the
 * initial query was already sent or the opts could not be updated. */
bool
mongoc_cursor_set_limit (mongoc_cursor_t *cursor, int64_t limit)
{
   BSON_ASSERT (cursor);

   /* the limit is immutable once the initial query has gone out */
   if (cursor->sent) {
      return false;
   }

   if (limit >= 0) {
      return _mongoc_cursor_set_opt_int64 (cursor, MONGOC_CURSOR_LIMIT, limit);
   }

   /* negative limit: absolute value, delivered in a single batch */
   return _mongoc_cursor_set_opt_int64 (cursor, MONGOC_CURSOR_LIMIT, -limit) &&
          _mongoc_cursor_set_opt_bool (cursor, MONGOC_CURSOR_SINGLE_BATCH, true);
}
/* Return the cursor's limit; a positive stored limit combined with
 * "singleBatch" is reported back as a negative value, mirroring
 * mongoc_cursor_set_limit. */
int64_t
mongoc_cursor_get_limit (const mongoc_cursor_t *cursor)
{
   int64_t stored;

   BSON_ASSERT (cursor);

   stored = _mongoc_cursor_get_opt_int64 (cursor, MONGOC_CURSOR_LIMIT, 0);

   if (stored > 0 &&
       _mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_SINGLE_BATCH)) {
      return -stored;
   }

   return stored;
}
/* Pin the cursor to a specific server by id.  Fails (with a logged error)
 * if a server is already pinned or if server_id is 0. */
bool
mongoc_cursor_set_hint (mongoc_cursor_t *cursor, uint32_t server_id)
{
   BSON_ASSERT (cursor);

   if (cursor->server_id != 0) {
      MONGOC_ERROR ("mongoc_cursor_set_hint: server_id already set");
      return false;
   }

   if (server_id == 0) {
      MONGOC_ERROR ("mongoc_cursor_set_hint: cannot set server_id to 0");
      return false;
   }

   cursor->server_id = server_id;
   cursor->server_id_set = true;

   return true;
}
/* Return the server id pinned with mongoc_cursor_set_hint, or 0 when the
 * cursor is not pinned to a specific server. */
uint32_t
mongoc_cursor_get_hint (const mongoc_cursor_t *cursor)
{
   BSON_ASSERT (cursor);

   return cursor->server_id;
}
/* Return the server-assigned cursor id from the most recent reply. */
int64_t
mongoc_cursor_get_id (const mongoc_cursor_t *cursor)
{
   BSON_ASSERT (cursor);

   return cursor->rpc.reply.cursor_id;
}
/* Store "maxAwaitTimeMS" in the cursor's opts; silently ignored once the
 * initial query has been sent. */
void
mongoc_cursor_set_max_await_time_ms (mongoc_cursor_t *cursor,
                                     uint32_t max_await_time_ms)
{
   BSON_ASSERT (cursor);

   if (cursor->sent) {
      return;
   }

   _mongoc_cursor_set_opt_int64 (
      cursor, MONGOC_CURSOR_MAX_AWAIT_TIME_MS, (int64_t) max_await_time_ms);
}
/* Return the "maxAwaitTimeMS" stored in the cursor's opts (0 when unset). */
uint32_t
mongoc_cursor_get_max_await_time_ms (const mongoc_cursor_t *cursor)
{
   bson_iter_t iter;

   BSON_ASSERT (cursor);

   if (!bson_iter_init_find (
          &iter, &cursor->opts, MONGOC_CURSOR_MAX_AWAIT_TIME_MS)) {
      return 0;
   }

   return (uint32_t) bson_iter_as_int64 (&iter);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_cursor_new_from_command_reply --
 *
 *       Low-level function to initialize a mongoc_cursor_t from the
 *       reply to a command like "aggregate", "find", or "listCollections".
 *
 *       Useful in drivers that wrap the C driver; in applications, use
 *       high-level functions like mongoc_collection_aggregate instead.
 *
 * Returns:
 *       A cursor.
 *
 * Side effects:
 *       On failure, the cursor's error is set: retrieve it with
 *       mongoc_cursor_error. On success or failure, "reply" is
 *       destroyed.
 *
 *--------------------------------------------------------------------------
 */
/* NOTE(review): this region is a patch hunk; lines beginning with "-" or
 * "+" are diff markers from the surrounding patch, left untouched. */
mongoc_cursor_t *
mongoc_cursor_new_from_command_reply (mongoc_client_t *client,
bson_t *reply,
uint32_t server_id)
{
mongoc_cursor_t *cursor;
bson_t cmd = BSON_INITIALIZER;
+ bson_t opts = BSON_INITIALIZER;
BSON_ASSERT (client);
BSON_ASSERT (reply);
+ bson_copy_to_excluding_noinit (reply,
+ &opts,
+ "cursor",
+ "ok",
+ "operationTime",
+ "$clusterTime",
+ "$gleStats",
+ NULL);
+
cursor = _mongoc_cursor_new_with_opts (
- client, NULL, false /* is_command */, NULL, NULL, NULL, NULL);
+ client, NULL, true /* is_find */, NULL, &opts, NULL, NULL);
_mongoc_cursor_cursorid_init (cursor, &cmd);
_mongoc_cursor_cursorid_init_with_reply (cursor, reply, server_id);
bson_destroy (&cmd);
+ bson_destroy (&opts);
return cursor;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cursor.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cursor.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cyrus-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cyrus-private.h
similarity index 98%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cyrus-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cyrus-private.h
index edf408c3..b202cb69 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cyrus-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cyrus-private.h
@@ -1,77 +1,77 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_CYRUS_PRIVATE_H
#define MONGOC_CYRUS_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include "mongoc-uri.h"
#include "mongoc-cluster-private.h"
#include "mongoc-sasl-private.h"
#include <bson.h>
#include <sasl/sasl.h>
#include <sasl/saslutil.h>
BSON_BEGIN_DECLS
typedef struct _mongoc_cyrus_t mongoc_cyrus_t;
struct _mongoc_cyrus_t {
mongoc_sasl_t credentials;
- sasl_callback_t callbacks[5];
+ sasl_callback_t callbacks[6];
sasl_conn_t *conn;
bool done;
int step;
sasl_interact_t *interact;
};
#ifndef SASL_CALLBACK_FN
#define SASL_CALLBACK_FN(_f) ((int (*) (void)) (_f))
#endif
void
_mongoc_cyrus_init (mongoc_cyrus_t *sasl);
bool
_mongoc_cyrus_new_from_cluster (mongoc_cyrus_t *sasl,
mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
const char *hostname,
bson_error_t *error);
int
_mongoc_cyrus_log (mongoc_cyrus_t *sasl, int level, const char *message);
void
_mongoc_cyrus_destroy (mongoc_cyrus_t *sasl);
bool
_mongoc_cyrus_step (mongoc_cyrus_t *sasl,
const uint8_t *inbuf,
uint32_t inbuflen,
uint8_t *outbuf,
uint32_t outbufmax,
uint32_t *outbuflen,
bson_error_t *error);
BSON_END_DECLS
#endif /* MONGOC_CYRUS_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cyrus.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cyrus.c
similarity index 97%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cyrus.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cyrus.c
index 61023e32..8699b61c 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-cyrus.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-cyrus.c
@@ -1,425 +1,418 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#ifdef MONGOC_ENABLE_SASL_CYRUS
#include <string.h>
#include "mongoc-error.h"
#include "mongoc-cyrus-private.h"
#include "mongoc-util-private.h"
#include "mongoc-trace-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "CYRUS-SASL"
-int
-_mongoc_cyrus_log (mongoc_cyrus_t *sasl, int level, const char *message)
-{
- TRACE ("SASL Log; level=%d: message=%s", level, message);
- return SASL_OK;
-}
-
bool
_mongoc_cyrus_set_mechanism (mongoc_cyrus_t *sasl,
const char *mechanism,
bson_error_t *error)
{
bson_string_t *str = bson_string_new ("");
const char **mechs = sasl_global_listmech ();
int i = 0;
bool ok = false;
BSON_ASSERT (sasl);
for (i = 0; mechs[i]; i++) {
if (!strcmp (mechs[i], mechanism)) {
ok = true;
break;
}
bson_string_append (str, mechs[i]);
if (mechs[i + 1]) {
bson_string_append (str, ",");
}
}
if (ok) {
bson_free (sasl->credentials.mechanism);
sasl->credentials.mechanism = mechanism ? bson_strdup (mechanism) : NULL;
} else {
bson_set_error (error,
MONGOC_ERROR_SASL,
SASL_NOMECH,
"SASL Failure: Unsupported mechanism by client: %s. "
"Available mechanisms: %s",
mechanism,
str->str);
}
- bson_string_free (str, 0);
+ bson_string_free (str, true);
return ok;
}
static int
_mongoc_cyrus_get_pass (mongoc_cyrus_t *sasl,
int param_id,
const char **result,
unsigned *result_len)
{
BSON_ASSERT (sasl);
BSON_ASSERT (param_id == SASL_CB_PASS);
if (result) {
*result = sasl->credentials.pass;
}
if (result_len) {
*result_len = sasl->credentials.pass
? (unsigned) strlen (sasl->credentials.pass)
: 0;
}
return (sasl->credentials.pass != NULL) ? SASL_OK : SASL_FAIL;
}
static int
_mongoc_cyrus_canon_user (sasl_conn_t *conn,
mongoc_cyrus_t *sasl,
const char *in,
unsigned inlen,
unsigned flags,
const char *user_realm,
char *out,
unsigned out_max,
unsigned *out_len)
{
TRACE ("Canonicalizing %s (%" PRIu32 ")\n", in, inlen);
strcpy (out, in);
*out_len = inlen;
return SASL_OK;
}
static int
_mongoc_cyrus_get_user (mongoc_cyrus_t *sasl,
int param_id,
const char **result,
unsigned *result_len)
{
BSON_ASSERT (sasl);
BSON_ASSERT ((param_id == SASL_CB_USER) || (param_id == SASL_CB_AUTHNAME));
if (result) {
*result = sasl->credentials.user;
}
if (result_len) {
*result_len = sasl->credentials.user
? (unsigned) strlen (sasl->credentials.user)
: 0;
}
return (sasl->credentials.user != NULL) ? SASL_OK : SASL_FAIL;
}
void
_mongoc_cyrus_init (mongoc_cyrus_t *sasl)
{
sasl_callback_t callbacks[] = {
{SASL_CB_AUTHNAME, SASL_CALLBACK_FN (_mongoc_cyrus_get_user), sasl},
{SASL_CB_USER, SASL_CALLBACK_FN (_mongoc_cyrus_get_user), sasl},
{SASL_CB_PASS, SASL_CALLBACK_FN (_mongoc_cyrus_get_pass), sasl},
{SASL_CB_CANON_USER, SASL_CALLBACK_FN (_mongoc_cyrus_canon_user), sasl},
{SASL_CB_LIST_END}};
BSON_ASSERT (sasl);
memset (sasl, 0, sizeof *sasl);
memcpy (&sasl->callbacks, callbacks, sizeof callbacks);
sasl->done = false;
sasl->step = 0;
sasl->conn = NULL;
sasl->interact = NULL;
sasl->credentials.mechanism = NULL;
sasl->credentials.user = NULL;
sasl->credentials.pass = NULL;
sasl->credentials.service_name = NULL;
sasl->credentials.service_host = NULL;
}
bool
_mongoc_cyrus_new_from_cluster (mongoc_cyrus_t *sasl,
mongoc_cluster_t *cluster,
mongoc_stream_t *stream,
const char *hostname,
bson_error_t *error)
{
const char *mechanism;
char real_name[BSON_HOST_NAME_MAX + 1];
_mongoc_cyrus_init (sasl);
mechanism = mongoc_uri_get_auth_mechanism (cluster->uri);
if (!mechanism) {
mechanism = "GSSAPI";
}
if (!_mongoc_cyrus_set_mechanism (sasl, mechanism, error)) {
_mongoc_cyrus_destroy (sasl);
return false;
}
_mongoc_sasl_set_pass ((mongoc_sasl_t *) sasl,
mongoc_uri_get_password (cluster->uri));
_mongoc_sasl_set_user ((mongoc_sasl_t *) sasl,
mongoc_uri_get_username (cluster->uri));
_mongoc_sasl_set_properties ((mongoc_sasl_t *) sasl, cluster->uri);
/*
* If the URI requested canonicalizeHostname, we need to resolve the real
* hostname for the IP Address and pass that to the SASL layer. Some
* underlying GSSAPI layers will do this for us, but can be disabled in
* their config (krb.conf).
*
* This allows the consumer to specify canonicalizeHostname=true in the URI
* and have us do that for them.
*
* See CDRIVER-323 for more information.
*/
if (sasl->credentials.canonicalize_host_name &&
_mongoc_sasl_get_canonicalized_name (
- stream, real_name, sizeof real_name, error)) {
+ stream, real_name, sizeof real_name)) {
_mongoc_sasl_set_service_host ((mongoc_sasl_t *) sasl, real_name);
} else {
_mongoc_sasl_set_service_host ((mongoc_sasl_t *) sasl, hostname);
}
return true;
}
void
_mongoc_cyrus_destroy (mongoc_cyrus_t *sasl)
{
BSON_ASSERT (sasl);
if (sasl->conn) {
sasl_dispose (&sasl->conn);
}
bson_free (sasl->credentials.user);
bson_free (sasl->credentials.pass);
bson_free (sasl->credentials.mechanism);
bson_free (sasl->credentials.service_name);
bson_free (sasl->credentials.service_host);
}
static bool
_mongoc_cyrus_is_failure (int status, bson_error_t *error)
{
bool ret = (status < 0);
TRACE ("Got status: %d ok is %d, continue=%d interact=%d\n",
status,
SASL_OK,
SASL_CONTINUE,
SASL_INTERACT);
if (ret) {
switch (status) {
case SASL_NOMEM:
bson_set_error (error,
MONGOC_ERROR_SASL,
status,
"SASL Failure: insufficient memory.");
break;
case SASL_NOMECH: {
bson_string_t *str = bson_string_new ("available mechanisms: ");
const char **mechs = sasl_global_listmech ();
int i = 0;
for (i = 0; mechs[i]; i++) {
bson_string_append (str, mechs[i]);
if (mechs[i + 1]) {
bson_string_append (str, ",");
}
}
bson_set_error (error,
MONGOC_ERROR_SASL,
status,
"SASL Failure: failure to negotiate mechanism (%s)",
str->str);
bson_string_free (str, 0);
} break;
case SASL_BADPARAM:
bson_set_error (error,
MONGOC_ERROR_SASL,
status,
"Bad parameter supplied. Please file a bug "
"with mongo-c-driver.");
break;
default:
bson_set_error (error,
MONGOC_ERROR_SASL,
status,
"SASL Failure: (%d): %s",
status,
sasl_errstring (status, NULL, NULL));
break;
}
}
return ret;
}
static bool
_mongoc_cyrus_start (mongoc_cyrus_t *sasl,
uint8_t *outbuf,
uint32_t outbufmax,
uint32_t *outbuflen,
bson_error_t *error)
{
const char *service_name = "mongodb";
const char *service_host = "";
const char *mechanism = NULL;
const char *raw = NULL;
unsigned raw_len = 0;
int status;
BSON_ASSERT (sasl);
BSON_ASSERT (outbuf);
BSON_ASSERT (outbufmax);
BSON_ASSERT (outbuflen);
if (sasl->credentials.service_name) {
service_name = sasl->credentials.service_name;
}
if (sasl->credentials.service_host) {
service_host = sasl->credentials.service_host;
}
status = sasl_client_new (
service_name, service_host, NULL, NULL, sasl->callbacks, 0, &sasl->conn);
TRACE ("Created new sasl client %s",
status == SASL_OK ? "successfully" : "UNSUCCESSFULLY");
if (_mongoc_cyrus_is_failure (status, error)) {
return false;
}
status = sasl_client_start (sasl->conn,
sasl->credentials.mechanism,
&sasl->interact,
&raw,
&raw_len,
&mechanism);
TRACE ("Started the sasl client %s",
status == SASL_CONTINUE ? "successfully" : "UNSUCCESSFULLY");
if (_mongoc_cyrus_is_failure (status, error)) {
return false;
}
if ((0 != strcasecmp (mechanism, "GSSAPI")) &&
(0 != strcasecmp (mechanism, "PLAIN"))) {
bson_set_error (error,
MONGOC_ERROR_SASL,
SASL_NOMECH,
"SASL Failure: invalid mechanism \"%s\"",
mechanism);
return false;
}
status = sasl_encode64 (raw, raw_len, (char *) outbuf, outbufmax, outbuflen);
if (_mongoc_cyrus_is_failure (status, error)) {
return false;
}
return true;
}
bool
_mongoc_cyrus_step (mongoc_cyrus_t *sasl,
const uint8_t *inbuf,
uint32_t inbuflen,
uint8_t *outbuf,
uint32_t outbufmax,
uint32_t *outbuflen,
bson_error_t *error)
{
const char *raw = NULL;
unsigned rawlen = 0;
int status;
BSON_ASSERT (sasl);
BSON_ASSERT (inbuf);
BSON_ASSERT (outbuf);
BSON_ASSERT (outbuflen);
TRACE ("Running %d, inbuflen: %" PRIu32, sasl->step, inbuflen);
sasl->step++;
if (sasl->step == 1) {
return _mongoc_cyrus_start (sasl, outbuf, outbufmax, outbuflen, error);
} else if (sasl->step >= 10) {
bson_set_error (error,
MONGOC_ERROR_SASL,
SASL_NOTDONE,
"SASL Failure: maximum steps detected");
return false;
}
TRACE ("Running %d, inbuflen: %" PRIu32, sasl->step, inbuflen);
if (!inbuflen) {
bson_set_error (error,
MONGOC_ERROR_SASL,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
"SASL Failure: no payload provided from server: %s",
sasl_errdetail (sasl->conn));
return false;
}
status = sasl_decode64 (
(char *) inbuf, inbuflen, (char *) outbuf, outbufmax, outbuflen);
if (_mongoc_cyrus_is_failure (status, error)) {
return false;
}
TRACE ("%s", "Running client_step");
status = sasl_client_step (
sasl->conn, (char *) outbuf, *outbuflen, &sasl->interact, &raw, &rawlen);
TRACE ("%s sent a client step",
status == SASL_OK ? "Successfully" : "UNSUCCESSFULLY");
if (_mongoc_cyrus_is_failure (status, error)) {
return false;
}
status = sasl_encode64 (raw, rawlen, (char *) outbuf, outbufmax, outbuflen);
if (_mongoc_cyrus_is_failure (status, error)) {
return false;
}
return true;
}
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-database-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-database-private.h
similarity index 87%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-database-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-database-private.h
index 2c0a2ae5..e3055d0b 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-database-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-database-private.h
@@ -1,59 +1,53 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_DATABASE_PRIVATE_H
#define MONGOC_DATABASE_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-client.h"
#include "mongoc-read-prefs.h"
#include "mongoc-read-concern.h"
#include "mongoc-write-concern.h"
-
BSON_BEGIN_DECLS
struct _mongoc_database_t {
mongoc_client_t *client;
char name[128];
mongoc_read_prefs_t *read_prefs;
mongoc_read_concern_t *read_concern;
mongoc_write_concern_t *write_concern;
};
mongoc_database_t *
_mongoc_database_new (mongoc_client_t *client,
const char *name,
const mongoc_read_prefs_t *read_prefs,
const mongoc_read_concern_t *read_concern,
const mongoc_write_concern_t *write_concern);
-mongoc_cursor_t *
-_mongoc_database_find_collections_legacy (mongoc_database_t *database,
- const bson_t *filter,
- bson_error_t *error);
-
BSON_END_DECLS
#endif /* MONGOC_DATABASE_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-database.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-database.c
similarity index 80%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-database.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-database.c
index a00b75d3..bc3b99e8 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-database.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-database.c
@@ -1,1226 +1,1133 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-client-private.h"
#include "mongoc-collection.h"
#include "mongoc-collection-private.h"
#include "mongoc-cursor.h"
#include "mongoc-cursor-array-private.h"
#include "mongoc-cursor-cursorid-private.h"
#include "mongoc-cursor-transform-private.h"
#include "mongoc-cursor-private.h"
#include "mongoc-database.h"
#include "mongoc-database-private.h"
#include "mongoc-error.h"
#include "mongoc-log.h"
#include "mongoc-trace-private.h"
#include "mongoc-util-private.h"
#include "mongoc-write-concern-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "database"
/*
*--------------------------------------------------------------------------
*
* _mongoc_database_new --
*
* Create a new instance of mongoc_database_t for @client.
*
* @client must stay valid for the life of the resulting
* database structure.
*
* Returns:
* A newly allocated mongoc_database_t that should be freed with
* mongoc_database_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_database_t *
_mongoc_database_new (mongoc_client_t *client,
const char *name,
const mongoc_read_prefs_t *read_prefs,
const mongoc_read_concern_t *read_concern,
const mongoc_write_concern_t *write_concern)
{
mongoc_database_t *db;
ENTRY;
BSON_ASSERT (client);
BSON_ASSERT (name);
db = (mongoc_database_t *) bson_malloc0 (sizeof *db);
db->client = client;
db->write_concern = write_concern ? mongoc_write_concern_copy (write_concern)
: mongoc_write_concern_new ();
db->read_concern = read_concern ? mongoc_read_concern_copy (read_concern)
: mongoc_read_concern_new ();
db->read_prefs = read_prefs ? mongoc_read_prefs_copy (read_prefs)
: mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
bson_strncpy (db->name, name, sizeof db->name);
RETURN (db);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_database_destroy --
*
* Releases resources associated with @database.
*
* Returns:
* None.
*
* Side effects:
* Everything.
*
*--------------------------------------------------------------------------
*/
void
mongoc_database_destroy (mongoc_database_t *database)
{
ENTRY;
BSON_ASSERT (database);
if (database->read_prefs) {
mongoc_read_prefs_destroy (database->read_prefs);
database->read_prefs = NULL;
}
if (database->read_concern) {
mongoc_read_concern_destroy (database->read_concern);
database->read_concern = NULL;
}
if (database->write_concern) {
mongoc_write_concern_destroy (database->write_concern);
database->write_concern = NULL;
}
bson_free (database);
EXIT;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_database_copy --
*
* Returns a copy of @database that needs to be freed by calling
* mongoc_database_destroy.
*
* Returns:
* A copy of this database.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_database_t *
mongoc_database_copy (mongoc_database_t *database)
{
ENTRY;
BSON_ASSERT (database);
RETURN (_mongoc_database_new (database->client,
database->name,
database->read_prefs,
database->read_concern,
database->write_concern));
}
mongoc_cursor_t *
mongoc_database_command (mongoc_database_t *database,
mongoc_query_flags_t flags,
uint32_t skip,
uint32_t limit,
uint32_t batch_size,
const bson_t *command,
const bson_t *fields,
const mongoc_read_prefs_t *read_prefs)
{
+ char ns[MONGOC_NAMESPACE_MAX];
+
BSON_ASSERT (database);
BSON_ASSERT (command);
+ bson_snprintf (ns, sizeof ns, "%s.$cmd", database->name);
+
/* Server Selection Spec: "The generic command method has a default read
* preference of mode 'primary'. The generic command method MUST ignore any
* default read preference from client, database or collection
* configuration. The generic command method SHOULD allow an optional read
* preference argument."
*/
- return mongoc_client_command (database->client,
- database->name,
- flags,
- skip,
- limit,
- batch_size,
- command,
- fields,
- read_prefs);
+
+ /* flags, skip, limit, batch_size, fields are unused */
+ return _mongoc_cursor_new_with_opts (database->client,
+ ns,
+ false /* is_find */,
+ command,
+ NULL /* opts */,
+ read_prefs,
+ NULL /* read concern */);
}
bool
mongoc_database_command_simple (mongoc_database_t *database,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
bson_t *reply,
bson_error_t *error)
{
BSON_ASSERT (database);
BSON_ASSERT (command);
/* Server Selection Spec: "The generic command method has a default read
* preference of mode 'primary'. The generic command method MUST ignore any
* default read preference from client, database or collection
* configuration. The generic command method SHOULD allow an optional read
* preference argument."
*/
- return mongoc_client_command_simple (
- database->client, database->name, command, read_prefs, reply, error);
+
+ return _mongoc_client_command_with_opts (database->client,
+ database->name,
+ command,
+ MONGOC_CMD_READ,
+ NULL /* opts */,
+ MONGOC_QUERY_NONE,
+ read_prefs,
+ NULL /* read concern */,
+ NULL /* write concern */,
+ reply,
+ error);
}
bool
mongoc_database_read_command_with_opts (mongoc_database_t *database,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
const bson_t *opts,
bson_t *reply,
bson_error_t *error)
{
return _mongoc_client_command_with_opts (
database->client,
database->name,
command,
MONGOC_CMD_READ,
opts,
MONGOC_QUERY_NONE,
COALESCE (read_prefs, database->read_prefs),
database->read_concern,
database->write_concern,
reply,
error);
}
bool
mongoc_database_write_command_with_opts (mongoc_database_t *database,
const bson_t *command,
const bson_t *opts,
bson_t *reply,
bson_error_t *error)
{
return _mongoc_client_command_with_opts (database->client,
database->name,
command,
MONGOC_CMD_WRITE,
opts,
MONGOC_QUERY_NONE,
database->read_prefs,
database->read_concern,
database->write_concern,
reply,
error);
}
bool
mongoc_database_read_write_command_with_opts (
mongoc_database_t *database,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs /* IGNORED */,
const bson_t *opts,
bson_t *reply,
bson_error_t *error)
{
return _mongoc_client_command_with_opts (
database->client,
database->name,
command,
MONGOC_CMD_RW,
opts,
MONGOC_QUERY_NONE,
COALESCE (read_prefs, database->read_prefs),
database->read_concern,
database->write_concern,
reply,
error);
}
+bool
+mongoc_database_command_with_opts (mongoc_database_t *database,
+ const bson_t *command,
+ const mongoc_read_prefs_t *read_prefs,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ return _mongoc_client_command_with_opts (database->client,
+ database->name,
+ command,
+ MONGOC_CMD_RAW,
+ opts,
+ MONGOC_QUERY_NONE,
+ read_prefs,
+ database->read_concern,
+ database->write_concern,
+ reply,
+ error);
+}
+
+
/*
*--------------------------------------------------------------------------
*
* mongoc_database_drop --
*
* Requests that the MongoDB server drops @database, including all
* collections and indexes associated with @database.
*
* Make sure this is really what you want!
*
* Returns:
* true if @database was dropped.
*
* Side effects:
* @error may be set.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_database_drop (mongoc_database_t *database, bson_error_t *error)
{
return mongoc_database_drop_with_opts (database, NULL, error);
}
bool
mongoc_database_drop_with_opts (mongoc_database_t *database,
const bson_t *opts,
bson_error_t *error)
{
bool ret;
bson_t cmd;
BSON_ASSERT (database);
bson_init (&cmd);
bson_append_int32 (&cmd, "dropDatabase", 12, 1);
ret = _mongoc_client_command_with_opts (database->client,
database->name,
&cmd,
MONGOC_CMD_WRITE,
opts,
MONGOC_QUERY_NONE,
database->read_prefs,
database->read_concern,
database->write_concern,
NULL, /* reply */
error);
bson_destroy (&cmd);
return ret;
}
-/*
- *--------------------------------------------------------------------------
- *
- * mongoc_database_add_user_legacy --
- *
- * A helper to add a user or update their password on @database.
- * This uses the legacy protocol by inserting into system.users.
- *
- * Returns:
- * true if successful; otherwise false and @error is set.
- *
- * Side effects:
- * @error may be set.
- *
- *--------------------------------------------------------------------------
- */
-
-static bool
-mongoc_database_add_user_legacy (mongoc_database_t *database,
- const char *username,
- const char *password,
- bson_error_t *error)
-{
- mongoc_collection_t *collection;
- mongoc_cursor_t *cursor = NULL;
- const bson_t *doc;
- bool ret = false;
- bson_t query;
- bson_t opts;
- bson_t user;
- char *input;
- char *pwd = NULL;
-
- ENTRY;
-
- BSON_ASSERT (database);
- BSON_ASSERT (username);
- BSON_ASSERT (password);
-
- /*
- * Users are stored in the <dbname>.system.users virtual collection.
- */
- collection = mongoc_client_get_collection (
- database->client, database->name, "system.users");
- BSON_ASSERT (collection);
-
- /*
- * Hash the users password.
- */
- input = bson_strdup_printf ("%s:mongo:%s", username, password);
- pwd = _mongoc_hex_md5 (input);
- bson_free (input);
-
- /*
- * Check to see if the user exists. If so, we will update the
- * password instead of inserting a new user.
- */
- bson_init (&query);
- bson_append_utf8 (&query, "user", 4, username, -1);
-
- bson_init (&opts);
- bson_append_int64 (&opts, "limit", 5, 1);
- bson_append_bool (&opts, "singleBatch", 11, true);
-
- cursor = mongoc_collection_find_with_opts (collection, &query, &opts, NULL);
- if (!mongoc_cursor_next (cursor, &doc)) {
- if (mongoc_cursor_error (cursor, error)) {
- GOTO (failure);
- }
- bson_init (&user);
- bson_append_utf8 (&user, "user", 4, username, -1);
- bson_append_bool (&user, "readOnly", 8, false);
- bson_append_utf8 (&user, "pwd", 3, pwd, -1);
- } else {
- bson_init (&user);
- bson_copy_to_excluding_noinit (doc, &user, "pwd", (char *) NULL);
- bson_append_utf8 (&user, "pwd", 3, pwd, -1);
- }
-
- if (!mongoc_collection_insert (
- collection, MONGOC_INSERT_NONE, &user, NULL, error)) {
- GOTO (failure_with_user);
- }
-
- ret = true;
-
-failure_with_user:
- bson_destroy (&user);
-
-failure:
- if (cursor) {
- mongoc_cursor_destroy (cursor);
- }
- mongoc_collection_destroy (collection);
- bson_destroy (&query);
- bson_destroy (&opts);
- bson_free (pwd);
-
- RETURN (ret);
-}
-
-
bool
mongoc_database_remove_user (mongoc_database_t *database,
const char *username,
bson_error_t *error)
{
- mongoc_collection_t *col;
- bson_error_t lerror;
bson_t cmd;
bool ret;
ENTRY;
BSON_ASSERT (database);
BSON_ASSERT (username);
bson_init (&cmd);
BSON_APPEND_UTF8 (&cmd, "dropUser", username);
- ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, &lerror);
+ ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error);
bson_destroy (&cmd);
- if (!ret && (lerror.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND)) {
- bson_init (&cmd);
- BSON_APPEND_UTF8 (&cmd, "user", username);
-
- col = mongoc_client_get_collection (
- database->client, database->name, "system.users");
- BSON_ASSERT (col);
-
- ret = mongoc_collection_remove (
- col, MONGOC_REMOVE_SINGLE_REMOVE, &cmd, NULL, error);
-
- bson_destroy (&cmd);
- mongoc_collection_destroy (col);
- } else if (error) {
- memcpy (error, &lerror, sizeof *error);
- }
-
RETURN (ret);
}
bool
mongoc_database_remove_all_users (mongoc_database_t *database,
bson_error_t *error)
{
- mongoc_collection_t *col;
- bson_error_t lerror;
bson_t cmd;
bool ret;
ENTRY;
BSON_ASSERT (database);
bson_init (&cmd);
BSON_APPEND_INT32 (&cmd, "dropAllUsersFromDatabase", 1);
- ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, &lerror);
+ ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error);
bson_destroy (&cmd);
- if (!ret && (lerror.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND)) {
- bson_init (&cmd);
-
- col = mongoc_client_get_collection (
- database->client, database->name, "system.users");
- BSON_ASSERT (col);
-
- ret =
- mongoc_collection_remove (col, MONGOC_REMOVE_NONE, &cmd, NULL, error);
-
- bson_destroy (&cmd);
- mongoc_collection_destroy (col);
- } else if (error) {
- memcpy (error, &lerror, sizeof *error);
- }
-
RETURN (ret);
}
/**
* mongoc_database_add_user:
* @database: A #mongoc_database_t.
* @username: A string containing the username.
* @password: (allow-none): A string containing password, or NULL.
* @roles: (allow-none): An optional bson_t of roles.
* @custom_data: (allow-none): An optional bson_t of data to store.
* @error: (out) (allow-none): A location for a bson_error_t or %NULL.
*
* Creates a new user with access to @database.
*
* Returns: None.
* Side effects: None.
*/
bool
mongoc_database_add_user (mongoc_database_t *database,
const char *username,
const char *password,
const bson_t *roles,
const bson_t *custom_data,
bson_error_t *error)
{
- bson_error_t lerror;
bson_t cmd;
bson_t ar;
char *input;
char *hashed_password;
bool ret = false;
ENTRY;
BSON_ASSERT (database);
BSON_ASSERT (username);
- /*
- * CDRIVER-232:
- *
- * Perform a (slow and tedious) round trip to mongod to determine if
- * we can safely call createUser. Otherwise, we will fallback and
- * perform legacy insertion into users collection.
- */
- bson_init (&cmd);
- BSON_APPEND_UTF8 (&cmd, "usersInfo", username);
- ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, &lerror);
- bson_destroy (&cmd);
+ /* usersInfo succeeded or failed with auth err, we're on modern mongod */
+ input = bson_strdup_printf ("%s:mongo:%s", username, password);
+ hashed_password = _mongoc_hex_md5 (input);
+ bson_free (input);
- if (!ret && (lerror.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND)) {
- ret =
- mongoc_database_add_user_legacy (database, username, password, error);
- } else if (ret || (lerror.code == 13)) {
- /* usersInfo succeeded or failed with auth err, we're on modern mongod */
- input = bson_strdup_printf ("%s:mongo:%s", username, password);
- hashed_password = _mongoc_hex_md5 (input);
- bson_free (input);
-
- bson_init (&cmd);
- BSON_APPEND_UTF8 (&cmd, "createUser", username);
- BSON_APPEND_UTF8 (&cmd, "pwd", hashed_password);
- BSON_APPEND_BOOL (&cmd, "digestPassword", false);
- if (custom_data) {
- BSON_APPEND_DOCUMENT (&cmd, "customData", custom_data);
- }
- if (roles) {
- BSON_APPEND_ARRAY (&cmd, "roles", roles);
- } else {
- bson_append_array_begin (&cmd, "roles", 5, &ar);
- bson_append_array_end (&cmd, &ar);
- }
+ bson_init (&cmd);
+ BSON_APPEND_UTF8 (&cmd, "createUser", username);
+ BSON_APPEND_UTF8 (&cmd, "pwd", hashed_password);
+ BSON_APPEND_BOOL (&cmd, "digestPassword", false);
+ if (custom_data) {
+ BSON_APPEND_DOCUMENT (&cmd, "customData", custom_data);
+ }
+ if (roles) {
+ BSON_APPEND_ARRAY (&cmd, "roles", roles);
+ } else {
+ bson_append_array_begin (&cmd, "roles", 5, &ar);
+ bson_append_array_end (&cmd, &ar);
+ }
- ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error);
+ ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error);
- bson_free (hashed_password);
- bson_destroy (&cmd);
- } else if (error) {
- memcpy (error, &lerror, sizeof *error);
- }
+ bson_free (hashed_password);
+ bson_destroy (&cmd);
RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_database_get_read_prefs --
*
* Fetch the read preferences for @database.
*
* Returns:
* A mongoc_read_prefs_t that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const mongoc_read_prefs_t *
mongoc_database_get_read_prefs (const mongoc_database_t *database) /* IN */
{
BSON_ASSERT (database);
return database->read_prefs;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_database_set_read_prefs --
*
* Sets the default read preferences for @database.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_database_set_read_prefs (mongoc_database_t *database,
const mongoc_read_prefs_t *read_prefs)
{
BSON_ASSERT (database);
if (database->read_prefs) {
mongoc_read_prefs_destroy (database->read_prefs);
database->read_prefs = NULL;
}
if (read_prefs) {
database->read_prefs = mongoc_read_prefs_copy (read_prefs);
}
}
/*
*--------------------------------------------------------------------------
*
* mongoc_database_get_read_concern --
*
* Fetches the read concern for @database.
*
* Returns:
* A mongoc_read_concern_t that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const mongoc_read_concern_t *
mongoc_database_get_read_concern (const mongoc_database_t *database)
{
BSON_ASSERT (database);
return database->read_concern;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_database_set_read_concern --
*
* Set the default read concern for @database.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_database_set_read_concern (mongoc_database_t *database,
const mongoc_read_concern_t *read_concern)
{
BSON_ASSERT (database);
if (database->read_concern) {
mongoc_read_concern_destroy (database->read_concern);
database->read_concern = NULL;
}
if (read_concern) {
database->read_concern = mongoc_read_concern_copy (read_concern);
}
}
/*
*--------------------------------------------------------------------------
*
* mongoc_database_get_write_concern --
*
* Fetches the write concern for @database.
*
* Returns:
* A mongoc_write_concern_t that should not be modified or freed.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
const mongoc_write_concern_t *
mongoc_database_get_write_concern (const mongoc_database_t *database)
{
BSON_ASSERT (database);
return database->write_concern;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_database_set_write_concern --
*
* Set the default write concern for @database.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_database_set_write_concern (mongoc_database_t *database,
const mongoc_write_concern_t *write_concern)
{
BSON_ASSERT (database);
if (database->write_concern) {
mongoc_write_concern_destroy (database->write_concern);
database->write_concern = NULL;
}
if (write_concern) {
database->write_concern = mongoc_write_concern_copy (write_concern);
}
}
/**
* mongoc_database_has_collection:
* @database: (in): A #mongoc_database_t.
* @name: (in): The name of the collection to check for.
* @error: (out) (allow-none): A location for a #bson_error_t, or %NULL.
*
* Checks to see if a collection exists within the database on the MongoDB
* server.
*
* This will return %false if their was an error communicating with the
* server, or if the collection does not exist.
*
* If @error is provided, it will first be zeroed. Upon error, error.domain
* will be set.
*
* Returns: %true if @name exists, otherwise %false. @error may be set.
*/
bool
mongoc_database_has_collection (mongoc_database_t *database,
const char *name,
bson_error_t *error)
{
bson_iter_t col_iter;
bool ret = false;
const char *cur_name;
- bson_t filter = BSON_INITIALIZER;
+ bson_t opts = BSON_INITIALIZER;
+ bson_t filter;
mongoc_cursor_t *cursor;
const bson_t *doc;
ENTRY;
BSON_ASSERT (database);
BSON_ASSERT (name);
if (error) {
memset (error, 0, sizeof *error);
}
+ BSON_APPEND_DOCUMENT_BEGIN (&opts, "filter", &filter);
BSON_APPEND_UTF8 (&filter, "name", name);
+ bson_append_document_end (&opts, &filter);
- cursor = mongoc_database_find_collections (database, &filter, error);
-
- if (!cursor) {
- return ret;
- }
-
- if (error && ((error->domain != 0) || (error->code != 0))) {
- GOTO (cleanup);
- }
-
+ cursor = mongoc_database_find_collections_with_opts (database, &opts);
while (mongoc_cursor_next (cursor, &doc)) {
if (bson_iter_init (&col_iter, doc) &&
bson_iter_find (&col_iter, "name") &&
BSON_ITER_HOLDS_UTF8 (&col_iter) &&
(cur_name = bson_iter_utf8 (&col_iter, NULL))) {
if (!strcmp (cur_name, name)) {
ret = true;
GOTO (cleanup);
}
}
}
+ mongoc_cursor_error (cursor, error);
+
cleanup:
mongoc_cursor_destroy (cursor);
+ bson_destroy (&opts);
RETURN (ret);
}
typedef struct {
const char *dbname;
size_t dbname_len;
const char *name;
} mongoc_database_find_collections_legacy_ctx_t;
static mongoc_cursor_transform_mode_t
_mongoc_database_find_collections_legacy_filter (const bson_t *bson, void *ctx_)
{
bson_iter_t iter;
mongoc_database_find_collections_legacy_ctx_t *ctx;
ctx = (mongoc_database_find_collections_legacy_ctx_t *) ctx_;
if (bson_iter_init_find (&iter, bson, "name") &&
BSON_ITER_HOLDS_UTF8 (&iter) &&
(ctx->name = bson_iter_utf8 (&iter, NULL)) && !strchr (ctx->name, '$') &&
(0 == strncmp (ctx->name, ctx->dbname, ctx->dbname_len))) {
return MONGO_CURSOR_TRANSFORM_MUTATE;
} else {
return MONGO_CURSOR_TRANSFORM_DROP;
}
}
static void
_mongoc_database_find_collections_legacy_mutate (const bson_t *bson,
bson_t *out,
void *ctx_)
{
mongoc_database_find_collections_legacy_ctx_t *ctx;
ctx = (mongoc_database_find_collections_legacy_ctx_t *) ctx_;
bson_copy_to_excluding_noinit (bson, out, "name", NULL);
BSON_APPEND_UTF8 (
out, "name", ctx->name + (ctx->dbname_len + 1)); /* +1 for the '.' */
}
/* Uses old way of querying system.namespaces. */
-mongoc_cursor_t *
+static mongoc_cursor_t *
_mongoc_database_find_collections_legacy (mongoc_database_t *database,
- const bson_t *filter,
- bson_error_t *error)
+ const bson_t *filter)
{
mongoc_collection_t *col;
mongoc_cursor_t *cursor = NULL;
mongoc_read_prefs_t *read_prefs;
uint32_t dbname_len;
bson_t legacy_filter;
bson_iter_t iter;
const char *col_filter;
bson_t q = BSON_INITIALIZER;
mongoc_database_find_collections_legacy_ctx_t *ctx;
BSON_ASSERT (database);
col = mongoc_client_get_collection (
database->client, database->name, "system.namespaces");
BSON_ASSERT (col);
dbname_len = (uint32_t) strlen (database->name);
ctx = (mongoc_database_find_collections_legacy_ctx_t *) bson_malloc (
sizeof (*ctx));
ctx->dbname = database->name;
ctx->dbname_len = dbname_len;
/* Filtering on name needs to be handled differently for old servers. */
if (filter && bson_iter_init_find (&iter, filter, "name")) {
bson_string_t *buf;
/* on legacy servers, this must be a string (i.e. not a regex) */
if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
+ cursor = _mongoc_cursor_new_with_opts (
+ col->client, col->ns, false, filter, NULL, NULL, NULL);
bson_set_error (
- error,
+ &cursor->error,
MONGOC_ERROR_NAMESPACE,
MONGOC_ERROR_NAMESPACE_INVALID_FILTER_TYPE,
"On legacy servers, a filter on name can only be a string.");
bson_free (ctx);
goto cleanup_filter;
}
BSON_ASSERT (BSON_ITER_HOLDS_UTF8 (&iter));
col_filter = bson_iter_utf8 (&iter, NULL);
bson_init (&legacy_filter);
bson_copy_to_excluding_noinit (filter, &legacy_filter, "name", NULL);
/* We must db-qualify filters on name. */
buf = bson_string_new (database->name);
bson_string_append_c (buf, '.');
bson_string_append (buf, col_filter);
BSON_APPEND_UTF8 (&legacy_filter, "name", buf->str);
bson_string_free (buf, true);
filter = &legacy_filter;
}
/* Enumerate Collections Spec: "run listCollections on the primary node in
* replicaset mode" */
read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
cursor = mongoc_collection_find_with_opts (
col, filter ? filter : &q, NULL, read_prefs);
_mongoc_cursor_transform_init (
cursor,
_mongoc_database_find_collections_legacy_filter,
_mongoc_database_find_collections_legacy_mutate,
&bson_free,
ctx);
mongoc_read_prefs_destroy (read_prefs);
cleanup_filter:
mongoc_collection_destroy (col);
return cursor;
}
mongoc_cursor_t *
mongoc_database_find_collections (mongoc_database_t *database,
const bson_t *filter,
bson_error_t *error)
{
+ bson_t opts = BSON_INITIALIZER;
mongoc_cursor_t *cursor;
- bson_t cmd = BSON_INITIALIZER;
- bson_t child;
- bson_error_t lerror;
BSON_ASSERT (database);
- BSON_APPEND_INT32 (&cmd, "listCollections", 1);
-
if (filter) {
- BSON_APPEND_DOCUMENT (&cmd, "filter", filter);
- BSON_APPEND_DOCUMENT_BEGIN (&cmd, "cursor", &child);
- bson_append_document_end (&cmd, &child);
+ if (!BSON_APPEND_DOCUMENT (&opts, "filter", filter)) {
+ bson_set_error (error,
+ MONGOC_ERROR_BSON,
+ MONGOC_ERROR_BSON_INVALID,
+ "Invalid 'filter' parameter.");
+ return NULL;
+ }
+ }
+
+ cursor = mongoc_database_find_collections_with_opts (database, &opts);
+
+ /* this deprecated API returns NULL on error */
+ if (mongoc_cursor_error (cursor, error)) {
+ mongoc_cursor_destroy (cursor);
+ return NULL;
}
+ return cursor;
+}
+
+
+mongoc_cursor_t *
+mongoc_database_find_collections_with_opts (mongoc_database_t *database,
+ const bson_t *opts)
+{
+ mongoc_cursor_t *cursor;
+ bson_t cmd = BSON_INITIALIZER;
+ bson_iter_t iter;
+ bson_t filter;
+ bson_error_t error;
+ uint32_t len;
+ const uint8_t *data;
+
+ BSON_ASSERT (database);
+
+ BSON_APPEND_INT32 (&cmd, "listCollections", 1);
+
/* Enumerate Collections Spec: "run listCollections on the primary node in
* replicaset mode" */
cursor = _mongoc_cursor_new_with_opts (database->client,
database->name,
- true /* is_command */,
- NULL,
+ false /* is_find */,
NULL,
+ opts,
NULL,
NULL);
_mongoc_cursor_cursorid_init (cursor, &cmd);
- if (_mongoc_cursor_cursorid_prime (cursor)) {
- /* intentionally empty */
- } else {
- if (mongoc_cursor_error (cursor, &lerror)) {
- if (lerror.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND) {
- /* We are talking to a server that doesn' support listCollections.
- */
- /* clear out the error. */
- memset (&lerror, 0, sizeof lerror);
- /* try again with using system.namespaces */
- mongoc_cursor_destroy (cursor);
- cursor = _mongoc_database_find_collections_legacy (
- database, filter, error);
- } else if (error) {
- memcpy (error, &lerror, sizeof *error);
+ if (!_mongoc_cursor_cursorid_prime (cursor)) {
+ mongoc_cursor_error (cursor, &error);
+ if (error.code == MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND) {
+ /* old server doesn't have listCollections, use system.namespaces */
+ memset (&error, 0, sizeof error);
+ mongoc_cursor_destroy (cursor);
+
+ if (opts && bson_iter_init_find (&iter, opts, "filter")) {
+ bson_iter_document (&iter, &len, &data);
+ bson_init_static (&filter, data, len);
+ cursor =
+ _mongoc_database_find_collections_legacy (database, &filter);
+ } else {
+ cursor = _mongoc_database_find_collections_legacy (database, NULL);
}
}
}
bson_destroy (&cmd);
return cursor;
}
char **
mongoc_database_get_collection_names (mongoc_database_t *database,
bson_error_t *error)
+{
+ return mongoc_database_get_collection_names_with_opts (database, NULL, error);
+}
+
+
+char **
+mongoc_database_get_collection_names_with_opts (mongoc_database_t *database,
+ const bson_t *opts,
+ bson_error_t *error)
{
bson_iter_t col;
const char *name;
char *namecopy;
mongoc_array_t strv_buf;
mongoc_cursor_t *cursor;
const bson_t *doc;
char **ret;
BSON_ASSERT (database);
- cursor = mongoc_database_find_collections (database, NULL, error);
-
- if (!cursor) {
- return NULL;
- }
+ cursor = mongoc_database_find_collections_with_opts (database, opts);
_mongoc_array_init (&strv_buf, sizeof (char *));
while (mongoc_cursor_next (cursor, &doc)) {
if (bson_iter_init (&col, doc) && bson_iter_find (&col, "name") &&
BSON_ITER_HOLDS_UTF8 (&col) && (name = bson_iter_utf8 (&col, NULL))) {
namecopy = bson_strdup (name);
_mongoc_array_append_val (&strv_buf, namecopy);
}
}
/* append a null pointer for the last value. also handles the case
* of no values. */
namecopy = NULL;
_mongoc_array_append_val (&strv_buf, namecopy);
if (mongoc_cursor_error (cursor, error)) {
_mongoc_array_destroy (&strv_buf);
ret = NULL;
} else {
ret = (char **) strv_buf.data;
}
mongoc_cursor_destroy (cursor);
return ret;
}
mongoc_collection_t *
mongoc_database_create_collection (mongoc_database_t *database,
const char *name,
const bson_t *opts,
bson_error_t *error)
{
mongoc_collection_t *collection = NULL;
bson_iter_t iter;
bson_t cmd;
bool capped = false;
BSON_ASSERT (database);
BSON_ASSERT (name);
if (strchr (name, '$')) {
bson_set_error (error,
MONGOC_ERROR_NAMESPACE,
MONGOC_ERROR_NAMESPACE_INVALID,
"The namespace \"%s\" is invalid.",
name);
return NULL;
}
if (opts) {
if (bson_iter_init_find (&iter, opts, "capped")) {
if (!BSON_ITER_HOLDS_BOOL (&iter)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"The argument \"capped\" must be a boolean.");
return NULL;
}
capped = bson_iter_bool (&iter);
}
if (bson_iter_init_find (&iter, opts, "size")) {
if (!BSON_ITER_HOLDS_INT (&iter)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"The argument \"size\" must be an integer.");
return NULL;
}
if (!capped) {
bson_set_error (
error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"The \"size\" parameter requires {\"capped\": true}");
return NULL;
}
}
if (bson_iter_init_find (&iter, opts, "max")) {
if (!BSON_ITER_HOLDS_INT (&iter)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"The argument \"max\" must be an integer.");
return NULL;
}
if (!capped) {
bson_set_error (
error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"The \"max\" parameter requires {\"capped\": true}");
return NULL;
}
}
if (bson_iter_init_find (&iter, opts, "storageEngine")) {
if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
bson_set_error (
error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"The \"storageEngine\" parameter must be a document");
return NULL;
}
if (bson_iter_find (&iter, "wiredTiger")) {
if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"The \"wiredTiger\" option must take a document "
"argument with a \"configString\" field");
return NULL;
}
if (bson_iter_find (&iter, "configString")) {
if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
bson_set_error (
error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"The \"configString\" parameter must be a string");
return NULL;
}
} else {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"The \"wiredTiger\" option must take a document "
"argument with a \"configString\" field");
return NULL;
}
}
}
}
bson_init (&cmd);
BSON_APPEND_UTF8 (&cmd, "create", name);
if (_mongoc_client_command_with_opts (database->client,
database->name,
&cmd,
MONGOC_CMD_WRITE,
opts,
MONGOC_QUERY_NONE,
database->read_prefs,
database->read_concern,
database->write_concern,
NULL, /* reply */
error)) {
collection = _mongoc_collection_new (database->client,
database->name,
name,
database->read_prefs,
database->read_concern,
database->write_concern);
}
bson_destroy (&cmd);
return collection;
}
mongoc_collection_t *
mongoc_database_get_collection (mongoc_database_t *database,
const char *collection)
{
BSON_ASSERT (database);
BSON_ASSERT (collection);
return _mongoc_collection_new (database->client,
database->name,
collection,
database->read_prefs,
database->read_concern,
database->write_concern);
}
const char *
mongoc_database_get_name (mongoc_database_t *database)
{
BSON_ASSERT (database);
return database->name;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-database.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-database.h
similarity index 84%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-database.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-database.h
index 012d4987..aea74835 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-database.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-database.h
@@ -1,140 +1,154 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_DATABASE_H
#define MONGOC_DATABASE_H
#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-macros.h"
#include "mongoc-cursor.h"
#include "mongoc-flags.h"
#include "mongoc-read-prefs.h"
#include "mongoc-read-concern.h"
#include "mongoc-write-concern.h"
-
BSON_BEGIN_DECLS
typedef struct _mongoc_database_t mongoc_database_t;
MONGOC_EXPORT (const char *)
mongoc_database_get_name (mongoc_database_t *database);
MONGOC_EXPORT (bool)
mongoc_database_remove_user (mongoc_database_t *database,
const char *username,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_database_remove_all_users (mongoc_database_t *database,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_database_add_user (mongoc_database_t *database,
const char *username,
const char *password,
const bson_t *roles,
const bson_t *custom_data,
bson_error_t *error);
MONGOC_EXPORT (void)
mongoc_database_destroy (mongoc_database_t *database);
MONGOC_EXPORT (mongoc_database_t *)
mongoc_database_copy (mongoc_database_t *database);
MONGOC_EXPORT (mongoc_cursor_t *)
mongoc_database_command (mongoc_database_t *database,
mongoc_query_flags_t flags,
uint32_t skip,
uint32_t limit,
uint32_t batch_size,
const bson_t *command,
const bson_t *fields,
const mongoc_read_prefs_t *read_prefs);
MONGOC_EXPORT (bool)
mongoc_database_read_command_with_opts (mongoc_database_t *database,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
const bson_t *opts,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_database_write_command_with_opts (mongoc_database_t *database,
const bson_t *command,
const bson_t *opts,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_database_read_write_command_with_opts (
mongoc_database_t *database,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs /* IGNORED */,
const bson_t *opts,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
+mongoc_database_command_with_opts (mongoc_database_t *database,
+ const bson_t *command,
+ const mongoc_read_prefs_t *read_prefs,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+MONGOC_EXPORT (bool)
mongoc_database_command_simple (mongoc_database_t *database,
const bson_t *command,
const mongoc_read_prefs_t *read_prefs,
bson_t *reply,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_database_drop (mongoc_database_t *database, bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_database_drop_with_opts (mongoc_database_t *database,
const bson_t *opts,
bson_error_t *error);
MONGOC_EXPORT (bool)
mongoc_database_has_collection (mongoc_database_t *database,
const char *name,
bson_error_t *error);
MONGOC_EXPORT (mongoc_collection_t *)
mongoc_database_create_collection (mongoc_database_t *database,
const char *name,
const bson_t *options,
bson_error_t *error);
MONGOC_EXPORT (const mongoc_read_prefs_t *)
mongoc_database_get_read_prefs (const mongoc_database_t *database);
MONGOC_EXPORT (void)
mongoc_database_set_read_prefs (mongoc_database_t *database,
const mongoc_read_prefs_t *read_prefs);
MONGOC_EXPORT (const mongoc_write_concern_t *)
mongoc_database_get_write_concern (const mongoc_database_t *database);
MONGOC_EXPORT (void)
mongoc_database_set_write_concern (mongoc_database_t *database,
const mongoc_write_concern_t *write_concern);
MONGOC_EXPORT (const mongoc_read_concern_t *)
mongoc_database_get_read_concern (const mongoc_database_t *database);
MONGOC_EXPORT (void)
mongoc_database_set_read_concern (mongoc_database_t *database,
const mongoc_read_concern_t *read_concern);
MONGOC_EXPORT (mongoc_cursor_t *)
mongoc_database_find_collections (mongoc_database_t *database,
const bson_t *filter,
- bson_error_t *error);
+ bson_error_t *error)
+ BSON_GNUC_DEPRECATED_FOR (mongoc_database_find_collections_with_opts);
+MONGOC_EXPORT (mongoc_cursor_t *)
+mongoc_database_find_collections_with_opts (mongoc_database_t *database,
+ const bson_t *opts);
MONGOC_EXPORT (char **)
mongoc_database_get_collection_names (mongoc_database_t *database,
- bson_error_t *error);
+ bson_error_t *error)
+ BSON_GNUC_DEPRECATED_FOR (mongoc_database_get_collection_names_with_opts);
+MONGOC_EXPORT (char **)
+mongoc_database_get_collection_names_with_opts (mongoc_database_t *database,
+ const bson_t *opts,
+ bson_error_t *error);
MONGOC_EXPORT (mongoc_collection_t *)
mongoc_database_get_collection (mongoc_database_t *database, const char *name);
-
BSON_END_DECLS
#endif /* MONGOC_DATABASE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-errno-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-errno-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-errno-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-errno-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-error.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-error.h
similarity index 97%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-error.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-error.h
index 51dfa58f..324b8db4 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-error.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-error.h
@@ -1,117 +1,120 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_ERRORS_H
#define MONGOC_ERRORS_H
#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#define MONGOC_ERROR_API_VERSION_LEGACY 1
#define MONGOC_ERROR_API_VERSION_2 2
BSON_BEGIN_DECLS
typedef enum {
MONGOC_ERROR_CLIENT = 1,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_CURSOR,
MONGOC_ERROR_QUERY,
MONGOC_ERROR_INSERT,
MONGOC_ERROR_SASL,
MONGOC_ERROR_BSON,
MONGOC_ERROR_MATCHER,
MONGOC_ERROR_NAMESPACE,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COLLECTION,
MONGOC_ERROR_GRIDFS,
MONGOC_ERROR_SCRAM,
MONGOC_ERROR_SERVER_SELECTION,
MONGOC_ERROR_WRITE_CONCERN,
MONGOC_ERROR_SERVER, /* Error API Version 2 only */
} mongoc_error_domain_t;
typedef enum {
MONGOC_ERROR_STREAM_INVALID_TYPE = 1,
MONGOC_ERROR_STREAM_INVALID_STATE,
MONGOC_ERROR_STREAM_NAME_RESOLUTION,
MONGOC_ERROR_STREAM_SOCKET,
MONGOC_ERROR_STREAM_CONNECT,
MONGOC_ERROR_STREAM_NOT_ESTABLISHED,
MONGOC_ERROR_CLIENT_NOT_READY,
MONGOC_ERROR_CLIENT_TOO_BIG,
MONGOC_ERROR_CLIENT_TOO_SMALL,
MONGOC_ERROR_CLIENT_GETNONCE,
MONGOC_ERROR_CLIENT_AUTHENTICATE,
MONGOC_ERROR_CLIENT_NO_ACCEPTABLE_PEER,
MONGOC_ERROR_CLIENT_IN_EXHAUST,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
MONGOC_ERROR_CURSOR_INVALID_CURSOR,
MONGOC_ERROR_QUERY_FAILURE,
MONGOC_ERROR_BSON_INVALID,
MONGOC_ERROR_MATCHER_INVALID,
MONGOC_ERROR_NAMESPACE_INVALID,
MONGOC_ERROR_NAMESPACE_INVALID_FILTER_TYPE,
MONGOC_ERROR_COMMAND_INVALID_ARG,
MONGOC_ERROR_COLLECTION_INSERT_FAILED,
MONGOC_ERROR_COLLECTION_UPDATE_FAILED,
MONGOC_ERROR_COLLECTION_DELETE_FAILED,
MONGOC_ERROR_COLLECTION_DOES_NOT_EXIST = 26,
MONGOC_ERROR_GRIDFS_INVALID_FILENAME,
MONGOC_ERROR_SCRAM_NOT_DONE,
MONGOC_ERROR_SCRAM_PROTOCOL_ERROR,
MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND = 59,
MONGOC_ERROR_QUERY_NOT_TAILABLE = 13051,
MONGOC_ERROR_SERVER_SELECTION_BAD_WIRE_VERSION,
MONGOC_ERROR_SERVER_SELECTION_FAILURE,
MONGOC_ERROR_SERVER_SELECTION_INVALID_ID,
MONGOC_ERROR_GRIDFS_CHUNK_MISSING,
MONGOC_ERROR_GRIDFS_PROTOCOL_ERROR,
/* Dup with query failure. */
MONGOC_ERROR_PROTOCOL_ERROR = 17,
MONGOC_ERROR_WRITE_CONCERN_ERROR = 64,
MONGOC_ERROR_DUPLICATE_KEY = 11000,
+
+ MONGOC_ERROR_CHANGE_STREAM_NO_RESUME_TOKEN,
+ MONGOC_ERROR_CLIENT_SESSION_FAILURE,
} mongoc_error_code_t;
BSON_END_DECLS
#endif /* MONGOC_ERRORS_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-find-and-modify-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-find-and-modify-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-find-and-modify-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-find-and-modify-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-find-and-modify.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-find-and-modify.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-find-and-modify.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-find-and-modify.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-find-and-modify.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-find-and-modify.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-find-and-modify.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-find-and-modify.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-flags.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-flags.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-flags.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-flags.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-list-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-list-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-list-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-list-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.c
similarity index 98%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.c
index ccf6519e..95982766 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.c
@@ -1,119 +1,119 @@
/*
* Copyright 2013 MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits.h>
#include "mongoc-cursor.h"
#include "mongoc-cursor-private.h"
#include "mongoc-collection-private.h"
#include "mongoc-gridfs.h"
#include "mongoc-gridfs-private.h"
#include "mongoc-gridfs-file.h"
#include "mongoc-gridfs-file-private.h"
#include "mongoc-gridfs-file-list.h"
#include "mongoc-gridfs-file-list-private.h"
#include "mongoc-trace-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "gridfs_file_list"
mongoc_gridfs_file_list_t *
_mongoc_gridfs_file_list_new (mongoc_gridfs_t *gridfs,
const bson_t *query,
uint32_t limit)
{
mongoc_gridfs_file_list_t *list;
mongoc_cursor_t *cursor;
cursor = _mongoc_cursor_new (gridfs->client,
gridfs->files->ns,
MONGOC_QUERY_NONE,
0,
limit,
0,
- false /* is command */,
+ true /* is_find */,
query,
NULL,
gridfs->files->read_prefs,
gridfs->files->read_concern);
BSON_ASSERT (cursor);
list = (mongoc_gridfs_file_list_t *) bson_malloc0 (sizeof *list);
list->cursor = cursor;
list->gridfs = gridfs;
return list;
}
mongoc_gridfs_file_list_t *
_mongoc_gridfs_file_list_new_with_opts (mongoc_gridfs_t *gridfs,
const bson_t *filter,
const bson_t *opts)
{
mongoc_gridfs_file_list_t *list;
mongoc_cursor_t *cursor;
cursor = mongoc_collection_find_with_opts (
gridfs->files, filter, opts, NULL /* read prefs */);
BSON_ASSERT (cursor);
list = (mongoc_gridfs_file_list_t *) bson_malloc0 (sizeof *list);
list->cursor = cursor;
list->gridfs = gridfs;
return list;
}
mongoc_gridfs_file_t *
mongoc_gridfs_file_list_next (mongoc_gridfs_file_list_t *list)
{
const bson_t *bson;
BSON_ASSERT (list);
if (mongoc_cursor_next (list->cursor, &bson)) {
return _mongoc_gridfs_file_new_from_bson (list->gridfs, bson);
} else {
return NULL;
}
}
bool
mongoc_gridfs_file_list_error (mongoc_gridfs_file_list_t *list,
bson_error_t *error)
{
return mongoc_cursor_error (list->cursor, error);
}
void
mongoc_gridfs_file_list_destroy (mongoc_gridfs_file_list_t *list)
{
BSON_ASSERT (list);
mongoc_cursor_destroy (list->cursor);
bson_free (list);
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-page-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-page-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-page-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-page-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-page.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-page.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-page.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-page.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-page.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-page.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-page.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-page.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file.c
similarity index 97%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file.c
index 653355dd..f3bfb5ce 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file.c
@@ -1,1052 +1,1049 @@
/*
* Copyright 2013 MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "gridfs_file"
#include <limits.h>
#include <time.h>
#include <errno.h>
#include "mongoc-cursor.h"
#include "mongoc-cursor-private.h"
#include "mongoc-collection.h"
#include "mongoc-gridfs.h"
#include "mongoc-gridfs-private.h"
#include "mongoc-gridfs-file.h"
#include "mongoc-gridfs-file-private.h"
#include "mongoc-gridfs-file-page.h"
#include "mongoc-gridfs-file-page-private.h"
#include "mongoc-iovec.h"
#include "mongoc-trace-private.h"
#include "mongoc-error.h"
static bool
_mongoc_gridfs_file_refresh_page (mongoc_gridfs_file_t *file);
static bool
_mongoc_gridfs_file_flush_page (mongoc_gridfs_file_t *file);
static ssize_t
_mongoc_gridfs_file_extend (mongoc_gridfs_file_t *file);
/*****************************************************************
-* Magic accessor generation
-*
-* We need some accessors to get and set properties on files, to handle memory
-* ownership and to determine dirtiness. These macros produce the getters and
-* setters we need
-*****************************************************************/
+ * Magic accessor generation
+ *
+ * We need some accessors to get and set properties on files, to handle memory
+ * ownership and to determine dirtiness. These macros produce the getters and
+ * setters we need
+ *****************************************************************/
/* MONGOC_GRIDFS_FILE_STR_ACCESSOR(name):
 * Generates a getter/setter pair for a string-typed file property.
 * - getter: prefers the locally assigned value (file->name); falls back to
 *   the value parsed from the server-side document (file->bson_##name).
 * - setter: frees any previously assigned local value, duplicates the new
 *   string, and marks the file dirty so the change is persisted on save. */
#define MONGOC_GRIDFS_FILE_STR_ACCESSOR(name)                             \
   const char *mongoc_gridfs_file_get_##name (mongoc_gridfs_file_t *file) \
   {                                                                      \
      return file->name ? file->name : file->bson_##name;                 \
   }                                                                      \
   void mongoc_gridfs_file_set_##name (mongoc_gridfs_file_t *file,        \
                                       const char *str)                   \
   {                                                                      \
      if (file->name) {                                                   \
         bson_free (file->name);                                          \
      }                                                                   \
      file->name = bson_strdup (str);                                     \
      file->is_dirty = 1;                                                 \
   }
/* MONGOC_GRIDFS_FILE_BSON_ACCESSOR(name):
 * Generates a getter/setter pair for a bson_t-typed file property.
 * - getter: returns the locally assigned document if populated (len != 0),
 *   else the document parsed from the server-side object, else NULL.
 * - setter: destroys any previously assigned local document, deep-copies
 *   the new one, and marks the file dirty so it is persisted on save. */
#define MONGOC_GRIDFS_FILE_BSON_ACCESSOR(name)                            \
   const bson_t *mongoc_gridfs_file_get_##name (mongoc_gridfs_file_t *file) \
   {                                                                      \
      if (file->name.len) {                                               \
         return &file->name;                                              \
      } else if (file->bson_##name.len) {                                 \
         return &file->bson_##name;                                       \
      } else {                                                            \
         return NULL;                                                     \
      }                                                                   \
   }                                                                      \
   void mongoc_gridfs_file_set_##name (mongoc_gridfs_file_t *file,        \
                                       const bson_t *bson)                \
   {                                                                      \
      if (file->name.len) {                                               \
         bson_destroy (&file->name);                                      \
      }                                                                   \
      bson_copy_to (bson, &(file->name));                                 \
      file->is_dirty = 1;                                                 \
   }
MONGOC_GRIDFS_FILE_STR_ACCESSOR (md5)
MONGOC_GRIDFS_FILE_STR_ACCESSOR (filename)
MONGOC_GRIDFS_FILE_STR_ACCESSOR (content_type)
MONGOC_GRIDFS_FILE_BSON_ACCESSOR (aliases)
MONGOC_GRIDFS_FILE_BSON_ACCESSOR (metadata)
/**
 * mongoc_gridfs_file_set_id:
 *
 * Assign a caller-chosen id (of any BSON type) to the file. Only permitted
 * while the file is still unsaved (dirty); once saved, the id is fixed.
 *
 * Returns true on success; false (with @error set) if the file was already
 * saved.
 */
bool
mongoc_gridfs_file_set_id (mongoc_gridfs_file_t *file,
                           const bson_value_t *id,
                           bson_error_t *error)
{
   /* an id may only be assigned before the first save */
   if (file->is_dirty) {
      bson_value_copy (id, &file->files_id);
      return true;
   }

   bson_set_error (error,
                   MONGOC_ERROR_GRIDFS,
                   MONGOC_ERROR_GRIDFS_PROTOCOL_ERROR,
                   "Cannot set file id after saving file.");
   return false;
}
/** save a gridfs file
 *
 * Flushes the current page to the chunks collection if it is dirty, then
 * upserts the file's metadata document (keyed by _id) into the files
 * collection. A no-op returning success when the file is not dirty.
 *
 * Returns the result of the metadata update; on failure the error is
 * recorded in file->error by mongoc_collection_update.
 */
bool
mongoc_gridfs_file_save (mongoc_gridfs_file_t *file)
{
   bson_t *selector, *update, child;
   const char *md5;
   const char *filename;
   const char *content_type;
   const bson_t *aliases;
   const bson_t *metadata;
   bool r;

   ENTRY;

   /* nothing changed since the last save: success without any server work */
   if (!file->is_dirty) {
      return 1;
   }

   /* push buffered chunk data to the server before writing metadata */
   if (file->page && _mongoc_gridfs_file_page_is_dirty (file->page)) {
      _mongoc_gridfs_file_flush_page (file);
   }

   /* accessors prefer locally-set values over ones parsed from the server */
   md5 = mongoc_gridfs_file_get_md5 (file);
   filename = mongoc_gridfs_file_get_filename (file);
   content_type = mongoc_gridfs_file_get_content_type (file);
   aliases = mongoc_gridfs_file_get_aliases (file);
   metadata = mongoc_gridfs_file_get_metadata (file);

   selector = bson_new ();
   bson_append_value (selector, "_id", -1, &file->files_id);

   update = bson_new ();
   bson_append_document_begin (update, "$set", -1, &child);
   bson_append_int64 (&child, "length", -1, file->length);
   bson_append_int32 (&child, "chunkSize", -1, file->chunk_size);
   bson_append_date_time (&child, "uploadDate", -1, file->upload_date);

   /* optional fields are only written when present */
   if (md5) {
      bson_append_utf8 (&child, "md5", -1, md5, -1);
   }

   if (filename) {
      bson_append_utf8 (&child, "filename", -1, filename, -1);
   }

   if (content_type) {
      bson_append_utf8 (&child, "contentType", -1, content_type, -1);
   }

   if (aliases) {
      bson_append_array (&child, "aliases", -1, aliases);
   }

   if (metadata) {
      bson_append_document (&child, "metadata", -1, metadata);
   }

   bson_append_document_end (update, &child);

   /* upsert: creates the files document on the first save */
   r = mongoc_collection_update (file->gridfs->files,
                                 MONGOC_UPDATE_UPSERT,
                                 selector,
                                 update,
                                 NULL,
                                 &file->error);

   bson_destroy (selector);
   bson_destroy (update);

   file->is_dirty = 0;

   RETURN (r);
}
/**
 * _mongoc_gridfs_file_new_from_bson:
 *
 * creates a gridfs file from a bson object
 *
 * This is only really useful for instantiating a gridfs file from a server
 * side object
 *
 * Returns a new file, or NULL if a known field has the wrong BSON type.
 * On failure everything allocated here is released.
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs, const bson_t *data)
{
   mongoc_gridfs_file_t *file;
   const bson_value_t *value;
   const char *key;
   bson_iter_t iter;
   const uint8_t *buf;
   uint32_t buf_len;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (data);

   file = (mongoc_gridfs_file_t *) bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   /* keep our own copy of the document: the bson_* string/doc fields below
    * point into this buffer, so it must live as long as the file */
   bson_copy_to (data, &file->bson);

   bson_iter_init (&iter, &file->bson);

   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);

      if (0 == strcmp (key, "_id")) {
         value = bson_iter_value (&iter);
         bson_value_copy (value, &file->files_id);
      } else if (0 == strcmp (key, "length")) {
         if (!BSON_ITER_HOLDS_NUMBER (&iter)) {
            GOTO (failure);
         }

         file->length = bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "chunkSize")) {
         if (!BSON_ITER_HOLDS_NUMBER (&iter)) {
            GOTO (failure);
         }

         /* chunk_size is an int32_t; reject values that would overflow */
         if (bson_iter_as_int64 (&iter) > INT32_MAX) {
            GOTO (failure);
         }

         file->chunk_size = (int32_t) bson_iter_as_int64 (&iter);
      } else if (0 == strcmp (key, "uploadDate")) {
         if (!BSON_ITER_HOLDS_DATE_TIME (&iter)) {
            GOTO (failure);
         }

         file->upload_date = bson_iter_date_time (&iter);
      } else if (0 == strcmp (key, "md5")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }

         file->bson_md5 = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "filename")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }

         file->bson_filename = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "contentType")) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter)) {
            GOTO (failure);
         }

         file->bson_content_type = bson_iter_utf8 (&iter, NULL);
      } else if (0 == strcmp (key, "aliases")) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter)) {
            GOTO (failure);
         }

         bson_iter_array (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_aliases, buf, buf_len);
      } else if (0 == strcmp (key, "metadata")) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) {
            GOTO (failure);
         }

         bson_iter_document (&iter, &buf_len, &buf);
         bson_init_static (&file->bson_metadata, buf, buf_len);
      }
   }

   /* TODO: is there a minimal object we should be verifying that we
    * actually have here? */

   RETURN (file);

failure:
   /* fix: the file struct itself (and a copied _id value, if any) previously
    * leaked on this path; only file->bson was destroyed */
   if (file->files_id.value_type) {
      bson_value_destroy (&file->files_id);
   }
   bson_destroy (&file->bson);
   bson_free (file);

   RETURN (NULL);
}
/**
 * _mongoc_gridfs_file_new:
 *
 * Create a new, empty, dirty gridfs file with a freshly generated ObjectId,
 * applying any options supplied in @opt (all optional).
 */
mongoc_gridfs_file_t *
_mongoc_gridfs_file_new (mongoc_gridfs_t *gridfs, mongoc_gridfs_file_opt_t *opt)
{
   mongoc_gridfs_file_opt_t no_opts = {0};
   mongoc_gridfs_file_t *file;

   ENTRY;

   BSON_ASSERT (gridfs);

   /* treat a missing opt struct as "all defaults" */
   if (opt == NULL) {
      opt = &no_opts;
   }

   file = (mongoc_gridfs_file_t *) bson_malloc0 (sizeof *file);

   file->gridfs = gridfs;
   file->is_dirty = 1;

   /*
    * The default chunk size is now 255kb. This used to be 256k but has been
    * reduced to allow for them to fit within power of two sizes in mongod.
    *
    * See CDRIVER-322.
    */
   file->chunk_size = opt->chunk_size ? opt->chunk_size : ((1 << 18) - 1024);

   file->files_id.value_type = BSON_TYPE_OID;
   bson_oid_init (&file->files_id.value.v_oid, NULL);

   /* upload date is stored as milliseconds since the epoch */
   file->upload_date = ((int64_t) time (NULL)) * 1000;

   if (opt->md5) {
      file->md5 = bson_strdup (opt->md5);
   }
   if (opt->filename) {
      file->filename = bson_strdup (opt->filename);
   }
   if (opt->content_type) {
      file->content_type = bson_strdup (opt->content_type);
   }
   if (opt->aliases) {
      bson_copy_to (opt->aliases, &(file->aliases));
   }
   if (opt->metadata) {
      bson_copy_to (opt->metadata, &(file->metadata));
   }

   file->pos = 0;
   file->n = 0;

   RETURN (file);
}
/**
 * mongoc_gridfs_file_destroy:
 *
 * Release all resources owned by @file: the buffered page, cached BSON
 * documents, cursor, copied id value, duplicated strings, and the struct
 * itself. Does not flush unsaved data.
 */
void
mongoc_gridfs_file_destroy (mongoc_gridfs_file_t *file)
{
   ENTRY;

   BSON_ASSERT (file);

   if (file->page) {
      _mongoc_gridfs_file_page_destroy (file->page);
   }

   /* embedded bson_t members are destroyed only if populated (len != 0),
    * matching how the rest of this file guards bson_destroy */
   if (file->bson.len) {
      bson_destroy (&file->bson);
   }

   if (file->cursor) {
      mongoc_cursor_destroy (file->cursor);
   }

   if (file->files_id.value_type) {
      bson_value_destroy (&file->files_id);
   }

   /* fix: dropped redundant NULL guards - bson_free (NULL) is a no-op */
   bson_free (file->md5);
   bson_free (file->filename);
   bson_free (file->content_type);

   if (file->aliases.len) {
      bson_destroy (&file->aliases);
   }

   if (file->bson_aliases.len) {
      bson_destroy (&file->bson_aliases);
   }

   if (file->metadata.len) {
      bson_destroy (&file->metadata);
   }

   if (file->bson_metadata.len) {
      bson_destroy (&file->bson_metadata);
   }

   bson_free (file);

   EXIT;
}
/** readv against a gridfs file
 * timeout_msec is unused
 *
 * Fills the supplied iovecs from the file's chunk pages, loading pages on
 * demand, until either all iovecs are full, EOF is reached, or at least
 * min_bytes have been read and a new page would be required.
 *
 * Returns the number of bytes read, 0 when positioned at/past EOF, or -1 on
 * page-load failure (error recorded in file->error by the refresh path).
 */
ssize_t
mongoc_gridfs_file_readv (mongoc_gridfs_file_t *file,
                          mongoc_iovec_t *iov,
                          size_t iovcnt,
                          size_t min_bytes,
                          uint32_t timeout_msec)
{
   uint32_t bytes_read = 0;
   int32_t r;
   size_t i;
   uint32_t iov_pos;

   ENTRY;

   BSON_ASSERT (file);
   BSON_ASSERT (iov);
   BSON_ASSERT (iovcnt);

   /* Reading when positioned past the end does nothing */
   if (file->pos >= file->length) {
      /* fix: use RETURN so ENTRY/RETURN tracing stays balanced, matching
       * every other exit from this function */
      RETURN (0);
   }

   /* Try to get the current chunk */
   if (!file->page && !_mongoc_gridfs_file_refresh_page (file)) {
      RETURN (-1);
   }

   for (i = 0; i < iovcnt; i++) {
      iov_pos = 0;

      for (;;) {
         r = _mongoc_gridfs_file_page_read (
            file->page,
            (uint8_t *) iov[i].iov_base + iov_pos,
            (uint32_t) (iov[i].iov_len - iov_pos));
         BSON_ASSERT (r >= 0);

         iov_pos += r;
         file->pos += r;
         bytes_read += r;

         if (iov_pos == iov[i].iov_len) {
            /* filled a bucket, keep going */
            break;
         } else if (file->length == file->pos) {
            /* we're at the end of the file. So we're done */
            RETURN (bytes_read);
         } else if (bytes_read >= min_bytes) {
            /* we need a new page, but we've read enough bytes to stop */
            RETURN (bytes_read);
         } else if (!_mongoc_gridfs_file_refresh_page (file)) {
            RETURN (-1);
         }
      }
   }

   RETURN (bytes_read);
}
/** writev against a gridfs file
 * timeout_msec is unused
 *
 * Writes the supplied iovecs into the file at the current position, filling
 * any gap past EOF with zeros first, flushing and fetching pages as chunk
 * boundaries are crossed. Extends file->length as bytes are written and
 * marks the file dirty.
 *
 * Returns the number of bytes written, or -1 on page load/flush failure.
 */
ssize_t
mongoc_gridfs_file_writev (mongoc_gridfs_file_t *file,
                           const mongoc_iovec_t *iov,
                           size_t iovcnt,
                           uint32_t timeout_msec)
{
   uint32_t bytes_written = 0;
   int32_t r;
   size_t i;
   uint32_t iov_pos;

   ENTRY;

   BSON_ASSERT (file);
   BSON_ASSERT (iov);
   BSON_ASSERT (iovcnt);

   /* Pull in the correct page */
   if (!file->page && !_mongoc_gridfs_file_refresh_page (file)) {
      /* fix: use RETURN so ENTRY/RETURN tracing stays balanced, matching
       * every other exit from this function */
      RETURN (-1);
   }

   /* When writing past the end-of-file, fill the gap with zeros */
   if (file->pos > file->length && !_mongoc_gridfs_file_extend (file)) {
      RETURN (-1);
   }

   for (i = 0; i < iovcnt; i++) {
      iov_pos = 0;

      for (;;) {
         if (!file->page && !_mongoc_gridfs_file_refresh_page (file)) {
            RETURN (-1);
         }

         /* write bytes until an iov is exhausted or the page is full */
         r = _mongoc_gridfs_file_page_write (
            file->page,
            (uint8_t *) iov[i].iov_base + iov_pos,
            (uint32_t) (iov[i].iov_len - iov_pos));
         BSON_ASSERT (r >= 0);

         iov_pos += r;
         file->pos += r;
         bytes_written += r;

         file->length = BSON_MAX (file->length, (int64_t) file->pos);

         if (iov_pos == iov[i].iov_len) {
            /** filled a bucket, keep going */
            break;
         } else {
            /** flush the buffer, the next pass through will bring in a new page
             */
            if (!_mongoc_gridfs_file_flush_page (file)) {
               RETURN (-1);
            }
         }
      }
   }

   file->is_dirty = 1;

   RETURN (bytes_written);
}
/**
 * _mongoc_gridfs_file_extend:
 *
 *      Extend a GridFS file to the current position pointer. Zeros will be
 *      appended to the end of the file until file->length is even with
 *      file->pos.
 *
 *      If file->length >= file->pos, the function exits successfully with no
 *      operation performed.
 *
 * Parameters:
 *      @file: A mongoc_gridfs_file_t.
 *
 * Returns:
 *      The number of zero bytes written, or -1 on failure.
 *
 * Side Effects:
 *      On success file->length equals the original file->pos, file->pos ends
 *      back at that same value (target_length), and the file is marked dirty.
 */
static ssize_t
_mongoc_gridfs_file_extend (mongoc_gridfs_file_t *file)
{
   int64_t target_length;
   ssize_t diff;

   ENTRY;

   BSON_ASSERT (file);

   /* file already reaches the position pointer: nothing to do */
   if (file->length >= file->pos) {
      RETURN (0);
   }

   diff = (ssize_t) (file->pos - file->length);
   target_length = file->pos;

   /* start zero-filling from the current end of the file */
   mongoc_gridfs_file_seek (file, 0, SEEK_END);

   while (true) {
      if (!file->page && !_mongoc_gridfs_file_refresh_page (file)) {
         RETURN (-1);
      }

      /* Set bytes until we reach the limit or fill a page */
      file->pos += _mongoc_gridfs_file_page_memset0 (file->page,
                                                     target_length - file->pos);

      if (file->pos == target_length) {
         /* We're done */
         break;
      } else if (!_mongoc_gridfs_file_flush_page (file)) {
         /* We tried to flush a full buffer, but an error occurred */
         RETURN (-1);
      }
   }

   file->length = target_length;
   file->is_dirty = true;

   RETURN (diff);
}
/**
 * _mongoc_gridfs_file_flush_page:
 *
 * Unconditionally upsert the file's current page into the chunks
 * collection, keyed by (files_id, n), then persist the file's metadata.
 *
 * Side Effects:
 *
 *    On success, file->page is properly destroyed and set to NULL.
 *
 * Returns:
 *
 *    True on success; false otherwise (error recorded in file->error).
 */
static bool
_mongoc_gridfs_file_flush_page (mongoc_gridfs_file_t *file)
{
   bson_t *chunk_selector;
   bson_t *chunk_doc;
   const uint8_t *data;
   uint32_t data_len;
   bool ok;

   ENTRY;

   BSON_ASSERT (file);
   BSON_ASSERT (file->page);

   data = _mongoc_gridfs_file_page_get_data (file->page);
   data_len = _mongoc_gridfs_file_page_get_len (file->page);

   /* chunks are addressed by (files_id, n) */
   chunk_selector = bson_new ();
   bson_append_value (chunk_selector, "files_id", -1, &file->files_id);
   bson_append_int32 (chunk_selector, "n", -1, file->n);

   /* pre-size the replacement document to hold the chunk payload */
   chunk_doc = bson_sized_new (file->chunk_size + 100);
   bson_append_value (chunk_doc, "files_id", -1, &file->files_id);
   bson_append_int32 (chunk_doc, "n", -1, file->n);
   bson_append_binary (
      chunk_doc, "data", -1, BSON_SUBTYPE_BINARY, data, data_len);

   /* upsert so a brand-new chunk is inserted on its first flush */
   ok = mongoc_collection_update (file->gridfs->chunks,
                                  MONGOC_UPDATE_UPSERT,
                                  chunk_selector,
                                  chunk_doc,
                                  NULL,
                                  &file->error);

   bson_destroy (chunk_selector);
   bson_destroy (chunk_doc);

   if (ok) {
      _mongoc_gridfs_file_page_destroy (file->page);
      file->page = NULL;
      /* persist the (possibly grown) file metadata alongside the chunk */
      ok = mongoc_gridfs_file_save (file);
   }

   RETURN (ok);
}
/**
 * _mongoc_gridfs_file_keep_cursor:
 *
 * After a seek, decide whether the next read can keep using the current
 * cursor or must start a new query.
 *
 * Preconditions:
 *
 *    file has a cursor and cursor range.
 *
 * Side Effects:
 *
 *    None.
 */
static bool
_mongoc_gridfs_file_keep_cursor (mongoc_gridfs_file_t *file)
{
   uint32_t target_chunk;
   uint32_t batch_chunks;

   if (file->n < 0 || file->chunk_size <= 0) {
      return false;
   }

   target_chunk = (uint32_t) file->n;

   /* server returns roughly 4 MB batches by default */
   batch_chunks = (4 * 1024 * 1024) / (uint32_t) file->chunk_size;

   if (file->cursor_range[0] > target_chunk) {
      /* cursor has already advanced past the desired chunk */
      return false;
   }

   if (target_chunk > file->cursor_range[1]) {
      /* desired chunk is past the end of the file */
      return false;
   }

   /* keep the cursor only if the chunk is in this batch or the next one */
   return target_chunk < file->cursor_range[0] + 2 * batch_chunks;
}
/* divide_round_up:
 * Integer ceiling of num / denom (callers pass non-negative sizes and a
 * positive divisor). */
static int64_t
divide_round_up (int64_t num, int64_t denom)
{
   int64_t biased = num + denom - 1;

   return biased / denom;
}
/* missing_chunk:
 * Record a CHUNK_MISSING error for the file's current chunk number and
 * drop the chunk cursor, if any. */
static void
missing_chunk (mongoc_gridfs_file_t *file)
{
   bson_set_error (&file->error,
                   MONGOC_ERROR_GRIDFS,
                   MONGOC_ERROR_GRIDFS_CHUNK_MISSING,
                   "missing chunk number %" PRId32,
                   file->n);

   if (!file->cursor) {
      return;
   }

   mongoc_cursor_destroy (file->cursor);
   file->cursor = NULL;
}
/**
 * _mongoc_gridfs_file_refresh_page:
 *
 *    Refresh a GridFS file's underlying page. This recalculates the current
 *    page number based on the file's stream position, then fetches that page
 *    from the database.
 *
 *    Note that this fetch is unconditional and the page is queried from the
 *    database even if the current page covers the same theoretical chunk.
 *
 *
 * Side Effects:
 *
 *    file->page is loaded with the appropriate buffer, fetched from the
 *    database. If the file position is at the end of the file and on a new
 *    chunk boundary, a new page is created. If the position is far past the
 *    end of the file, _mongoc_gridfs_file_extend is responsible for creating
 *    chunks to fill the gap.
 *
 *    file->n is set based on file->pos. file->error is set on error.
 */
static bool
_mongoc_gridfs_file_refresh_page (mongoc_gridfs_file_t *file)
{
   bson_t query;
   bson_t child;
   bson_t opts;
   const bson_t *chunk;
   const char *key;
   bson_iter_t iter;
   int64_t existing_chunks;
   int64_t required_chunks;
   const uint8_t *data = NULL;
   uint32_t len;

   ENTRY;

   BSON_ASSERT (file);

   file->n = (int32_t) (file->pos / file->chunk_size);

   if (file->page) {
      _mongoc_gridfs_file_page_destroy (file->page);
      file->page = NULL;
   }

   /* if the file pointer is past the end of the current file (i.e. pointing to
    * a new chunk), we'll pass the page constructor a new empty page. */
   existing_chunks = divide_round_up (file->length, file->chunk_size);
   required_chunks = divide_round_up (file->pos + 1, file->chunk_size);
   if (required_chunks > existing_chunks) {
      data = (uint8_t *) "";
      len = 0;
   } else {
      /* if we have a cursor, but the cursor doesn't have the chunk we're going
       * to need, destroy it (we'll grab a new one immediately there after) */
      if (file->cursor && !_mongoc_gridfs_file_keep_cursor (file)) {
         mongoc_cursor_destroy (file->cursor);
         file->cursor = NULL;
      }

      if (!file->cursor) {
         bson_init (&query);
         BSON_APPEND_VALUE (&query, "files_id", &file->files_id);
         BSON_APPEND_DOCUMENT_BEGIN (&query, "n", &child);
         BSON_APPEND_INT32 (&child, "$gte", file->n);
         bson_append_document_end (&query, &child);

         bson_init (&opts);
         BSON_APPEND_DOCUMENT_BEGIN (&opts, "sort", &child);
         BSON_APPEND_INT32 (&child, "n", 1);
         bson_append_document_end (&opts, &child);
         BSON_APPEND_DOCUMENT_BEGIN (&opts, "projection", &child);
         BSON_APPEND_INT32 (&child, "n", 1);
         BSON_APPEND_INT32 (&child, "data", 1);
         BSON_APPEND_INT32 (&child, "_id", 0);
         bson_append_document_end (&opts, &child);

         /* find all chunks greater than or equal to our current file pos */
         file->cursor = mongoc_collection_find_with_opts (
            file->gridfs->chunks, &query, &opts, NULL);

         file->cursor_range[0] = file->n;
         file->cursor_range[1] = (uint32_t) (file->length / file->chunk_size);

         bson_destroy (&query);
         bson_destroy (&opts);

         BSON_ASSERT (file->cursor);
      }

      /* we might have had a cursor before, then seeked ahead past a chunk.
       * iterate until we're on the right chunk */
      while (file->cursor_range[0] <= file->n) {
         if (!mongoc_cursor_next (file->cursor, &chunk)) {
            /* copy cursor error; if there's none, we're missing a chunk */
            if (!mongoc_cursor_error (file->cursor, &file->error)) {
               missing_chunk (file);
            }

            RETURN (0);
         }

         file->cursor_range[0]++;
      }

      bson_iter_init (&iter, chunk);

      /* grab out what we need from the chunk */
      while (bson_iter_next (&iter)) {
         key = bson_iter_key (&iter);

         if (strcmp (key, "n") == 0) {
            if (file->n != bson_iter_int32 (&iter)) {
               missing_chunk (file);
               RETURN (0);
            }
         } else if (strcmp (key, "data") == 0) {
            bson_iter_binary (&iter, NULL, &len, &data);
         } else {
            /* Unexpected key. This should never happen */
            RETURN (0);
         }
      }

      if (file->n != file->pos / file->chunk_size) {
         /* fix: use RETURN (not a bare return) so ENTRY/RETURN tracing stays
          * balanced, matching every other exit from this function */
         RETURN (0);
      }
   }

   if (!data) {
      bson_set_error (&file->error,
                      MONGOC_ERROR_GRIDFS,
                      MONGOC_ERROR_GRIDFS_CHUNK_MISSING,
                      "corrupt chunk number %" PRId32,
                      file->n);
      RETURN (0);
   }

   file->page = _mongoc_gridfs_file_page_new (data, len, file->chunk_size);

   /* seek in the page towards wherever we're supposed to be */
   RETURN (
      _mongoc_gridfs_file_page_seek (file->page, file->pos % file->chunk_size));
}
/**
* mongoc_gridfs_file_seek:
*
* Adjust the file position pointer in `file` by `delta`, starting from the
* position `whence`. The `whence` argument is interpreted as in fseek(2):
*
* SEEK_SET Set the position relative to the start of the file.
* SEEK_CUR Move `delta` from the current file position.
* SEEK_END Move `delta` from the end-of-file.
*
* Parameters:
*
* @file: A mongoc_gridfs_file_t.
* @delta: The amount to move. May be positive or negative.
* @whence: One of SEEK_SET, SEEK_CUR or SEEK_END.
*
* Errors:
*
* [EINVAL] `whence` is not one of SEEK_SET, SEEK_CUR or SEEK_END.
* [EINVAL] Resulting file position would be negative.
*
* Side Effects:
*
* On success, the file's underlying position pointer is set appropriately.
* On failure, the file position is NOT changed and errno is set.
*
* Returns:
*
* 0 on success.
* -1 on error, and errno set to indicate the error.
*/
int
mongoc_gridfs_file_seek (mongoc_gridfs_file_t *file, int64_t delta, int whence)
{
   int64_t offset;

   BSON_ASSERT (file);

   switch (whence) {
   case SEEK_SET:
      offset = delta;
      break;
   case SEEK_CUR:
      offset = file->pos + delta;
      break;
   case SEEK_END:
      offset = file->length + delta;
      break;
   default:
      errno = EINVAL;
      /* fix: removed an unreachable 'break' that followed this return */
      return -1;
   }

   /* a negative resulting position is invalid, per the contract above */
   if (offset < 0) {
      errno = EINVAL;
      return -1;
   }

   if (offset / file->chunk_size != file->n) {
      /** no longer on the same page */

      if (file->page) {
         if (_mongoc_gridfs_file_page_is_dirty (file->page)) {
            /* NOTE(review): a failed flush is silently ignored here; the
             * error is recorded on file->error by the flush path */
            _mongoc_gridfs_file_flush_page (file);
         } else {
            _mongoc_gridfs_file_page_destroy (file->page);
            file->page = NULL;
         }
      }

      /** we'll pick up the seek when we fetch a page on the next action. We
       * lazily load */
   } else if (file->page) {
      _mongoc_gridfs_file_page_seek (file->page, offset % file->chunk_size);
   }

   file->pos = offset;
   file->n = file->pos / file->chunk_size;

   return 0;
}
/** Return the current stream position (byte offset) of the file. */
uint64_t
mongoc_gridfs_file_tell (mongoc_gridfs_file_t *file)
{
   BSON_ASSERT (file);

   return file->pos;
}
bool
mongoc_gridfs_file_error (mongoc_gridfs_file_t *file, bson_error_t *error)
{
BSON_ASSERT (file);
BSON_ASSERT (error);
if (BSON_UNLIKELY (file->error.domain)) {
bson_set_error (error,
file->error.domain,
file->error.code,
"%s",
file->error.message);
RETURN (true);
}
RETURN (false);
}
/** Return a pointer to the file's _id value; owned by the file and valid
 * until the file is destroyed. */
const bson_value_t *
mongoc_gridfs_file_get_id (mongoc_gridfs_file_t *file)
{
   BSON_ASSERT (file);

   return &file->files_id;
}
/** Return the file's length in bytes. */
int64_t
mongoc_gridfs_file_get_length (mongoc_gridfs_file_t *file)
{
   BSON_ASSERT (file);

   return file->length;
}
/** Return the file's chunk size in bytes. */
int32_t
mongoc_gridfs_file_get_chunk_size (mongoc_gridfs_file_t *file)
{
   BSON_ASSERT (file);

   return file->chunk_size;
}
/** Return the file's upload date, in milliseconds since the epoch. */
int64_t
mongoc_gridfs_file_get_upload_date (mongoc_gridfs_file_t *file)
{
   BSON_ASSERT (file);

   return file->upload_date;
}
bool
mongoc_gridfs_file_remove (mongoc_gridfs_file_t *file, bson_error_t *error)
{
bson_t sel = BSON_INITIALIZER;
bool ret = false;
BSON_ASSERT (file);
BSON_APPEND_VALUE (&sel, "_id", &file->files_id);
- if (!mongoc_collection_remove (file->gridfs->files,
- MONGOC_REMOVE_SINGLE_REMOVE,
- &sel,
- NULL,
- error)) {
+ if (!mongoc_collection_delete_one (
+ file->gridfs->files, &sel, NULL, NULL, error)) {
goto cleanup;
}
bson_reinit (&sel);
BSON_APPEND_VALUE (&sel, "files_id", &file->files_id);
- if (!mongoc_collection_remove (
- file->gridfs->chunks, MONGOC_REMOVE_NONE, &sel, NULL, error)) {
+ if (!mongoc_collection_delete_many (
+ file->gridfs->chunks, &sel, NULL, NULL, error)) {
goto cleanup;
}
ret = true;
cleanup:
bson_destroy (&sel);
return ret;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-file.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-file.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs.c
similarity index 92%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs.c
index 816c156e..d0673c74 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs.c
@@ -1,489 +1,482 @@
/*
* Copyright 2013 MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "gridfs"
#include "mongoc-bulk-operation.h"
#include "mongoc-client-private.h"
#include "mongoc-collection.h"
#include "mongoc-collection-private.h"
#include "mongoc-error.h"
#include "mongoc-index.h"
#include "mongoc-gridfs.h"
#include "mongoc-gridfs-private.h"
#include "mongoc-gridfs-file.h"
#include "mongoc-gridfs-file-private.h"
#include "mongoc-gridfs-file-list.h"
#include "mongoc-gridfs-file-list-private.h"
#include "mongoc-client.h"
#include "mongoc-trace-private.h"
#include "mongoc-cursor-private.h"
#include "mongoc-util-private.h"
#define MONGOC_GRIDFS_STREAM_CHUNK 4096
/**
* _mongoc_gridfs_ensure_index:
*
* ensure gridfs indexes
*
* Ensure fast searches for chunks via [ files_id, n ]
* Ensure fast searches for files via [ filename ]
*/
static bool
_mongoc_gridfs_ensure_index (mongoc_gridfs_t *gridfs, bson_error_t *error)
{
bson_t keys;
mongoc_index_opt_t opt;
bool r;
ENTRY;
bson_init (&keys);
bson_append_int32 (&keys, "files_id", -1, 1);
bson_append_int32 (&keys, "n", -1, 1);
mongoc_index_opt_init (&opt);
opt.unique = 1;
- /* mongoc_collection_create_index is deprecated, but works with MongoDB 2.4
- * once we really drop 2.4, call "createIndexes" command directly */
BEGIN_IGNORE_DEPRECATIONS
r = mongoc_collection_create_index (gridfs->chunks, &keys, &opt, error);
END_IGNORE_DEPRECATIONS
bson_destroy (&keys);
if (!r) {
RETURN (r);
}
bson_init (&keys);
bson_append_int32 (&keys, "filename", -1, 1);
bson_append_int32 (&keys, "uploadDate", -1, 1);
opt.unique = 0;
BEGIN_IGNORE_DEPRECATIONS
r = mongoc_collection_create_index (gridfs->files, &keys, &opt, error);
END_IGNORE_DEPRECATIONS
bson_destroy (&keys);
if (!r) {
RETURN (r);
}
RETURN (1);
}
mongoc_gridfs_t *
_mongoc_gridfs_new (mongoc_client_t *client,
const char *db,
const char *prefix,
bson_error_t *error)
{
mongoc_gridfs_t *gridfs;
- const mongoc_read_prefs_t *read_prefs;
- const mongoc_read_concern_t *read_concern;
- const mongoc_write_concern_t *write_concern;
char buf[128];
bool r;
uint32_t prefix_len;
ENTRY;
BSON_ASSERT (client);
BSON_ASSERT (db);
if (!prefix) {
prefix = "fs";
}
/* make sure prefix is short enough to bucket the chunks and files
* collections
*/
prefix_len = (uint32_t) strlen (prefix);
BSON_ASSERT (prefix_len + sizeof (".chunks") < sizeof (buf));
gridfs = (mongoc_gridfs_t *) bson_malloc0 (sizeof *gridfs);
gridfs->client = client;
- read_prefs = mongoc_client_get_read_prefs (client);
- read_concern = mongoc_client_get_read_concern (client);
- write_concern = mongoc_client_get_write_concern (client);
-
bson_snprintf (buf, sizeof (buf), "%s.chunks", prefix);
- gridfs->chunks = _mongoc_collection_new (
- client, db, buf, read_prefs, read_concern, write_concern);
+ gridfs->chunks = mongoc_client_get_collection (client, db, buf);
bson_snprintf (buf, sizeof (buf), "%s.files", prefix);
- gridfs->files = _mongoc_collection_new (
- client, db, buf, read_prefs, read_concern, write_concern);
+ gridfs->files = mongoc_client_get_collection (client, db, buf);
r = _mongoc_gridfs_ensure_index (gridfs, error);
if (!r) {
mongoc_gridfs_destroy (gridfs);
RETURN (NULL);
}
RETURN (gridfs);
}
/** Drop the gridfs bucket: the files collection first, then chunks.
 * Stops at the first failure and reports its error. */
bool
mongoc_gridfs_drop (mongoc_gridfs_t *gridfs, bson_error_t *error)
{
   ENTRY;

   if (!mongoc_collection_drop (gridfs->files, error)) {
      RETURN (0);
   }

   if (!mongoc_collection_drop (gridfs->chunks, error)) {
      RETURN (0);
   }

   RETURN (1);
}
/** Release a gridfs handle: destroys the files and chunks collection
 * handles and frees the struct. Does not drop any server-side data. */
void
mongoc_gridfs_destroy (mongoc_gridfs_t *gridfs)
{
   ENTRY;

   BSON_ASSERT (gridfs);

   mongoc_collection_destroy (gridfs->files);
   mongoc_collection_destroy (gridfs->chunks);

   bson_free (gridfs);

   EXIT;
}
/** find all matching gridfs files
 *
 * Thin wrapper around _mongoc_gridfs_file_list_new with no result limit. */
mongoc_gridfs_file_list_t *
mongoc_gridfs_find (mongoc_gridfs_t *gridfs, const bson_t *query)
{
   return _mongoc_gridfs_file_list_new (gridfs, query, 0);
}
/** find a single gridfs file
 *
 * Returns the first file matching @query, or NULL if none matched or an
 * error occurred. When no error occurred and @error was supplied, @error is
 * zeroed so the caller can distinguish "not found" from failure. */
mongoc_gridfs_file_t *
mongoc_gridfs_find_one (mongoc_gridfs_t *gridfs,
                        const bson_t *query,
                        bson_error_t *error)
{
   mongoc_gridfs_file_t *file;
   mongoc_gridfs_file_list_t *list;

   ENTRY;

   /* limit the underlying query to a single result */
   list = _mongoc_gridfs_file_list_new (gridfs, query, 1);

   file = mongoc_gridfs_file_list_next (list);
   if (!mongoc_gridfs_file_list_error (list, error) && error) {
      /* no error, but an error out-pointer was provided - clear it */
      memset (error, 0, sizeof (*error));
   }

   mongoc_gridfs_file_list_destroy (list);

   RETURN (file);
}
/** find all matching gridfs files
 *
 * Thin wrapper around _mongoc_gridfs_file_list_new_with_opts; @opts are
 * passed through to the underlying find. */
mongoc_gridfs_file_list_t *
mongoc_gridfs_find_with_opts (mongoc_gridfs_t *gridfs,
                              const bson_t *filter,
                              const bson_t *opts)
{
   return _mongoc_gridfs_file_list_new_with_opts (gridfs, filter, opts);
}
/** find a single gridfs file
 *
 * Like mongoc_gridfs_find_with_opts but forces "limit": 1 (overriding any
 * caller-supplied limit) and returns only the first match, or NULL. When no
 * error occurred and @error was supplied, @error is zeroed. */
mongoc_gridfs_file_t *
mongoc_gridfs_find_one_with_opts (mongoc_gridfs_t *gridfs,
                                  const bson_t *filter,
                                  const bson_t *opts,
                                  bson_error_t *error)
{
   mongoc_gridfs_file_list_t *list;
   mongoc_gridfs_file_t *file;
   bson_t limited_opts;

   ENTRY;

   /* copy the caller's opts, replacing any "limit" with our own */
   bson_init (&limited_opts);
   if (opts) {
      bson_copy_to_excluding_noinit (
         opts, &limited_opts, "limit", (char *) NULL);
   }
   BSON_APPEND_INT32 (&limited_opts, "limit", 1);

   list = _mongoc_gridfs_file_list_new_with_opts (gridfs, filter, &limited_opts);

   file = mongoc_gridfs_file_list_next (list);
   if (!mongoc_gridfs_file_list_error (list, error) && error) {
      /* no error, but an error out-pointer was provided - clear it */
      memset (error, 0, sizeof (*error));
   }

   mongoc_gridfs_file_list_destroy (list);
   bson_destroy (&limited_opts);

   RETURN (file);
}
/** find a single gridfs file by filename
 *
 * Convenience wrapper: matches on the "filename" field and delegates to
 * mongoc_gridfs_find_one_with_opts. */
mongoc_gridfs_file_t *
mongoc_gridfs_find_one_by_filename (mongoc_gridfs_t *gridfs,
                                    const char *filename,
                                    bson_error_t *error)
{
   bson_t filter = BSON_INITIALIZER;
   mongoc_gridfs_file_t *file;

   bson_append_utf8 (&filter, "filename", -1, filename, -1);
   file = mongoc_gridfs_find_one_with_opts (gridfs, &filter, NULL, error);
   bson_destroy (&filter);

   return file;
}
/** create a gridfs file from a stream
 *
 * The stream is fully consumed in creating the file: on success it is
 * released via mongoc_stream_failed and the new file, rewound to offset 0,
 * is returned. On a read or write error NULL is returned and the partially
 * written file is destroyed.
 */
mongoc_gridfs_file_t *
mongoc_gridfs_create_file_from_stream (mongoc_gridfs_t *gridfs,
                                       mongoc_stream_t *stream,
                                       mongoc_gridfs_file_opt_t *opt)
{
   mongoc_gridfs_file_t *file;
   ssize_t r;
   uint8_t buf[MONGOC_GRIDFS_STREAM_CHUNK];
   mongoc_iovec_t iov;
   int timeout;

   ENTRY;

   BSON_ASSERT (gridfs);
   BSON_ASSERT (stream);

   iov.iov_base = (void *) buf;
   iov.iov_len = 0;

   file = _mongoc_gridfs_file_new (gridfs, opt);
   timeout = gridfs->client->cluster.sockettimeoutms;

   for (;;) {
      r = mongoc_stream_read (
         stream, iov.iov_base, MONGOC_GRIDFS_STREAM_CHUNK, 0, timeout);

      if (r > 0) {
         iov.iov_len = r;
         /* fix: a failed write was previously ignored, which could silently
          * produce a truncated file */
         if (mongoc_gridfs_file_writev (file, &iov, 1, timeout) < 0) {
            mongoc_gridfs_file_destroy (file);
            RETURN (NULL);
         }
      } else if (r == 0) {
         /* end of stream */
         break;
      } else {
         mongoc_gridfs_file_destroy (file);
         RETURN (NULL);
      }
   }

   mongoc_stream_failed (stream);

   /* rewind so the caller can immediately read the file back */
   mongoc_gridfs_file_seek (file, 0, SEEK_SET);

   RETURN (file);
}
/** create an empty gridfs file
 *
 * Thin wrapper around _mongoc_gridfs_file_new; @opt may be NULL for all
 * defaults. The file is not persisted until saved or written to. */
mongoc_gridfs_file_t *
mongoc_gridfs_create_file (mongoc_gridfs_t *gridfs,
                           mongoc_gridfs_file_opt_t *opt)
{
   mongoc_gridfs_file_t *file;

   ENTRY;

   BSON_ASSERT (gridfs);

   file = _mongoc_gridfs_file_new (gridfs, opt);

   RETURN (file);
}
/** accessor functions for collections */

/** Return the underlying files collection handle (owned by the gridfs). */
mongoc_collection_t *
mongoc_gridfs_get_files (mongoc_gridfs_t *gridfs)
{
   BSON_ASSERT (gridfs);

   return gridfs->files;
}
/** Return the underlying chunks collection handle (owned by the gridfs). */
mongoc_collection_t *
mongoc_gridfs_get_chunks (mongoc_gridfs_t *gridfs)
{
   BSON_ASSERT (gridfs);

   return gridfs->chunks;
}
bool
mongoc_gridfs_remove_by_filename (mongoc_gridfs_t *gridfs,
const char *filename,
bson_error_t *error)
{
mongoc_bulk_operation_t *bulk_files = NULL;
mongoc_bulk_operation_t *bulk_chunks = NULL;
mongoc_cursor_t *cursor = NULL;
bson_error_t files_error;
bson_error_t chunks_error;
const bson_t *doc;
const char *key;
char keybuf[16];
int count = 0;
bool chunks_ret;
bool files_ret;
bool ret = false;
bson_iter_t iter;
bson_t *files_q = NULL;
bson_t *chunks_q = NULL;
bson_t q = BSON_INITIALIZER;
bson_t fields = BSON_INITIALIZER;
bson_t ar = BSON_INITIALIZER;
+ bson_t opts = BSON_INITIALIZER;
BSON_ASSERT (gridfs);
if (!filename) {
bson_set_error (error,
MONGOC_ERROR_GRIDFS,
MONGOC_ERROR_GRIDFS_INVALID_FILENAME,
"A non-NULL filename must be specified.");
return false;
}
/*
* Find all files matching this filename. Hopefully just one, but not
* strictly required!
*/
BSON_APPEND_UTF8 (&q, "filename", filename);
BSON_APPEND_INT32 (&fields, "_id", 1);
cursor = _mongoc_cursor_new (gridfs->client,
gridfs->files->ns,
MONGOC_QUERY_NONE,
0,
0,
0,
- false /* is command */,
+ true /* is_find */,
&q,
&fields,
NULL,
NULL);
BSON_ASSERT (cursor);
while (mongoc_cursor_next (cursor, &doc)) {
if (bson_iter_init_find (&iter, doc, "_id")) {
const bson_value_t *value = bson_iter_value (&iter);
bson_uint32_to_string (count, &key, keybuf, sizeof keybuf);
BSON_APPEND_VALUE (&ar, key, value);
}
}
if (mongoc_cursor_error (cursor, error)) {
goto failure;
}
+ bson_append_bool (&opts, "ordered", 7, false);
bulk_files =
- mongoc_collection_create_bulk_operation (gridfs->files, false, NULL);
+ mongoc_collection_create_bulk_operation_with_opts (gridfs->files, &opts);
bulk_chunks =
- mongoc_collection_create_bulk_operation (gridfs->chunks, false, NULL);
+ mongoc_collection_create_bulk_operation_with_opts (gridfs->chunks, &opts);
+
+ bson_destroy (&opts);
files_q = BCON_NEW ("_id", "{", "$in", BCON_ARRAY (&ar), "}");
chunks_q = BCON_NEW ("files_id", "{", "$in", BCON_ARRAY (&ar), "}");
mongoc_bulk_operation_remove (bulk_files, files_q);
mongoc_bulk_operation_remove (bulk_chunks, chunks_q);
files_ret = mongoc_bulk_operation_execute (bulk_files, NULL, &files_error);
chunks_ret =
mongoc_bulk_operation_execute (bulk_chunks, NULL, &chunks_error);
if (error) {
if (!files_ret) {
memcpy (error, &files_error, sizeof *error);
} else if (!chunks_ret) {
memcpy (error, &chunks_error, sizeof *error);
}
}
ret = (files_ret && chunks_ret);
failure:
if (cursor) {
mongoc_cursor_destroy (cursor);
}
if (bulk_files) {
mongoc_bulk_operation_destroy (bulk_files);
}
if (bulk_chunks) {
mongoc_bulk_operation_destroy (bulk_chunks);
}
bson_destroy (&q);
bson_destroy (&fields);
bson_destroy (&ar);
if (files_q) {
bson_destroy (files_q);
}
if (chunks_q) {
bson_destroy (chunks_q);
}
return ret;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gridfs.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gridfs.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gssapi-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gssapi-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gssapi-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gssapi-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gssapi.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gssapi.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-gssapi.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-gssapi.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake-compiler-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake-compiler-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake-compiler-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake-compiler-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake-os-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake-os-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake-os-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake-os-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake-private.h
similarity index 92%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake-private.h
index 9fd6348b..cfc473d5 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake-private.h
@@ -1,103 +1,108 @@
/*
* Copyright 2016 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_HANDSHAKE_PRIVATE_H
#define MONGOC_HANDSHAKE_PRIVATE_H
#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
BSON_BEGIN_DECLS
#define HANDSHAKE_FIELD "client"
#define HANDSHAKE_PLATFORM_FIELD "platform"
#define HANDSHAKE_MAX_SIZE 512
#define HANDSHAKE_OS_TYPE_MAX 32
#define HANDSHAKE_OS_NAME_MAX 32
#define HANDSHAKE_OS_VERSION_MAX 32
#define HANDSHAKE_OS_ARCHITECTURE_MAX 32
#define HANDSHAKE_DRIVER_NAME_MAX 64
#define HANDSHAKE_DRIVER_VERSION_MAX 32
/* platform has no fixed max size. It can just occupy the remaining
* available space in the document. */
/* When adding a new field to mongoc-config.h.in, update this! */
typedef enum {
MONGOC_MD_FLAG_ENABLE_CRYPTO = 1 << 0,
MONGOC_MD_FLAG_ENABLE_CRYPTO_CNG = 1 << 1,
MONGOC_MD_FLAG_ENABLE_CRYPTO_COMMON_CRYPTO = 1 << 2,
MONGOC_MD_FLAG_ENABLE_CRYPTO_LIBCRYPTO = 1 << 3,
MONGOC_MD_FLAG_ENABLE_CRYPTO_SYSTEM_PROFILE = 1 << 4,
MONGOC_MD_FLAG_ENABLE_SASL = 1 << 5,
MONGOC_MD_FLAG_ENABLE_SSL = 1 << 6,
MONGOC_MD_FLAG_ENABLE_SSL_OPENSSL = 1 << 7,
MONGOC_MD_FLAG_ENABLE_SSL_SECURE_CHANNEL = 1 << 8,
MONGOC_MD_FLAG_ENABLE_SSL_SECURE_TRANSPORT = 1 << 9,
MONGOC_MD_FLAG_EXPERIMENTAL_FEATURES = 1 << 10,
MONGOC_MD_FLAG_HAVE_SASL_CLIENT_DONE = 1 << 11,
MONGOC_MD_FLAG_HAVE_WEAK_SYMBOLS = 1 << 12,
MONGOC_MD_FLAG_NO_AUTOMATIC_GLOBALS = 1 << 13,
MONGOC_MD_FLAG_ENABLE_SSL_LIBRESSL = 1 << 14,
MONGOC_MD_FLAG_ENABLE_SASL_CYRUS = 1 << 15,
MONGOC_MD_FLAG_ENABLE_SASL_SSPI = 1 << 16,
MONGOC_MD_FLAG_HAVE_SOCKLEN = 1 << 17,
MONGOC_MD_FLAG_ENABLE_COMPRESSION = 1 << 18,
MONGOC_MD_FLAG_ENABLE_COMPRESSION_SNAPPY = 1 << 19,
MONGOC_MD_FLAG_ENABLE_COMPRESSION_ZLIB = 1 << 20,
MONGOC_MD_FLAG_ENABLE_SASL_GSSAPI = 1 << 21,
+ MONGOC_MD_FLAG_ENABLE_RES_NSEARCH = 1 << 22,
+ MONGOC_MD_FLAG_ENABLE_RES_NDESTROY = 1 << 23,
+ MONGOC_MD_FLAG_ENABLE_RES_NCLOSE = 1 << 24,
+ MONGOC_MD_FLAG_ENABLE_RES_SEARCH = 1 << 25,
+ MONGOC_MD_FLAG_ENABLE_DNSAPI = 1 << 26,
} mongoc_handshake_config_flags_t;
typedef struct _mongoc_handshake_t {
char *os_type;
char *os_name;
char *os_version;
char *os_architecture;
char *driver_name;
char *driver_version;
char *platform;
bool frozen;
} mongoc_handshake_t;
void
_mongoc_handshake_init (void);
void
_mongoc_handshake_cleanup (void);
bool
_mongoc_handshake_build_doc_with_application (bson_t *doc,
const char *application);
void
_mongoc_handshake_freeze (void);
mongoc_handshake_t *
_mongoc_handshake_get (void);
bool
_mongoc_handshake_appname_is_valid (const char *appname);
BSON_END_DECLS
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake.c
similarity index 97%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake.c
index a445caa0..02ee95a2 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake.c
@@ -1,541 +1,561 @@
/*
* Copyright 2016 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bson.h>
#ifdef _POSIX_VERSION
#include <sys/utsname.h>
#endif
#ifdef _WIN32
#include <windows.h>
#endif
#include "mongoc-linux-distro-scanner-private.h"
#include "mongoc-handshake.h"
#include "mongoc-handshake-compiler-private.h"
#include "mongoc-handshake-os-private.h"
#include "mongoc-handshake-private.h"
#include "mongoc-client.h"
#include "mongoc-client-private.h"
#include "mongoc-error.h"
#include "mongoc-log.h"
#include "mongoc-version.h"
#include "mongoc-util-private.h"
/*
* Global handshake data instance. Initialized at startup from mongoc_init ()
*
* Can be modified by calls to mongoc_handshake_data_append ()
*/
static mongoc_handshake_t gMongocHandshake;
static uint32_t
_get_config_bitfield (void)
{
uint32_t bf = 0;
#ifdef MONGOC_ENABLE_SSL_SECURE_CHANNEL
bf |= MONGOC_MD_FLAG_ENABLE_SSL_SECURE_CHANNEL;
#endif
#ifdef MONGOC_ENABLE_CRYPTO_CNG
bf |= MONGOC_MD_FLAG_ENABLE_CRYPTO_CNG;
#endif
#ifdef MONGOC_ENABLE_SSL_SECURE_TRANSPORT
bf |= MONGOC_MD_FLAG_ENABLE_SSL_SECURE_TRANSPORT;
#endif
#ifdef MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO
bf |= MONGOC_MD_FLAG_ENABLE_CRYPTO_COMMON_CRYPTO;
#endif
#ifdef MONGOC_ENABLE_SSL_OPENSSL
bf |= MONGOC_MD_FLAG_ENABLE_SSL_OPENSSL;
#endif
#ifdef MONGOC_ENABLE_CRYPTO_LIBCRYPTO
bf |= MONGOC_MD_FLAG_ENABLE_CRYPTO_LIBCRYPTO;
#endif
#ifdef MONGOC_ENABLE_SSL
bf |= MONGOC_MD_FLAG_ENABLE_SSL;
#endif
#ifdef MONGOC_ENABLE_CRYPTO
bf |= MONGOC_MD_FLAG_ENABLE_CRYPTO;
#endif
#ifdef MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE
bf |= MONGOC_MD_FLAG_ENABLE_CRYPTO_SYSTEM_PROFILE;
#endif
#ifdef MONGOC_ENABLE_SASL
bf |= MONGOC_MD_FLAG_ENABLE_SASL;
#endif
#ifdef MONGOC_HAVE_SASL_CLIENT_DONE
bf |= MONGOC_MD_FLAG_HAVE_SASL_CLIENT_DONE;
#endif
#ifdef MONGOC_HAVE_WEAK_SYMBOLS
bf |= MONGOC_MD_FLAG_HAVE_WEAK_SYMBOLS;
#endif
#ifdef MONGOC_NO_AUTOMATIC_GLOBALS
bf |= MONGOC_MD_FLAG_NO_AUTOMATIC_GLOBALS;
#endif
#ifdef MONGOC_EXPERIMENTAL_FEATURES
bf |= MONGOC_MD_FLAG_EXPERIMENTAL_FEATURES;
#endif
#ifdef MONGOC_ENABLE_SSL_LIBRESSL
bf |= MONGOC_MD_FLAG_ENABLE_SSL_LIBRESSL;
#endif
#ifdef MONGOC_ENABLE_SASL_CYRUS
bf |= MONGOC_MD_FLAG_ENABLE_SASL_CYRUS;
#endif
#ifdef MONGOC_ENABLE_SASL_SSPI
bf |= MONGOC_MD_FLAG_ENABLE_SASL_SSPI;
#endif
#ifdef MONGOC_HAVE_SOCKLEN
bf |= MONGOC_MD_FLAG_HAVE_SOCKLEN;
#endif
#ifdef MONGOC_ENABLE_COMPRESSION
bf |= MONGOC_MD_FLAG_ENABLE_COMPRESSION;
#endif
#ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY
bf |= MONGOC_MD_FLAG_ENABLE_COMPRESSION_SNAPPY;
#endif
#ifdef MONGOC_ENABLE_COMPRESSION_ZLIB
bf |= MONGOC_MD_FLAG_ENABLE_COMPRESSION_ZLIB;
#endif
#ifdef MONGOC_MD_FLAG_ENABLE_SASL_GSSAPI
bf |= MONGOC_MD_FLAG_ENABLE_SASL_GSSAPI;
#endif
+#ifdef MONGOC_HAVE_RES_NSEARCH
+ bf |= MONGOC_MD_FLAG_ENABLE_RES_NSEARCH;
+#endif
+
+#ifdef MONGOC_HAVE_RES_NDESTROY
+ bf |= MONGOC_MD_FLAG_ENABLE_RES_NDESTROY;
+#endif
+
+#ifdef MONGOC_HAVE_RES_NCLOSE
+ bf |= MONGOC_MD_FLAG_ENABLE_RES_NCLOSE;
+#endif
+
+#ifdef MONGOC_HAVE_RES_SEARCH
+ bf |= MONGOC_MD_FLAG_ENABLE_RES_SEARCH;
+#endif
+
+#ifdef MONGOC_HAVE_DNSAPI
+ bf |= MONGOC_MD_FLAG_ENABLE_DNSAPI;
+#endif
+
return bf;
}
static char *
_get_os_type (void)
{
#ifdef MONGOC_OS_TYPE
return bson_strndup (MONGOC_OS_TYPE, HANDSHAKE_OS_TYPE_MAX);
#else
return bson_strndup ("unknown", HANDSHAKE_OS_TYPE_MAX);
#endif
}
static char *
_get_os_architecture (void)
{
const char *ret = NULL;
#ifdef _WIN32
SYSTEM_INFO system_info;
DWORD arch;
GetSystemInfo (&system_info);
arch = system_info.wProcessorArchitecture;
switch (arch) {
case PROCESSOR_ARCHITECTURE_AMD64:
ret = "x86_64";
break;
case PROCESSOR_ARCHITECTURE_ARM:
ret = "ARM";
break;
case PROCESSOR_ARCHITECTURE_IA64:
ret = "IA64";
break;
case PROCESSOR_ARCHITECTURE_INTEL:
ret = "x86";
break;
case PROCESSOR_ARCHITECTURE_UNKNOWN:
ret = "Unknown";
break;
default:
ret = "Other";
break;
}
#elif defined(_POSIX_VERSION)
struct utsname system_info;
if (uname (&system_info) >= 0) {
ret = system_info.machine;
}
#endif
if (ret) {
return bson_strndup (ret, HANDSHAKE_OS_ARCHITECTURE_MAX);
}
return NULL;
}
#ifndef MONGOC_OS_IS_LINUX
static char *
_get_os_name (void)
{
#ifdef MONGOC_OS_NAME
return bson_strndup (MONGOC_OS_NAME, HANDSHAKE_OS_NAME_MAX);
#elif defined(_POSIX_VERSION)
struct utsname system_info;
if (uname (&system_info) >= 0) {
return bson_strndup (system_info.sysname, HANDSHAKE_OS_NAME_MAX);
}
#endif
return NULL;
}
static char *
_get_os_version (void)
{
char *ret = bson_malloc (HANDSHAKE_OS_VERSION_MAX);
bool found = false;
#ifdef _WIN32
OSVERSIONINFO osvi;
ZeroMemory (&osvi, sizeof (OSVERSIONINFO));
osvi.dwOSVersionInfoSize = sizeof (OSVERSIONINFO);
if (GetVersionEx (&osvi)) {
bson_snprintf (ret,
HANDSHAKE_OS_VERSION_MAX,
"%lu.%lu (%lu)",
osvi.dwMajorVersion,
osvi.dwMinorVersion,
osvi.dwBuildNumber);
found = true;
} else {
MONGOC_WARNING ("Error with GetVersionEx(): %lu", GetLastError ());
}
#elif defined(_POSIX_VERSION)
struct utsname system_info;
if (uname (&system_info) >= 0) {
bson_strncpy (ret, system_info.release, HANDSHAKE_OS_VERSION_MAX);
found = true;
} else {
MONGOC_WARNING ("Error with uname(): %d", errno);
}
#endif
if (!found) {
bson_free (ret);
ret = NULL;
}
return ret;
}
#endif
static void
_get_system_info (mongoc_handshake_t *handshake)
{
handshake->os_type = _get_os_type ();
#ifdef MONGOC_OS_IS_LINUX
_mongoc_linux_distro_scanner_get_distro (&handshake->os_name,
&handshake->os_version);
#else
handshake->os_name = _get_os_name ();
handshake->os_version = _get_os_version ();
#endif
handshake->os_architecture = _get_os_architecture ();
}
static void
_free_system_info (mongoc_handshake_t *handshake)
{
bson_free (handshake->os_type);
bson_free (handshake->os_name);
bson_free (handshake->os_version);
bson_free (handshake->os_architecture);
}
static void
_get_driver_info (mongoc_handshake_t *handshake)
{
handshake->driver_name = bson_strndup ("mongoc", HANDSHAKE_DRIVER_NAME_MAX);
handshake->driver_version =
bson_strndup (MONGOC_VERSION_S, HANDSHAKE_DRIVER_VERSION_MAX);
}
static void
_free_driver_info (mongoc_handshake_t *handshake)
{
bson_free (handshake->driver_name);
bson_free (handshake->driver_version);
}
static void
_set_platform_string (mongoc_handshake_t *handshake)
{
bson_string_t *str;
str = bson_string_new ("");
bson_string_append_printf (str, "cfg=0x%x", _get_config_bitfield ());
#ifdef _POSIX_VERSION
bson_string_append_printf (str, " posix=%ld", _POSIX_VERSION);
#endif
#ifdef __STDC_VERSION__
bson_string_append_printf (str, " stdc=%ld", __STDC_VERSION__);
#endif
bson_string_append_printf (str, " CC=%s", MONGOC_COMPILER);
#ifdef MONGOC_COMPILER_VERSION
bson_string_append_printf (str, " %s", MONGOC_COMPILER_VERSION);
#endif
if (strlen (MONGOC_EVALUATE_STR (MONGOC_USER_SET_CFLAGS)) > 0) {
bson_string_append_printf (
str, " CFLAGS=%s", MONGOC_EVALUATE_STR (MONGOC_USER_SET_CFLAGS));
}
if (strlen (MONGOC_EVALUATE_STR (MONGOC_USER_SET_LDFLAGS)) > 0) {
bson_string_append_printf (
str, " LDFLAGS=%s", MONGOC_EVALUATE_STR (MONGOC_USER_SET_LDFLAGS));
}
handshake->platform = bson_string_free (str, false);
}
static void
_free_platform_string (mongoc_handshake_t *handshake)
{
bson_free (handshake->platform);
}
void
_mongoc_handshake_init (void)
{
_get_system_info (_mongoc_handshake_get ());
_get_driver_info (_mongoc_handshake_get ());
_set_platform_string (_mongoc_handshake_get ());
_mongoc_handshake_get ()->frozen = false;
}
void
_mongoc_handshake_cleanup (void)
{
_free_system_info (_mongoc_handshake_get ());
_free_driver_info (_mongoc_handshake_get ());
_free_platform_string (_mongoc_handshake_get ());
}
static bool
_append_platform_field (bson_t *doc, const char *platform)
{
int max_platform_str_size;
/* Compute space left for platform field */
max_platform_str_size =
HANDSHAKE_MAX_SIZE - (doc->len +
/* 1 byte for utf8 tag */
1 +
/* key size */
strlen (HANDSHAKE_PLATFORM_FIELD) +
1 +
/* 4 bytes for length of string */
4);
if (max_platform_str_size <= 0) {
return false;
}
max_platform_str_size =
BSON_MIN (max_platform_str_size, strlen (platform) + 1);
bson_append_utf8 (
doc, HANDSHAKE_PLATFORM_FIELD, -1, platform, max_platform_str_size - 1);
BSON_ASSERT (doc->len <= HANDSHAKE_MAX_SIZE);
return true;
}
/*
* Return true if we build the document, and it's not too big
* false if there's no way to prevent the doc from being too big. In this
* case, the caller shouldn't include it with isMaster
*/
bool
_mongoc_handshake_build_doc_with_application (bson_t *doc, const char *appname)
{
const mongoc_handshake_t *md = _mongoc_handshake_get ();
bson_t child;
if (appname) {
BSON_APPEND_DOCUMENT_BEGIN (doc, "application", &child);
BSON_APPEND_UTF8 (&child, "name", appname);
bson_append_document_end (doc, &child);
}
BSON_APPEND_DOCUMENT_BEGIN (doc, "driver", &child);
BSON_APPEND_UTF8 (&child, "name", md->driver_name);
BSON_APPEND_UTF8 (&child, "version", md->driver_version);
bson_append_document_end (doc, &child);
BSON_APPEND_DOCUMENT_BEGIN (doc, "os", &child);
BSON_ASSERT (md->os_type);
BSON_APPEND_UTF8 (&child, "type", md->os_type);
if (md->os_name) {
BSON_APPEND_UTF8 (&child, "name", md->os_name);
}
if (md->os_version) {
BSON_APPEND_UTF8 (&child, "version", md->os_version);
}
if (md->os_architecture) {
BSON_APPEND_UTF8 (&child, "architecture", md->os_architecture);
}
bson_append_document_end (doc, &child);
if (doc->len > HANDSHAKE_MAX_SIZE) {
/* We've done all we can possibly do to ensure the current
* document is below the maxsize, so if it overflows there is
* nothing else we can do, so we fail */
return false;
}
if (md->platform) {
_append_platform_field (doc, md->platform);
}
return true;
}
void
_mongoc_handshake_freeze (void)
{
_mongoc_handshake_get ()->frozen = true;
}
/*
* free (*s) and make *s point to *s concated with suffix.
* If *s is NULL it's treated like it's an empty string.
* If suffix is NULL, nothing happens.
*/
static void
_append_and_truncate (char **s, const char *suffix, int max_len)
{
char *old_str = *s;
char *prefix;
const int delim_len = strlen (" / ");
int space_for_suffix;
BSON_ASSERT (s);
prefix = old_str ? old_str : "";
if (!suffix) {
return;
}
space_for_suffix = max_len - strlen (prefix) - delim_len;
BSON_ASSERT (space_for_suffix >= 0);
*s = bson_strdup_printf ("%s / %.*s", prefix, space_for_suffix, suffix);
BSON_ASSERT (strlen (*s) <= max_len);
bson_free (old_str);
}
/*
* Set some values in our global handshake struct. These values will be sent
* to the server as part of the initial connection handshake (isMaster).
* If this function is called more than once, or after we've connected to a
* mongod, then it will do nothing and return false. It will return true if it
* successfully sets the values.
*
* All arguments are optional.
*/
bool
mongoc_handshake_data_append (const char *driver_name,
const char *driver_version,
const char *platform)
{
int max_size = 0;
if (_mongoc_handshake_get ()->frozen) {
MONGOC_ERROR ("Cannot set handshake more than once");
return false;
}
_append_and_truncate (&_mongoc_handshake_get ()->driver_name,
driver_name,
HANDSHAKE_DRIVER_NAME_MAX);
_append_and_truncate (&_mongoc_handshake_get ()->driver_version,
driver_version,
HANDSHAKE_DRIVER_VERSION_MAX);
max_size =
HANDSHAKE_MAX_SIZE -
-_mongoc_strlen_or_zero (_mongoc_handshake_get ()->os_type) -
_mongoc_strlen_or_zero (_mongoc_handshake_get ()->os_name) -
_mongoc_strlen_or_zero (_mongoc_handshake_get ()->os_version) -
_mongoc_strlen_or_zero (_mongoc_handshake_get ()->os_architecture) -
_mongoc_strlen_or_zero (_mongoc_handshake_get ()->driver_name) -
_mongoc_strlen_or_zero (_mongoc_handshake_get ()->driver_version);
_append_and_truncate (
&_mongoc_handshake_get ()->platform, platform, max_size);
_mongoc_handshake_freeze ();
return true;
}
mongoc_handshake_t *
_mongoc_handshake_get (void)
{
return &gMongocHandshake;
}
bool
_mongoc_handshake_appname_is_valid (const char *appname)
{
return strlen (appname) <= MONGOC_HANDSHAKE_APPNAME_MAX;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-handshake.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-handshake.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-host-list-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-host-list-private.h
similarity index 86%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-host-list-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-host-list-private.h
index 714dff59..b2cf9181 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-host-list-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-host-list-private.h
@@ -1,44 +1,49 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_HOST_LIST_PRIVATE_H
#define MONGOC_HOST_LIST_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include "mongoc-host-list.h"
BSON_BEGIN_DECLS
+mongoc_host_list_t *
+_mongoc_host_list_push (const char *host,
+ uint16_t port,
+ int family,
+ mongoc_host_list_t *next);
bool
_mongoc_host_list_from_string (mongoc_host_list_t *host_list,
const char *host_and_port);
bool
_mongoc_host_list_equal (const mongoc_host_list_t *host_a,
const mongoc_host_list_t *host_b);
void
_mongoc_host_list_destroy_all (mongoc_host_list_t *host);
BSON_END_DECLS
#endif /* MONGOC_HOST_LIST_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-host-list.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-host-list.c
similarity index 85%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-host-list.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-host-list.c
index cadf6319..5d076ba4 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-host-list.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-host-list.c
@@ -1,158 +1,192 @@
/*
* Copyright 2015 MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-host-list-private.h"
/* strcasecmp on windows */
#include "mongoc-util-private.h"
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_host_list_push --
+ *
+ * Add a host to the front of the list and return it.
+ *
+ * Side effects:
+ * None.
+ *
+ *--------------------------------------------------------------------------
+ */
+mongoc_host_list_t *
+_mongoc_host_list_push (const char *host,
+ uint16_t port,
+ int family,
+ mongoc_host_list_t *next)
+{
+ mongoc_host_list_t *h;
+
+ BSON_ASSERT (host);
+
+ h = bson_malloc0 (sizeof (mongoc_host_list_t));
+ bson_strncpy (h->host, host, sizeof h->host);
+ h->port = port;
+ bson_snprintf (
+ h->host_and_port, sizeof h->host_and_port, "%s:%hu", host, port);
+
+ h->family = family;
+ h->next = next;
+
+ return h;
+}
+
/*
*--------------------------------------------------------------------------
*
* _mongoc_host_list_equal --
*
* Check two hosts have the same domain (case-insensitive), port,
* and address family.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_host_list_equal (const mongoc_host_list_t *host_a,
const mongoc_host_list_t *host_b)
{
return (!strcasecmp (host_a->host_and_port, host_b->host_and_port) &&
host_a->family == host_b->family);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_host_list_destroy_all --
*
* Destroy whole linked list of hosts.
*
*--------------------------------------------------------------------------
*/
void
_mongoc_host_list_destroy_all (mongoc_host_list_t *host)
{
mongoc_host_list_t *tmp;
while (host) {
tmp = host->next;
bson_free (host);
host = tmp;
}
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_host_list_from_string --
*
* Populate a mongoc_host_list_t from a fully qualified address
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_host_list_from_string (mongoc_host_list_t *link_, const char *address)
{
char *close_bracket;
bool bracket_at_end;
char *sport;
uint16_t port;
if (*address == '\0') {
MONGOC_ERROR ("empty address in _mongoc_host_list_from_string");
BSON_ASSERT (false);
}
close_bracket = strchr (address, ']');
bracket_at_end = close_bracket && *(close_bracket + 1) == '\0';
sport = strrchr (address, ':');
if (sport < close_bracket || (close_bracket && sport != close_bracket + 1)) {
/* ignore colons within IPv6 address like "[fe80::1]" */
sport = NULL;
}
/* like "example.com:27019" or "[fe80::1]:27019", but not "[fe80::1]" */
if (sport) {
if (!mongoc_parse_port (&port, sport + 1)) {
return false;
}
link_->port = port;
} else {
link_->port = MONGOC_DEFAULT_PORT;
}
/* like "[fe80::1]:27019" or ""[fe80::1]" */
if (*address == '[' && (bracket_at_end || (close_bracket && sport))) {
link_->family = AF_INET6;
bson_strncpy (link_->host,
address + 1,
BSON_MIN (close_bracket - address, sizeof link_->host));
mongoc_lowercase (link_->host, link_->host);
bson_snprintf (link_->host_and_port,
sizeof link_->host_and_port,
"[%s]:%hu",
link_->host,
link_->port);
} else if (strchr (address, '/') && strstr (address, ".sock")) {
link_->family = AF_UNIX;
if (sport) {
/* weird: "/tmp/mongodb.sock:1234", ignore the port number */
bson_strncpy (link_->host,
address,
BSON_MIN (sport - address + 1, sizeof link_->host));
} else {
bson_strncpy (link_->host, address, sizeof link_->host);
}
bson_strncpy (
link_->host_and_port, link_->host, sizeof link_->host_and_port);
} else if (sport == address) {
/* bad address like ":27017" */
return false;
} else {
link_->family = AF_INET;
if (sport) {
bson_strncpy (link_->host,
address,
BSON_MIN (sport - address + 1, sizeof link_->host));
} else {
bson_strncpy (link_->host, address, sizeof link_->host);
}
mongoc_lowercase (link_->host, link_->host);
bson_snprintf (link_->host_and_port,
sizeof link_->host_and_port,
"%s:%hu",
link_->host,
link_->port);
}
link_->next = NULL;
return true;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-host-list.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-host-list.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-host-list.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-host-list.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-index.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-index.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-index.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-index.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-index.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-index.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-index.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-index.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-init.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-init.c
similarity index 94%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-init.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-init.c
index 150a5499..bbb841d4 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-init.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-init.c
@@ -1,204 +1,199 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bson.h>
#include "mongoc-config.h"
#include "mongoc-counters-private.h"
#include "mongoc-init.h"
#include "mongoc-handshake-private.h"
#ifdef MONGOC_ENABLE_SSL
#include "mongoc-scram-private.h"
#include "mongoc-ssl.h"
#ifdef MONGOC_ENABLE_SSL_OPENSSL
#include "mongoc-openssl-private.h"
#elif defined(MONGOC_ENABLE_SSL_LIBRESSL)
#include "tls.h"
#endif
#endif
#include "mongoc-thread-private.h"
-#include "mongoc-trace-private.h"
#ifndef MONGOC_NO_AUTOMATIC_GLOBALS
#pragma message( \
"Configure the driver with --disable-automatic-init-and-cleanup\
(if using ./configure) or ENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF (with cmake).\
Automatic cleanup is deprecated and will be removed in version 2.0.")
#endif
#ifdef MONGOC_ENABLE_SASL_CYRUS
#include <sasl/sasl.h>
-#include "mongoc-cyrus-private.h"
static void *
mongoc_cyrus_mutex_alloc (void)
{
mongoc_mutex_t *mutex;
mutex = (mongoc_mutex_t *) bson_malloc0 (sizeof (mongoc_mutex_t));
mongoc_mutex_init (mutex);
return (void *) mutex;
}
static int
mongoc_cyrus_mutex_lock (void *mutex)
{
mongoc_mutex_lock ((mongoc_mutex_t *) mutex);
return SASL_OK;
}
static int
mongoc_cyrus_mutex_unlock (void *mutex)
{
mongoc_mutex_unlock ((mongoc_mutex_t *) mutex);
return SASL_OK;
}
static void
mongoc_cyrus_mutex_free (void *mutex)
{
mongoc_mutex_destroy ((mongoc_mutex_t *) mutex);
bson_free (mutex);
}
#endif /* MONGOC_ENABLE_SASL_CYRUS */
static MONGOC_ONCE_FUN (_mongoc_do_init)
{
#ifdef MONGOC_ENABLE_SASL_CYRUS
int status;
- sasl_callback_t callbacks[] = {
- {SASL_CB_LOG, SASL_CALLBACK_FN (_mongoc_cyrus_log), NULL},
- {SASL_CB_LIST_END}};
#endif
#ifdef MONGOC_ENABLE_SSL_OPENSSL
_mongoc_openssl_init ();
#elif defined(MONGOC_ENABLE_SSL_LIBRESSL)
tls_init ();
#endif
#ifdef MONGOC_ENABLE_SSL
_mongoc_scram_startup ();
#endif
#ifdef MONGOC_ENABLE_SASL_CYRUS
/* The following functions should not use tracing, as they may be invoked
* before mongoc_log_set_handler() can complete. */
sasl_set_mutex (mongoc_cyrus_mutex_alloc,
mongoc_cyrus_mutex_lock,
mongoc_cyrus_mutex_unlock,
mongoc_cyrus_mutex_free);
- status = sasl_client_init (callbacks);
+ status = sasl_client_init (NULL);
BSON_ASSERT (status == SASL_OK);
#endif
_mongoc_counters_init ();
#ifdef _WIN32
{
WORD wVersionRequested;
WSADATA wsaData;
int err;
wVersionRequested = MAKEWORD (2, 2);
err = WSAStartup (wVersionRequested, &wsaData);
/* check the version perhaps? */
BSON_ASSERT (err == 0);
}
#endif
_mongoc_handshake_init ();
MONGOC_ONCE_RETURN;
}
void
mongoc_init (void)
{
static mongoc_once_t once = MONGOC_ONCE_INIT;
mongoc_once (&once, _mongoc_do_init);
}
static MONGOC_ONCE_FUN (_mongoc_do_cleanup)
{
#ifdef MONGOC_ENABLE_SSL_OPENSSL
_mongoc_openssl_cleanup ();
#endif
#ifdef MONGOC_ENABLE_SASL_CYRUS
#ifdef MONGOC_HAVE_SASL_CLIENT_DONE
sasl_client_done ();
#else
/* fall back to deprecated function */
sasl_done ();
#endif
#endif
#ifdef _WIN32
WSACleanup ();
#endif
_mongoc_counters_cleanup ();
_mongoc_handshake_cleanup ();
MONGOC_ONCE_RETURN;
}
void
mongoc_cleanup (void)
{
static mongoc_once_t once = MONGOC_ONCE_INIT;
mongoc_once (&once, _mongoc_do_cleanup);
}
/*
* On GCC, just use __attribute__((constructor)) to perform initialization
* automatically for the application.
*/
#if defined(__GNUC__) && !defined(MONGOC_NO_AUTOMATIC_GLOBALS)
static void
_mongoc_init_ctor (void) __attribute__ ((constructor));
static void
_mongoc_init_ctor (void)
{
mongoc_init ();
}
/* Automatically clean up at program exit (GCC/Clang destructor attribute). */
static void
_mongoc_init_dtor (void) __attribute__ ((destructor));
static void
_mongoc_init_dtor (void)
{
   /* restore bson's default memory vtable before tearing down globals */
   bson_mem_restore_vtable ();
   mongoc_cleanup ();
}
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-init.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-init.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-init.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-init.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-iovec.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-iovec.h
similarity index 70%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-iovec.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-iovec.h
index 9128cf9a..c9fc71e7 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-iovec.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-iovec.h
@@ -1,53 +1,56 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_IOVEC_H
#define MONGOC_IOVEC_H
#include <bson.h>
#ifdef _WIN32
#include <stddef.h>
#else
#include <sys/uio.h>
#endif
BSON_BEGIN_DECLS
#ifdef _WIN32
typedef struct {
u_long iov_len;
char *iov_base;
} mongoc_iovec_t;
-BSON_STATIC_ASSERT (sizeof (mongoc_iovec_t) == sizeof (WSABUF));
-BSON_STATIC_ASSERT (offsetof (mongoc_iovec_t, iov_base) ==
- offsetof (WSABUF, buf));
-BSON_STATIC_ASSERT (offsetof (mongoc_iovec_t, iov_len) ==
- offsetof (WSABUF, len));
+BSON_STATIC_ASSERT2 (sizeof_iovect_t,
+ sizeof (mongoc_iovec_t) == sizeof (WSABUF));
+BSON_STATIC_ASSERT2 (offsetof_iovec_base,
+ offsetof (mongoc_iovec_t, iov_base) ==
+ offsetof (WSABUF, buf));
+BSON_STATIC_ASSERT2 (offsetof_iovec_len,
+ offsetof (mongoc_iovec_t, iov_len) ==
+ offsetof (WSABUF, len));
#else
typedef struct iovec mongoc_iovec_t;
#endif
BSON_END_DECLS
#endif /* MONGOC_IOVEC_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-libressl-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-libressl-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-libressl-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-libressl-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-libressl.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-libressl.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-libressl.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-libressl.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-linux-distro-scanner-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-linux-distro-scanner-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-linux-distro-scanner-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-linux-distro-scanner-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-linux-distro-scanner.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-linux-distro-scanner.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-linux-distro-scanner.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-linux-distro-scanner.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-list-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-list-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-list-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-list-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-list.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-list.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-list.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-list.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-log-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-log-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-log-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-log-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-log.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-log.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-log.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-log.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-log.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-log.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-log.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-log.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-macros.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-macros.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-macros.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-macros.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher-op-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher-op-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher-op-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher-op-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher-op.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher-op.c
similarity index 99%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher-op.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher-op.c
index 867bc116..24617ac8 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher-op.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher-op.c
@@ -1,1189 +1,1188 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-log.h"
#include "mongoc-matcher-op-private.h"
#include "mongoc-util-private.h"
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_exists_new --
*
* Create a new op for checking {$exists: bool}.
*
* Returns:
* A newly allocated mongoc_matcher_op_t that should be freed with
* _mongoc_matcher_op_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Allocate a matcher node for {$exists: bool} on @path.  The caller owns
 * the result and releases it with _mongoc_matcher_op_destroy().
 */
mongoc_matcher_op_t *
_mongoc_matcher_op_exists_new (const char *path, /* IN */
                               bool exists) /* IN */
{
   mongoc_matcher_op_t *node;

   BSON_ASSERT (path);

   node = (mongoc_matcher_op_t *) bson_malloc0 (sizeof *node);
   node->exists.base.opcode = MONGOC_MATCHER_OPCODE_EXISTS;
   node->exists.exists = exists;
   node->exists.path = bson_strdup (path);

   return node;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_type_new --
*
* Create a new op for checking {$type: int}.
*
* Returns:
* A newly allocated mongoc_matcher_op_t that should be freed with
* _mongoc_matcher_op_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Allocate a matcher node for {$type: int} on @path.  The caller owns
 * the result and releases it with _mongoc_matcher_op_destroy().
 */
mongoc_matcher_op_t *
_mongoc_matcher_op_type_new (const char *path, /* IN */
                             bson_type_t type) /* IN */
{
   mongoc_matcher_op_t *node;

   BSON_ASSERT (path);
   BSON_ASSERT (type);

   node = (mongoc_matcher_op_t *) bson_malloc0 (sizeof *node);
   node->type.base.opcode = MONGOC_MATCHER_OPCODE_TYPE;
   node->type.type = type;
   node->type.path = bson_strdup (path);

   return node;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_logical_new --
*
* Create a new op for checking any of:
*
* {$or: []}
* {$nor: []}
* {$and: []}
*
* Returns:
* A newly allocated mongoc_matcher_op_t that should be freed with
* _mongoc_matcher_op_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Allocate a logical matcher node ($or/$and/$nor) over @left and,
 * optionally, @right (@right may be NULL).  Ownership of both children
 * transfers to the node; free the tree with _mongoc_matcher_op_destroy().
 */
mongoc_matcher_op_t *
_mongoc_matcher_op_logical_new (mongoc_matcher_opcode_t opcode, /* IN */
                                mongoc_matcher_op_t *left, /* IN */
                                mongoc_matcher_op_t *right) /* IN */
{
   mongoc_matcher_op_t *node;

   BSON_ASSERT (left);
   /* opcode must lie in the contiguous OR..NOR range */
   BSON_ASSERT ((opcode >= MONGOC_MATCHER_OPCODE_OR) &&
                (opcode <= MONGOC_MATCHER_OPCODE_NOR));

   node = (mongoc_matcher_op_t *) bson_malloc0 (sizeof *node);
   node->logical.base.opcode = opcode;
   node->logical.right = right;
   node->logical.left = left;

   return node;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_compare_new --
*
* Create a new op for checking any of:
*
* {"abc": "def"}
* {$gt: {...}
* {$gte: {...}
* {$lt: {...}
* {$lte: {...}
* {$ne: {...}
* {$in: [...]}
* {$nin: [...]}
*
* Returns:
* A newly allocated mongoc_matcher_op_t that should be freed with
* _mongoc_matcher_op_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Allocate a comparison matcher node ($eq/$gt/$gte/$lt/$lte/$ne/$in/$nin)
 * for @path.  The spec value is captured by copying @iter.  The caller
 * frees the result with _mongoc_matcher_op_destroy().
 */
mongoc_matcher_op_t *
_mongoc_matcher_op_compare_new (mongoc_matcher_opcode_t opcode, /* IN */
                                const char *path, /* IN */
                                const bson_iter_t *iter) /* IN */
{
   mongoc_matcher_op_t *node;

   BSON_ASSERT (path);
   BSON_ASSERT (iter);

   node = (mongoc_matcher_op_t *) bson_malloc0 (sizeof *node);
   node->compare.base.opcode = opcode;
   node->compare.path = bson_strdup (path);
   node->compare.iter = *iter; /* struct copy of the spec iterator */

   return node;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_not_new --
*
* Create a new op for checking {$not: {...}}
*
* Returns:
* A newly allocated mongoc_matcher_op_t that should be freed with
* _mongoc_matcher_op_destroy().
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Allocate a {$not: {...}} matcher node wrapping @child.  Ownership of
 * @child transfers to the node; free with _mongoc_matcher_op_destroy().
 */
mongoc_matcher_op_t *
_mongoc_matcher_op_not_new (const char *path, /* IN */
                            mongoc_matcher_op_t *child) /* IN */
{
   mongoc_matcher_op_t *node;

   BSON_ASSERT (path);
   BSON_ASSERT (child);

   node = (mongoc_matcher_op_t *) bson_malloc0 (sizeof *node);
   node->not_.base.opcode = MONGOC_MATCHER_OPCODE_NOT;
   node->not_.child = child;
   node->not_.path = bson_strdup (path);

   return node;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_destroy --
*
* Free a mongoc_matcher_op_t structure and all children structures.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Recursively release a matcher op tree created by the *_new()
 * constructors: free whichever members the opcode owns, then the node.
 */
void
_mongoc_matcher_op_destroy (mongoc_matcher_op_t *op) /* IN */
{
   BSON_ASSERT (op);

   switch (op->base.opcode) {
   case MONGOC_MATCHER_OPCODE_EQ:
   case MONGOC_MATCHER_OPCODE_GT:
   case MONGOC_MATCHER_OPCODE_GTE:
   case MONGOC_MATCHER_OPCODE_IN:
   case MONGOC_MATCHER_OPCODE_LT:
   case MONGOC_MATCHER_OPCODE_LTE:
   case MONGOC_MATCHER_OPCODE_NE:
   case MONGOC_MATCHER_OPCODE_NIN:
      /* comparison nodes own only their path string */
      bson_free (op->compare.path);
      break;
   case MONGOC_MATCHER_OPCODE_OR:
   case MONGOC_MATCHER_OPCODE_AND:
   case MONGOC_MATCHER_OPCODE_NOR:
      /* logical nodes own up to two child subtrees */
      if (op->logical.left) {
         _mongoc_matcher_op_destroy (op->logical.left);
      }
      if (op->logical.right) {
         _mongoc_matcher_op_destroy (op->logical.right);
      }
      break;
   case MONGOC_MATCHER_OPCODE_NOT:
      /* $not owns one child and a path string */
      _mongoc_matcher_op_destroy (op->not_.child);
      bson_free (op->not_.path);
      break;
   case MONGOC_MATCHER_OPCODE_EXISTS:
      bson_free (op->exists.path);
      break;
   case MONGOC_MATCHER_OPCODE_TYPE:
      bson_free (op->type.path);
      break;
   default:
      break;
   }

   bson_free (op);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_exists_match --
*
* Checks to see if @bson matches @exists requirements. The
* {$exists: bool} query can be either true or fase so we must
* handle false as "not exists".
*
* Returns:
* true if the field exists and the spec expected it.
* true if the field does not exist and the spec expected it to not
* exist.
* Otherwise, false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Evaluate {$exists: bool}: true when the field's presence matches the
 * requested boolean ($exists:false matches a missing field).
 */
static bool
_mongoc_matcher_op_exists_match (mongoc_matcher_op_exists_t *exists, /* IN */
                                 const bson_t *bson) /* IN */
{
   bson_iter_t top;
   bson_iter_t descendant;
   bool field_present;

   BSON_ASSERT (exists);
   BSON_ASSERT (bson);

   if (!bson_iter_init (&top, bson)) {
      field_present = false;
   } else {
      /* dotted paths descend into sub-documents */
      field_present =
         bson_iter_find_descendant (&top, exists->path, &descendant);
   }

   return field_present == exists->exists;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_type_match --
*
* Checks if @bson matches the {$type: ...} op.
*
* Returns:
* true if the requested field was found and the type matched
* the requested type.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Evaluate {$type: int}: locate the field at type->path (dotted paths
 * supported) and compare its BSON type against the requested one.
 *
 * Returns true iff the field exists and its type matches; a missing
 * field never matches.
 *
 * Fix: compare the type of the located element (&desc) rather than the
 * top-level iterator (&iter).  For a dotted path such as "a.b",
 * bson_iter_find_descendant() leaves @iter positioned on the
 * intermediate document "a" while the target element is in @desc, so
 * the old code tested the wrong element's type.
 */
static bool
_mongoc_matcher_op_type_match (mongoc_matcher_op_type_t *type, /* IN */
                               const bson_t *bson) /* IN */
{
   bson_iter_t iter;
   bson_iter_t desc;

   BSON_ASSERT (type);
   BSON_ASSERT (bson);

   if (bson_iter_init (&iter, bson) &&
       bson_iter_find_descendant (&iter, type->path, &desc)) {
      return (bson_iter_type (&desc) == type->type);
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_not_match --
*
* Checks if the {$not: ...} expression matches by negating the
* child expression.
*
* Returns:
* true if the child expression returned false.
* Otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Evaluate {$not: {...}}: succeeds exactly when the child expression
 * fails against @bson.
 */
static bool
_mongoc_matcher_op_not_match (mongoc_matcher_op_not_t *not_, /* IN */
                              const bson_t *bson) /* IN */
{
   bool child_matched;

   BSON_ASSERT (not_);
   BSON_ASSERT (bson);

   child_matched = _mongoc_matcher_op_match (not_->child, bson);
   return !child_matched;
}
/* Pack two BSON type codes into a single int so a switch statement can
 * dispatch on the (spec type, document type) pair as one jump table. */
#define _TYPE_CODE(l, r) ((((int) (l)) << 8) | ((int) (r)))
/* Expand to `document_value op spec_value` using the native C operator:
 * t2 selects the bson_iter accessor for the document side (iter), t1 the
 * accessor for the spec side (compare_iter). */
#define _NATIVE_COMPARE(op, t1, t2) \
   (bson_iter##t2 (iter) op bson_iter##t1 (compare_iter))
#define _EQ_COMPARE(t1, t2) _NATIVE_COMPARE (==, t1, t2)
#define _NE_COMPARE(t1, t2) _NATIVE_COMPARE (!=, t1, t2)
#define _GT_COMPARE(t1, t2) _NATIVE_COMPARE (>, t1, t2)
#define _GTE_COMPARE(t1, t2) _NATIVE_COMPARE (>=, t1, t2)
#define _LT_COMPARE(t1, t2) _NATIVE_COMPARE (<, t1, t2)
#define _LTE_COMPARE(t1, t2) _NATIVE_COMPARE (<=, t1, t2)
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_iter_eq_match --
*
* Performs equality match for all types on either left or right
* side of the equation.
*
* We try to default to what the compiler would do for comparing
* things like integers. Therefore, we just have MACRO'tized
* everything so that the compiler sees the native values. (Such
* as (double == int64).
*
* The _TYPE_CODE() stuff allows us to shove the type of the left
* and the right into a single integer and then do a jump table
* with a switch/case for all our supported types.
*
* I imagine a bunch more of these will need to be added, so feel
* free to submit patches.
*
* Returns:
* true if the equality match succeeded.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Equality comparison between the spec value (@compare_iter) and the
 * document value (@iter), dispatching on the pair of BSON types so that
 * mixed numeric comparisons (e.g. double == int64) use native C
 * semantics.  Unsupported type pairs compare unequal. */
static bool
_mongoc_matcher_iter_eq_match (bson_iter_t *compare_iter, /* IN */
                               bson_iter_t *iter) /* IN */
{
   int code;

   BSON_ASSERT (compare_iter);
   BSON_ASSERT (iter);

   /* pack (spec type, document type) into one int for the switch below */
   code = _TYPE_CODE (bson_iter_type (compare_iter), bson_iter_type (iter));

   switch (code) {
   /* Double on Left Side */
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_DOUBLE):
      return _EQ_COMPARE (_double, _double);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_BOOL):
      return _EQ_COMPARE (_double, _bool);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32):
      return _EQ_COMPARE (_double, _int32);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT64):
      return _EQ_COMPARE (_double, _int64);

   /* UTF8 on Left Side */
   case _TYPE_CODE (BSON_TYPE_UTF8, BSON_TYPE_UTF8): {
      uint32_t llen;
      uint32_t rlen;
      const char *lstr;
      const char *rstr;

      lstr = bson_iter_utf8 (compare_iter, &llen);
      rstr = bson_iter_utf8 (iter, &rlen);

      /* equal iff same byte length and identical bytes */
      return ((llen == rlen) && (0 == memcmp (lstr, rstr, llen)));
   }

   /* Int32 on Left Side */
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_DOUBLE):
      return _EQ_COMPARE (_int32, _double);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_BOOL):
      return _EQ_COMPARE (_int32, _bool);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT32):
      return _EQ_COMPARE (_int32, _int32);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT64):
      return _EQ_COMPARE (_int32, _int64);

   /* Int64 on Left Side */
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_DOUBLE):
      return _EQ_COMPARE (_int64, _double);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_BOOL):
      return _EQ_COMPARE (_int64, _bool);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT32):
      return _EQ_COMPARE (_int64, _int32);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT64):
      return _EQ_COMPARE (_int64, _int64);

   /* Null on Left Side */
   case _TYPE_CODE (BSON_TYPE_NULL, BSON_TYPE_NULL):
   case _TYPE_CODE (BSON_TYPE_NULL, BSON_TYPE_UNDEFINED):
      /* a null spec value matches both null and undefined */
      return true;

   case _TYPE_CODE (BSON_TYPE_ARRAY, BSON_TYPE_ARRAY): {
      bson_iter_t left_array;
      bson_iter_t right_array;
      bson_iter_recurse (compare_iter, &left_array);
      bson_iter_recurse (iter, &right_array);

      /* element-wise recursive comparison; this loop always returns,
       * so there is no fall-through to the next case */
      while (true) {
         bool left_has_next = bson_iter_next (&left_array);
         bool right_has_next = bson_iter_next (&right_array);

         if (left_has_next != right_has_next) {
            /* different lengths */
            return false;
         }

         if (!left_has_next) {
            /* finished */
            return true;
         }

         if (!_mongoc_matcher_iter_eq_match (&left_array, &right_array)) {
            return false;
         }
      }
   }

   case _TYPE_CODE (BSON_TYPE_DOCUMENT, BSON_TYPE_DOCUMENT): {
      uint32_t llen;
      uint32_t rlen;
      const uint8_t *ldoc;
      const uint8_t *rdoc;

      bson_iter_document (compare_iter, &llen, &ldoc);
      bson_iter_document (iter, &rlen, &rdoc);

      /* byte-for-byte comparison of the raw BSON documents; this means
       * key order matters, unlike server-side equality */
      return ((llen == rlen) && (0 == memcmp (ldoc, rdoc, llen)));
   }

   default:
      return false;
   }
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_eq_match --
*
* Performs equality match for all types on either left or right
* side of the equation.
*
* Returns:
* true if the equality match succeeded.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Evaluate $eq (or an implicit {"field": value} spec): thin wrapper over
 * the type-pair aware iterator comparison.
 */
static bool
_mongoc_matcher_op_eq_match (mongoc_matcher_op_compare_t *compare, /* IN */
                             bson_iter_t *iter) /* IN */
{
   bson_iter_t *spec_iter;

   BSON_ASSERT (compare);
   BSON_ASSERT (iter);

   spec_iter = &compare->iter;
   return _mongoc_matcher_iter_eq_match (spec_iter, iter);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_gt_match --
*
* Perform {$gt: ...} match using @compare.
*
* In general, we try to default to what the compiler would do
* for comparison between different types.
*
* Returns:
* true if the document field was > the spec value.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Evaluate {$gt: value}: true when the document field compares greater
 * than the spec value using native C comparison for the supported
 * numeric/bool type pairs.  Unsupported pairs log a warning and do not
 * match. */
static bool
_mongoc_matcher_op_gt_match (mongoc_matcher_op_compare_t *compare, /* IN */
                             bson_iter_t *iter) /* IN */
{
   int code;
   bson_iter_t *compare_iter = &compare->iter;

   BSON_ASSERT (compare);
   BSON_ASSERT (iter);

   /* pack (spec type, document type) into one switchable code */
   code = _TYPE_CODE (bson_iter_type (compare_iter), bson_iter_type (iter));

   switch (code) {
   /* Double on Left Side */
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_DOUBLE):
      return _GT_COMPARE (_double, _double);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_BOOL):
      return _GT_COMPARE (_double, _bool);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32):
      return _GT_COMPARE (_double, _int32);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT64):
      return _GT_COMPARE (_double, _int64);

   /* Int32 on Left Side */
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_DOUBLE):
      return _GT_COMPARE (_int32, _double);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_BOOL):
      return _GT_COMPARE (_int32, _bool);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT32):
      return _GT_COMPARE (_int32, _int32);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT64):
      return _GT_COMPARE (_int32, _int64);

   /* Int64 on Left Side */
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_DOUBLE):
      return _GT_COMPARE (_int64, _double);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_BOOL):
      return _GT_COMPARE (_int64, _bool);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT32):
      return _GT_COMPARE (_int64, _int32);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT64):
      return _GT_COMPARE (_int64, _int64);

   default:
      MONGOC_WARNING ("Implement for (Type(%d) > Type(%d))",
                      bson_iter_type (compare_iter),
                      bson_iter_type (iter));
      break;
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_gte_match --
*
* Perform a match of {"path": {"$gte": value}}.
*
* Returns:
* true if the the spec matches, otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Evaluate {"path": {"$gte": value}} using native C comparison for the
 * supported numeric/bool type pairs; unsupported pairs warn and do not
 * match. */
static bool
_mongoc_matcher_op_gte_match (mongoc_matcher_op_compare_t *compare, /* IN */
                              bson_iter_t *iter) /* IN */
{
   bson_iter_t *compare_iter;
   int code;

   BSON_ASSERT (compare);
   BSON_ASSERT (iter);

   compare_iter = &compare->iter;
   /* pack (spec type, document type) into one switchable code */
   code = _TYPE_CODE (bson_iter_type (compare_iter), bson_iter_type (iter));

   switch (code) {
   /* Double on Left Side */
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_DOUBLE):
      return _GTE_COMPARE (_double, _double);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_BOOL):
      return _GTE_COMPARE (_double, _bool);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32):
      return _GTE_COMPARE (_double, _int32);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT64):
      return _GTE_COMPARE (_double, _int64);

   /* Int32 on Left Side */
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_DOUBLE):
      return _GTE_COMPARE (_int32, _double);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_BOOL):
      return _GTE_COMPARE (_int32, _bool);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT32):
      return _GTE_COMPARE (_int32, _int32);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT64):
      return _GTE_COMPARE (_int32, _int64);

   /* Int64 on Left Side */
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_DOUBLE):
      return _GTE_COMPARE (_int64, _double);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_BOOL):
      return _GTE_COMPARE (_int64, _bool);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT32):
      return _GTE_COMPARE (_int64, _int32);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT64):
      return _GTE_COMPARE (_int64, _int64);

   default:
      MONGOC_WARNING ("Implement for (Type(%d) >= Type(%d))",
                      bson_iter_type (compare_iter),
                      bson_iter_type (iter));
      break;
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_in_match --
*
* Checks the spec {"path": {"$in": [value1, value2, ...]}}.
*
* Returns:
* true if the spec matched, otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Evaluate {"path": {"$in": [v1, v2, ...]}}: true when any element of
 * the spec array compares equal to the document value.  A spec value
 * that is not an array never matches.
 */
static bool
_mongoc_matcher_op_in_match (mongoc_matcher_op_compare_t *compare, /* IN */
                             bson_iter_t *iter) /* IN */
{
   mongoc_matcher_op_compare_t eq_op;

   /* $in requires an array spec value */
   if (!BSON_ITER_HOLDS_ARRAY (&compare->iter)) {
      return false;
   }

   /* build a synthetic $eq op re-used against each array element */
   eq_op.base.opcode = MONGOC_MATCHER_OPCODE_EQ;
   eq_op.path = compare->path;

   if (!bson_iter_recurse (&compare->iter, &eq_op.iter)) {
      return false;
   }

   while (bson_iter_next (&eq_op.iter)) {
      if (_mongoc_matcher_op_eq_match (&eq_op, iter)) {
         return true;
      }
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_lt_match --
*
* Perform a {"path": "$lt": {value}} match.
*
* Returns:
* true if the spec matched, otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Evaluate {"path": {"$lt": value}} using native C comparison for the
 * supported numeric/bool type pairs; unsupported pairs warn and do not
 * match. */
static bool
_mongoc_matcher_op_lt_match (mongoc_matcher_op_compare_t *compare, /* IN */
                             bson_iter_t *iter) /* IN */
{
   bson_iter_t *compare_iter;
   int code;

   BSON_ASSERT (compare);
   BSON_ASSERT (iter);

   compare_iter = &compare->iter;
   /* pack (spec type, document type) into one switchable code */
   code = _TYPE_CODE (bson_iter_type (compare_iter), bson_iter_type (iter));

   switch (code) {
   /* Double on Left Side */
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_DOUBLE):
      return _LT_COMPARE (_double, _double);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_BOOL):
      return _LT_COMPARE (_double, _bool);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32):
      return _LT_COMPARE (_double, _int32);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT64):
      return _LT_COMPARE (_double, _int64);

   /* Int32 on Left Side */
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_DOUBLE):
      return _LT_COMPARE (_int32, _double);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_BOOL):
      return _LT_COMPARE (_int32, _bool);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT32):
      return _LT_COMPARE (_int32, _int32);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT64):
      return _LT_COMPARE (_int32, _int64);

   /* Int64 on Left Side */
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_DOUBLE):
      return _LT_COMPARE (_int64, _double);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_BOOL):
      return _LT_COMPARE (_int64, _bool);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT32):
      return _LT_COMPARE (_int64, _int32);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT64):
      return _LT_COMPARE (_int64, _int64);

   default:
      MONGOC_WARNING ("Implement for (Type(%d) < Type(%d))",
                      bson_iter_type (compare_iter),
                      bson_iter_type (iter));
      break;
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_lte_match --
*
* Perform a {"$path": {"$lte": value}} match.
*
* Returns:
* true if the spec matched, otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Evaluate {"path": {"$lte": value}} using native C comparison for the
 * supported numeric/bool type pairs; unsupported pairs warn and do not
 * match. */
static bool
_mongoc_matcher_op_lte_match (mongoc_matcher_op_compare_t *compare, /* IN */
                              bson_iter_t *iter) /* IN */
{
   bson_iter_t *compare_iter;
   int code;

   BSON_ASSERT (compare);
   BSON_ASSERT (iter);

   compare_iter = &compare->iter;
   /* pack (spec type, document type) into one switchable code */
   code = _TYPE_CODE (bson_iter_type (compare_iter), bson_iter_type (iter));

   switch (code) {
   /* Double on Left Side */
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_DOUBLE):
      return _LTE_COMPARE (_double, _double);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_BOOL):
      return _LTE_COMPARE (_double, _bool);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32):
      return _LTE_COMPARE (_double, _int32);
   case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT64):
      return _LTE_COMPARE (_double, _int64);

   /* Int32 on Left Side */
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_DOUBLE):
      return _LTE_COMPARE (_int32, _double);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_BOOL):
      return _LTE_COMPARE (_int32, _bool);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT32):
      return _LTE_COMPARE (_int32, _int32);
   case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT64):
      return _LTE_COMPARE (_int32, _int64);

   /* Int64 on Left Side */
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_DOUBLE):
      return _LTE_COMPARE (_int64, _double);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_BOOL):
      return _LTE_COMPARE (_int64, _bool);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT32):
      return _LTE_COMPARE (_int64, _int32);
   case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT64):
      return _LTE_COMPARE (_int64, _int64);

   default:
      MONGOC_WARNING ("Implement for (Type(%d) <= Type(%d))",
                      bson_iter_type (compare_iter),
                      bson_iter_type (iter));
      break;
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_ne_match --
*
* Perform a {"path": {"$ne": value}} match.
*
* Returns:
* true if the field "path" was not found or the value is not-equal
* to value.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Evaluate {"path": {"$ne": value}}: the logical negation of $eq on a
 * field that was already located by the caller.
 */
static bool
_mongoc_matcher_op_ne_match (mongoc_matcher_op_compare_t *compare, /* IN */
                             bson_iter_t *iter) /* IN */
{
   bool is_equal;

   is_equal = _mongoc_matcher_op_eq_match (compare, iter);
   return !is_equal;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_nin_match --
*
* Perform a {"path": {"$nin": value}} match.
*
* Returns:
* true if value was not found in the array at "path".
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Evaluate {"path": {"$nin": [...]}}: the logical negation of $in on a
 * field that was already located by the caller.
 */
static bool
_mongoc_matcher_op_nin_match (mongoc_matcher_op_compare_t *compare, /* IN */
                              bson_iter_t *iter) /* IN */
{
   bool in_array;

   in_array = _mongoc_matcher_op_in_match (compare, iter);
   return !in_array;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_compare_match --
*
* Dispatch function for mongoc_matcher_op_compare_t operations
* to perform a match.
*
* Returns:
* Opcode dependent.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Locate the field named by compare->path (dotted paths descend into
 * sub-documents) and dispatch to the opcode-specific comparison.
 * A missing field never matches — note this applies even to $ne/$nin,
 * which differs from server-side semantics where a missing field
 * matches $ne. */
static bool
_mongoc_matcher_op_compare_match (mongoc_matcher_op_compare_t *compare, /* IN */
                                  const bson_t *bson) /* IN */
{
   bson_iter_t tmp;
   bson_iter_t iter;

   BSON_ASSERT (compare);
   BSON_ASSERT (bson);

   if (strchr (compare->path, '.')) {
      /* dotted path: walk down through nested documents */
      if (!bson_iter_init (&tmp, bson) ||
          !bson_iter_find_descendant (&tmp, compare->path, &iter)) {
         return false;
      }
   } else if (!bson_iter_init_find (&iter, bson, compare->path)) {
      return false;
   }

   /* (int) cast — presumably to avoid switch-over-enum warnings for
    * opcodes handled elsewhere; TODO confirm */
   switch ((int) compare->base.opcode) {
   case MONGOC_MATCHER_OPCODE_EQ:
      return _mongoc_matcher_op_eq_match (compare, &iter);
   case MONGOC_MATCHER_OPCODE_GT:
      return _mongoc_matcher_op_gt_match (compare, &iter);
   case MONGOC_MATCHER_OPCODE_GTE:
      return _mongoc_matcher_op_gte_match (compare, &iter);
   case MONGOC_MATCHER_OPCODE_IN:
      return _mongoc_matcher_op_in_match (compare, &iter);
   case MONGOC_MATCHER_OPCODE_LT:
      return _mongoc_matcher_op_lt_match (compare, &iter);
   case MONGOC_MATCHER_OPCODE_LTE:
      return _mongoc_matcher_op_lte_match (compare, &iter);
   case MONGOC_MATCHER_OPCODE_NE:
      return _mongoc_matcher_op_ne_match (compare, &iter);
   case MONGOC_MATCHER_OPCODE_NIN:
      return _mongoc_matcher_op_nin_match (compare, &iter);
   default:
      /* non-comparison opcodes must never reach this dispatcher */
      BSON_ASSERT (false);
      break;
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_logical_match --
*
* Dispatch function for mongoc_matcher_op_logical_t operations
* to perform a match.
*
* Returns:
* Opcode specific.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Evaluate $or / $and / $nor by recursing into both children.
 * NOTE(review): _mongoc_matcher_op_logical_new permits a NULL right
 * child, but _mongoc_matcher_op_match asserts a non-NULL op — presumably
 * the parser always supplies both children here; verify against callers. */
static bool
_mongoc_matcher_op_logical_match (mongoc_matcher_op_logical_t *logical, /* IN */
                                  const bson_t *bson) /* IN */
{
   BSON_ASSERT (logical);
   BSON_ASSERT (bson);

   switch ((int) logical->base.opcode) {
   case MONGOC_MATCHER_OPCODE_OR:
      return (_mongoc_matcher_op_match (logical->left, bson) ||
              _mongoc_matcher_op_match (logical->right, bson));
   case MONGOC_MATCHER_OPCODE_AND:
      return (_mongoc_matcher_op_match (logical->left, bson) &&
              _mongoc_matcher_op_match (logical->right, bson));
   case MONGOC_MATCHER_OPCODE_NOR:
      /* NOR: true only when neither child matches */
      return !(_mongoc_matcher_op_match (logical->left, bson) ||
               _mongoc_matcher_op_match (logical->right, bson));
   default:
      /* non-logical opcodes must never reach this dispatcher */
      BSON_ASSERT (false);
      break;
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_match --
*
* Dispatch function for all operation types to perform a match.
*
* Returns:
* Opcode specific.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Top-level dispatch: route @op to the comparison, logical, $not,
 * $exists, or $type evaluator based on its opcode.  Unknown opcodes
 * match nothing. */
bool
_mongoc_matcher_op_match (mongoc_matcher_op_t *op, /* IN */
                          const bson_t *bson) /* IN */
{
   BSON_ASSERT (op);
   BSON_ASSERT (bson);

   switch (op->base.opcode) {
   case MONGOC_MATCHER_OPCODE_EQ:
   case MONGOC_MATCHER_OPCODE_GT:
   case MONGOC_MATCHER_OPCODE_GTE:
   case MONGOC_MATCHER_OPCODE_IN:
   case MONGOC_MATCHER_OPCODE_LT:
   case MONGOC_MATCHER_OPCODE_LTE:
   case MONGOC_MATCHER_OPCODE_NE:
   case MONGOC_MATCHER_OPCODE_NIN:
      return _mongoc_matcher_op_compare_match (&op->compare, bson);
   case MONGOC_MATCHER_OPCODE_OR:
   case MONGOC_MATCHER_OPCODE_AND:
   case MONGOC_MATCHER_OPCODE_NOR:
      return _mongoc_matcher_op_logical_match (&op->logical, bson);
   case MONGOC_MATCHER_OPCODE_NOT:
      return _mongoc_matcher_op_not_match (&op->not_, bson);
   case MONGOC_MATCHER_OPCODE_EXISTS:
      return _mongoc_matcher_op_exists_match (&op->exists, bson);
   case MONGOC_MATCHER_OPCODE_TYPE:
      return _mongoc_matcher_op_type_match (&op->type, bson);
   default:
      break;
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_matcher_op_to_bson --
*
* Convert the optree specified by @op to a bson document similar
* to what the query would have been. This is not perfectly the
* same, and so should not be used as such.
*
* Returns:
* None.
*
* Side effects:
* @bson is appended to, and therefore must be initialized before
* calling this function.
*
*--------------------------------------------------------------------------
*/
void
_mongoc_matcher_op_to_bson (mongoc_matcher_op_t *op, /* IN */
bson_t *bson) /* IN */
{
const char *str;
bson_t child;
bson_t child2;
BSON_ASSERT (op);
BSON_ASSERT (bson);
switch (op->base.opcode) {
case MONGOC_MATCHER_OPCODE_EQ:
/* equality renders as {path: value}, with no operator wrapper */
- _ignore_value (
- bson_append_iter (bson, op->compare.path, -1, &op->compare.iter));
+ (void) bson_append_iter (bson, op->compare.path, -1, &op->compare.iter);
break;
case MONGOC_MATCHER_OPCODE_GT:
case MONGOC_MATCHER_OPCODE_GTE:
case MONGOC_MATCHER_OPCODE_IN:
case MONGOC_MATCHER_OPCODE_LT:
case MONGOC_MATCHER_OPCODE_LTE:
case MONGOC_MATCHER_OPCODE_NE:
case MONGOC_MATCHER_OPCODE_NIN:
/* the other comparisons render as {path: {"$op": value}}; pick the
 * operator name first, then emit the wrapper document */
switch ((int) op->base.opcode) {
case MONGOC_MATCHER_OPCODE_GT:
str = "$gt";
break;
case MONGOC_MATCHER_OPCODE_GTE:
str = "$gte";
break;
case MONGOC_MATCHER_OPCODE_IN:
str = "$in";
break;
case MONGOC_MATCHER_OPCODE_LT:
str = "$lt";
break;
case MONGOC_MATCHER_OPCODE_LTE:
str = "$lte";
break;
case MONGOC_MATCHER_OPCODE_NE:
str = "$ne";
break;
case MONGOC_MATCHER_OPCODE_NIN:
str = "$nin";
break;
default:
/* unreachable: the outer case list covers every inner case */
str = "???";
break;
}
if (bson_append_document_begin (bson, op->compare.path, -1, &child)) {
- _ignore_value (bson_append_iter (&child, str, -1, &op->compare.iter));
+ (void) bson_append_iter (&child, str, -1, &op->compare.iter);
bson_append_document_end (bson, &child);
}
break;
case MONGOC_MATCHER_OPCODE_OR:
case MONGOC_MATCHER_OPCODE_AND:
case MONGOC_MATCHER_OPCODE_NOR:
if (op->base.opcode == MONGOC_MATCHER_OPCODE_OR) {
str = "$or";
} else if (op->base.opcode == MONGOC_MATCHER_OPCODE_AND) {
str = "$and";
} else if (op->base.opcode == MONGOC_MATCHER_OPCODE_NOR) {
str = "$nor";
} else {
BSON_ASSERT (false);
str = NULL;
}
/* logical ops render as {"$op": [{left}, {right}]}; the right branch
 * is optional and recursion rebuilds each sub-tree */
bson_append_array_begin (bson, str, -1, &child);
bson_append_document_begin (&child, "0", 1, &child2);
_mongoc_matcher_op_to_bson (op->logical.left, &child2);
bson_append_document_end (&child, &child2);
if (op->logical.right) {
bson_append_document_begin (&child, "1", 1, &child2);
_mongoc_matcher_op_to_bson (op->logical.right, &child2);
bson_append_document_end (&child, &child2);
}
bson_append_array_end (bson, &child);
break;
case MONGOC_MATCHER_OPCODE_NOT:
/* renders as {path: {"$not": {child}}} */
bson_append_document_begin (bson, op->not_.path, -1, &child);
bson_append_document_begin (&child, "$not", 4, &child2);
_mongoc_matcher_op_to_bson (op->not_.child, &child2);
bson_append_document_end (&child, &child2);
bson_append_document_end (bson, &child);
break;
case MONGOC_MATCHER_OPCODE_EXISTS:
BSON_APPEND_BOOL (bson, "$exists", op->exists.exists);
break;
case MONGOC_MATCHER_OPCODE_TYPE:
BSON_APPEND_INT32 (bson, "$type", (int) op->type.type);
break;
default:
BSON_ASSERT (false);
break;
}
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-matcher.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-matcher.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-memcmp-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-memcmp-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-memcmp-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-memcmp-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-memcmp.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-memcmp.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-memcmp.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-memcmp.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-opcode.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-opcode.h
similarity index 97%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-opcode.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-opcode.h
index 0a98549e..a5ccf36b 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-opcode.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-opcode.h
@@ -1,46 +1,46 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_OPCODE_H
#define MONGOC_OPCODE_H
#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
BSON_BEGIN_DECLS
typedef enum {
MONGOC_OPCODE_REPLY = 1,
- MONGOC_OPCODE_MSG = 1000,
MONGOC_OPCODE_UPDATE = 2001,
MONGOC_OPCODE_INSERT = 2002,
MONGOC_OPCODE_QUERY = 2004,
MONGOC_OPCODE_GET_MORE = 2005,
MONGOC_OPCODE_DELETE = 2006,
MONGOC_OPCODE_KILL_CURSORS = 2007,
MONGOC_OPCODE_COMPRESSED = 2012,
+ MONGOC_OPCODE_MSG = 2013,
} mongoc_opcode_t;
BSON_END_DECLS
#endif /* MONGOC_OPCODE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-openssl-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-openssl-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-openssl-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-openssl-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-openssl.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-openssl.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-openssl.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-openssl.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-queue-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-queue-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-queue-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-queue-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-queue.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-queue.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-queue.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-queue.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand-cng.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rand-cng.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand-cng.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rand-cng.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand-common-crypto.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rand-common-crypto.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand-common-crypto.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rand-common-crypto.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand-openssl.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rand-openssl.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand-openssl.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rand-openssl.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rand-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rand-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rand.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rand.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rand.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-concern-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-concern-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-concern-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-concern-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-concern.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-concern.c
similarity index 82%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-concern.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-concern.c
index c6f81e14..2365bf78 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-concern.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-concern.c
@@ -1,217 +1,205 @@
/*
* Copyright 2015 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-log.h"
#include "mongoc-read-concern.h"
#include "mongoc-read-concern-private.h"
static void
_mongoc_read_concern_freeze (mongoc_read_concern_t *read_concern);
/**
* mongoc_read_concern_new:
*
* Create a new mongoc_read_concern_t.
*
* Returns: A newly allocated mongoc_read_concern_t. This should be freed
* with mongoc_read_concern_destroy().
*/
mongoc_read_concern_t *
mongoc_read_concern_new (void)
{
mongoc_read_concern_t *read_concern;
/* zero-filled: level == NULL, frozen == false */
read_concern = (mongoc_read_concern_t *) bson_malloc0 (sizeof *read_concern);
/* keep "compiled" always-initialized so later code (destroy, freeze) can
 * operate on it unconditionally */
+ bson_init (&read_concern->compiled);
+
return read_concern;
}
/**
 * mongoc_read_concern_copy:
 *
 * Deep-copy @read_concern. Returns NULL when given NULL; otherwise the
 * caller owns the result and frees it with mongoc_read_concern_destroy().
 */
mongoc_read_concern_t *
mongoc_read_concern_copy (const mongoc_read_concern_t *read_concern)
{
   mongoc_read_concern_t *copy;

   if (!read_concern) {
      return NULL;
   }

   copy = mongoc_read_concern_new ();
   copy->level = bson_strdup (read_concern->level);

   return copy;
}
/**
* mongoc_read_concern_destroy:
* @read_concern: A mongoc_read_concern_t.
*
* Releases a mongoc_read_concern_t and all associated memory.
*/
void
mongoc_read_concern_destroy (mongoc_read_concern_t *read_concern)
{
/* NULL is a no-op, matching the library's other destroy functions */
if (read_concern) {
- if (read_concern->compiled.len) {
- bson_destroy (&read_concern->compiled);
- }
-
/* "compiled" is always initialized by mongoc_read_concern_new(), so it
 * can be destroyed unconditionally */
+ bson_destroy (&read_concern->compiled);
bson_free (read_concern->level);
bson_free (read_concern);
}
}
/* Return the configured readConcern level string, or NULL when unset.
 * The string is owned by @read_concern. */
const char *
mongoc_read_concern_get_level (const mongoc_read_concern_t *read_concern)
{
   BSON_ASSERT (read_concern);

   return read_concern->level;
}
/**
* mongoc_read_concern_set_level:
* @read_concern: A mongoc_read_concern_t.
* @level: The read concern level
*
* Sets the read concern level. Any string is supported for future compatibility
* but MongoDB 3.2 only accepts "local" and "majority", aka:
* - MONGOC_READ_CONCERN_LEVEL_LOCAL
* - MONGOC_READ_CONCERN_LEVEL_MAJORITY
* MongoDB 3.4 added
* - MONGOC_READ_CONCERN_LEVEL_LINEARIZABLE
*
- * If the @read_concern has already been frozen, calling this function will not
- * alter the read concern level.
- *
* See the MongoDB docs for more information on readConcernLevel
*/
bool
mongoc_read_concern_set_level (mongoc_read_concern_t *read_concern,
const char *level)
{
BSON_ASSERT (read_concern);
- if (read_concern->frozen) {
- return false;
- }
-
/* replace any previously set level string */
bson_free (read_concern->level);
read_concern->level = bson_strdup (level);
/* invalidate the cached bson form; it is rebuilt lazily on next use */
+ read_concern->frozen = false;
+
return true;
}
/**
* mongoc_read_concern_append:
* @read_concern: (in): A mongoc_read_concern_t.
* @opts: (out): A pointer to a bson document.
*
* Appends a read_concern document to command options to send to
* a server.
*
* Returns true on success, false on failure.
*
*/
/**
 * mongoc_read_concern_append:
 *
 * Append a {"readConcern": {...}} document to @command. A default
 * (level-less) read concern appends nothing and reports success.
 *
 * Returns true on success, false if the append failed.
 */
bool
mongoc_read_concern_append (mongoc_read_concern_t *read_concern,
                            bson_t *command)
{
   bool appended;

   BSON_ASSERT (read_concern);

   /* nothing to send for the server default */
   if (!read_concern->level) {
      return true;
   }

   appended = bson_append_document (
      command, "readConcern", 11, _mongoc_read_concern_get_bson (read_concern));

   if (!appended) {
      MONGOC_ERROR ("Could not append readConcern to command.");
   }

   return appended;
}
/**
* mongoc_read_concern_is_default:
* @read_concern: A const mongoc_read_concern_t.
*
* Returns true when read_concern has not been modified.
*/
bool
mongoc_read_concern_is_default (const mongoc_read_concern_t *read_concern)
{
return !read_concern || !read_concern->level;
}
/**
* mongoc_read_concern_get_bson:
* @read_concern: A mongoc_read_concern_t.
*
* This is an internal function.
*
- * Freeze the read concern if necessary and retrieve the encoded bson_t
- * representing the read concern.
- *
- * You may not modify the read concern further after calling this function.
- *
- * Returns: A bson_t that should not be modified or freed as it is owned by
- * the mongoc_read_concern_t instance.
+ * Returns: A bson_t representing the read concern, which is owned by the
+ * mongoc_read_concern_t instance and should not be modified or freed.
*/
/* Internal: return the cached bson form of @read_concern, (re)encoding it
 * first when the read concern changed since it was last frozen. The result
 * is owned by @read_concern and must not be modified or freed. */
const bson_t *
_mongoc_read_concern_get_bson (mongoc_read_concern_t *read_concern)
{
   bool stale = !read_concern->frozen;

   if (stale) {
      _mongoc_read_concern_freeze (read_concern);
   }

   return &read_concern->compiled;
}
/**
* mongoc_read_concern_freeze:
* @read_concern: A mongoc_read_concern_t.
*
* This is an internal function.
*
- * Freeze the read concern if necessary and encode it into a bson_ts which
- * represent the raw bson form and the get last error command form.
- *
- * You may not modify the read concern further after calling this function.
+ * Encodes the read concern into a bson_t, which may then be returned by
+ * mongoc_read_concern_get_bson().
*/
static void
_mongoc_read_concern_freeze (mongoc_read_concern_t *read_concern)
{
bson_t *compiled;
BSON_ASSERT (read_concern);
compiled = &read_concern->compiled;
read_concern->frozen = true;
/* "compiled" is always initialized, so reuse its storage */
- bson_init (compiled);
+ bson_reinit (compiled);
/* an unset level encodes as an empty document */
- BSON_ASSERT (read_concern->level);
- BSON_APPEND_UTF8 (compiled, "level", read_concern->level);
+ if (read_concern->level) {
+ BSON_APPEND_UTF8 (compiled, "level", read_concern->level);
+ }
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-concern.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-concern.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-concern.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-concern.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-prefs-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-prefs-private.h
similarity index 70%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-prefs-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-prefs-private.h
index 072534f8..76cad3f7 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-prefs-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-prefs-private.h
@@ -1,71 +1,74 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_READ_PREFS_PRIVATE_H
#define MONGOC_READ_PREFS_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-cluster-private.h"
#include "mongoc-read-prefs.h"
BSON_BEGIN_DECLS
/* Read preference: mode plus optional tag sets and staleness bound. */
struct _mongoc_read_prefs_t {
mongoc_read_mode_t mode;
/* array of tag-set documents keyed "0", "1", ... (see add_tag) */
bson_t tags;
/* MONGOC_NO_MAX_STALENESS when unset */
int64_t max_staleness_seconds;
};
-typedef struct _mongoc_apply_read_prefs_result_t {
- bson_t *query_with_read_prefs;
+typedef struct _mongoc_assemble_query_result_t {
+ bson_t *assembled_query;
bool query_owned;
mongoc_query_flags_t flags;
-} mongoc_apply_read_prefs_result_t;
+} mongoc_assemble_query_result_t;
-#define READ_PREFS_RESULT_INIT \
+#define ASSEMBLE_QUERY_RESULT_INIT \
{ \
NULL, false, MONGOC_QUERY_NONE \
}
const char *
_mongoc_read_mode_as_str (mongoc_read_mode_t mode);
void
-apply_read_preferences (const mongoc_read_prefs_t *read_prefs,
- const mongoc_server_stream_t *server_stream,
- const bson_t *query_bson,
- mongoc_query_flags_t initial_flags,
- mongoc_apply_read_prefs_result_t *result);
+assemble_query (const mongoc_read_prefs_t *read_prefs,
+ const mongoc_server_stream_t *server_stream,
+ const bson_t *query_bson,
+ mongoc_query_flags_t initial_flags,
+ mongoc_assemble_query_result_t *result);
void
-apply_read_prefs_result_cleanup (mongoc_apply_read_prefs_result_t *result);
+assemble_query_result_cleanup (mongoc_assemble_query_result_t *result);
bool
_mongoc_read_prefs_validate (const mongoc_read_prefs_t *read_prefs,
bson_error_t *error);
+#define IS_PREF_PRIMARY(_pref) \
+ (!(_pref) || ((_pref)->mode == MONGOC_READ_PRIMARY))
+
BSON_END_DECLS
#endif /* MONGOC_READ_PREFS_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-prefs.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-prefs.c
similarity index 87%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-prefs.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-prefs.c
index d80afbb7..88e2277c 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-prefs.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-prefs.c
@@ -1,381 +1,380 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include "mongoc-config.h"
#include "mongoc-error.h"
#include "mongoc-read-prefs-private.h"
#include "mongoc-trace-private.h"
/* Allocate a read preference with @mode, an empty tag set, and no max
 * staleness bound. Free with mongoc_read_prefs_destroy(). */
mongoc_read_prefs_t *
mongoc_read_prefs_new (mongoc_read_mode_t mode)
{
   mongoc_read_prefs_t *prefs =
      (mongoc_read_prefs_t *) bson_malloc0 (sizeof *prefs);

   prefs->mode = mode;
   prefs->max_staleness_seconds = MONGOC_NO_MAX_STALENESS;
   bson_init (&prefs->tags);

   return prefs;
}
/* NULL read prefs mean the default mode: primary. */
mongoc_read_mode_t
mongoc_read_prefs_get_mode (const mongoc_read_prefs_t *read_prefs)
{
   if (!read_prefs) {
      return MONGOC_READ_PRIMARY;
   }

   return read_prefs->mode;
}
/* Set the read mode; @mode must be one of the mongoc_read_mode_t values
 * (at most MONGOC_READ_NEAREST). */
void
mongoc_read_prefs_set_mode (mongoc_read_prefs_t *read_prefs,
                            mongoc_read_mode_t mode)
{
   BSON_ASSERT (read_prefs);
   BSON_ASSERT (mode <= MONGOC_READ_NEAREST);

   read_prefs->mode = mode;
}
/* Borrowed pointer to the tag-set array; owned by @read_prefs. */
const bson_t *
mongoc_read_prefs_get_tags (const mongoc_read_prefs_t *read_prefs)
{
   BSON_ASSERT (read_prefs);

   return &read_prefs->tags;
}
/* Replace the tag sets with a copy of @tags; NULL clears them. */
void
mongoc_read_prefs_set_tags (mongoc_read_prefs_t *read_prefs, const bson_t *tags)
{
   BSON_ASSERT (read_prefs);

   /* drop the previous contents before re-initializing */
   bson_destroy (&read_prefs->tags);

   if (!tags) {
      bson_init (&read_prefs->tags);
      return;
   }

   bson_copy_to (tags, &read_prefs->tags);
}
/* Append one tag-set document (or an empty one when @tag is NULL) under
 * the next numeric key: "0", "1", ... */
void
mongoc_read_prefs_add_tag (mongoc_read_prefs_t *read_prefs, const bson_t *tag)
{
   bson_t empty = BSON_INITIALIZER;
   const bson_t *doc;
   char key[16];

   BSON_ASSERT (read_prefs);

   /* keys form consecutive array indexes */
   bson_snprintf (
      key, sizeof key, "%d", (int) bson_count_keys (&read_prefs->tags));

   doc = tag ? tag : &empty;
   bson_append_document (&read_prefs->tags, key, -1, doc);
}
/* Current maxStalenessSeconds, or MONGOC_NO_MAX_STALENESS when unset. */
int64_t
mongoc_read_prefs_get_max_staleness_seconds (
   const mongoc_read_prefs_t *read_prefs)
{
   BSON_ASSERT (read_prefs);

   return read_prefs->max_staleness_seconds;
}
/* Set maxStalenessSeconds; pass MONGOC_NO_MAX_STALENESS to clear it. */
void
mongoc_read_prefs_set_max_staleness_seconds (mongoc_read_prefs_t *read_prefs,
                                             int64_t max_staleness_seconds)
{
   BSON_ASSERT (read_prefs);

   read_prefs->max_staleness_seconds = max_staleness_seconds;
}
/* Validate @read_prefs: primary mode forbids both tags and
 * maxStalenessSeconds, and maxStalenessSeconds, when set, must be
 * positive. */
bool
mongoc_read_prefs_is_valid (const mongoc_read_prefs_t *read_prefs)
{
   bool staleness_set;

   BSON_ASSERT (read_prefs);

   staleness_set =
      read_prefs->max_staleness_seconds != MONGOC_NO_MAX_STALENESS;

   if (read_prefs->mode == MONGOC_READ_PRIMARY &&
       (staleness_set || !bson_empty (&read_prefs->tags))) {
      return false;
   }

   return !(staleness_set && read_prefs->max_staleness_seconds <= 0);
}
/* Release @read_prefs and its tag storage; NULL is a no-op. */
void
mongoc_read_prefs_destroy (mongoc_read_prefs_t *read_prefs)
{
   if (!read_prefs) {
      return;
   }

   bson_destroy (&read_prefs->tags);
   bson_free (read_prefs);
}
/* Deep-copy @read_prefs (mode, tags, max staleness); NULL yields NULL.
 * The caller owns the result. */
mongoc_read_prefs_t *
mongoc_read_prefs_copy (const mongoc_read_prefs_t *read_prefs)
{
   mongoc_read_prefs_t *copy;

   if (!read_prefs) {
      return NULL;
   }

   copy = mongoc_read_prefs_new (read_prefs->mode);
   copy->max_staleness_seconds = read_prefs->max_staleness_seconds;
   bson_copy_to (&read_prefs->tags, &copy->tags);

   return copy;
}
/* Map a read mode to its wire-format string; "" for unknown modes. */
const char *
_mongoc_read_mode_as_str (mongoc_read_mode_t mode)
{
   switch (mode) {
   case MONGOC_READ_NEAREST:
      return "nearest";
   case MONGOC_READ_SECONDARY_PREFERRED:
      return "secondaryPreferred";
   case MONGOC_READ_SECONDARY:
      return "secondary";
   case MONGOC_READ_PRIMARY_PREFERRED:
      return "primaryPreferred";
   case MONGOC_READ_PRIMARY:
      return "primary";
   default:
      return "";
   }
}
/* Update result with the read prefs, following Server Selection Spec.
* The driver must have discovered the server is a mongos.
*/
static void
_apply_read_preferences_mongos (
const mongoc_read_prefs_t *read_prefs,
const bson_t *query_bson,
- mongoc_apply_read_prefs_result_t *result /* OUT */)
+ mongoc_assemble_query_result_t *result /* OUT */)
{
mongoc_read_mode_t mode;
const bson_t *tags = NULL;
bson_t child;
const char *mode_str;
int64_t max_staleness_seconds;
mode = mongoc_read_prefs_get_mode (read_prefs);
if (read_prefs) {
tags = mongoc_read_prefs_get_tags (read_prefs);
}
/* Server Selection Spec says:
*
* For mode 'primary', drivers MUST NOT set the slaveOK wire protocol flag
* and MUST NOT use $readPreference
*
* For mode 'secondary', drivers MUST set the slaveOK wire protocol flag and
* MUST also use $readPreference
*
* For mode 'primaryPreferred', drivers MUST set the slaveOK wire protocol
* flag and MUST also use $readPreference
*
* For mode 'secondaryPreferred', drivers MUST set the slaveOK wire protocol
* flag. If the read preference contains a non-empty tag_sets parameter,
* drivers MUST use $readPreference; otherwise, drivers MUST NOT use
* $readPreference
*
* For mode 'nearest', drivers MUST set the slaveOK wire protocol flag and
* MUST also use $readPreference
*/
if (mode == MONGOC_READ_SECONDARY_PREFERRED && bson_empty0 (tags)) {
result->flags |= MONGOC_QUERY_SLAVE_OK;
} else if (mode != MONGOC_READ_PRIMARY) {
result->flags |= MONGOC_QUERY_SLAVE_OK;
/* Server Selection Spec: "When any $ modifier is used, including the
* $readPreference modifier, the query MUST be provided using the $query
* modifier".
*
* This applies to commands, too.
*/
- result->query_with_read_prefs = bson_new ();
+ result->assembled_query = bson_new ();
result->query_owned = true;
if (bson_has_field (query_bson, "$query")) {
- bson_concat (result->query_with_read_prefs, query_bson);
+ bson_concat (result->assembled_query, query_bson);
} else {
bson_append_document (
- result->query_with_read_prefs, "$query", 6, query_bson);
+ result->assembled_query, "$query", 6, query_bson);
}
bson_append_document_begin (
- result->query_with_read_prefs, "$readPreference", 15, &child);
+ result->assembled_query, "$readPreference", 15, &child);
mode_str = _mongoc_read_mode_as_str (mode);
bson_append_utf8 (&child, "mode", 4, mode_str, -1);
if (!bson_empty0 (tags)) {
bson_append_array (&child, "tags", 4, tags);
}
max_staleness_seconds =
mongoc_read_prefs_get_max_staleness_seconds (read_prefs);
if (max_staleness_seconds != MONGOC_NO_MAX_STALENESS) {
bson_append_int64 (
&child, "maxStalenessSeconds", 19, max_staleness_seconds);
}
- bson_append_document_end (result->query_with_read_prefs, &child);
+ bson_append_document_end (result->assembled_query, &child);
}
}
/*
*--------------------------------------------------------------------------
*
- * apply_read_preferences --
+ * assemble_query --
*
- * Update @result based on @read prefs, following the Server Selection
+ * Update @result based on @read_prefs, following the Server Selection
* Spec.
*
* Side effects:
- * Sets @result->query_with_read_prefs and @result->flags.
+ * Sets @result->assembled_query and @result->flags.
*
* Note:
- * This function, the mongoc_apply_read_prefs_result_t struct, and all
+ * This function, the mongoc_assemble_query_result_t struct, and all
* related functions are only used for find operations with OP_QUERY.
- * Remove them once MongoDB 3.0 is EOL, all find operations will then
- * use the "find" command.
+ * Remove them once we have implemented exhaust cursors with OP_MSG in
+ * the server, and all previous server versions are EOL.
*
*--------------------------------------------------------------------------
*/
void
-apply_read_preferences (const mongoc_read_prefs_t *read_prefs,
- const mongoc_server_stream_t *server_stream,
- const bson_t *query_bson,
- mongoc_query_flags_t initial_flags,
- mongoc_apply_read_prefs_result_t *result /* OUT */)
+assemble_query (const mongoc_read_prefs_t *read_prefs,
+ const mongoc_server_stream_t *server_stream,
+ const bson_t *query_bson,
+ mongoc_query_flags_t initial_flags,
+ mongoc_assemble_query_result_t *result /* OUT */)
{
mongoc_server_description_type_t server_type;
ENTRY;
BSON_ASSERT (server_stream);
BSON_ASSERT (query_bson);
BSON_ASSERT (result);
/* default values */
- result->query_with_read_prefs = (bson_t *) query_bson;
+ result->assembled_query = (bson_t *) query_bson;
result->query_owned = false;
result->flags = initial_flags;
server_type = server_stream->sd->type;
switch (server_stream->topology_type) {
case MONGOC_TOPOLOGY_SINGLE:
if (server_type == MONGOC_SERVER_MONGOS) {
_apply_read_preferences_mongos (read_prefs, query_bson, result);
} else {
/* Server Selection Spec: for topology type single and server types
* besides mongos, "clients MUST always set the slaveOK wire protocol
* flag on reads to ensure that any server type can handle the
* request."
*/
result->flags |= MONGOC_QUERY_SLAVE_OK;
}
break;
case MONGOC_TOPOLOGY_RS_NO_PRIMARY:
case MONGOC_TOPOLOGY_RS_WITH_PRIMARY:
/* Server Selection Spec: for RS topology types, "For all read
* preferences modes except primary, clients MUST set the slaveOK wire
* protocol flag to ensure that any suitable server can handle the
* request. Clients MUST NOT set the slaveOK wire protocol flag if the
* read preference mode is primary.
*/
if (read_prefs && read_prefs->mode != MONGOC_READ_PRIMARY) {
result->flags |= MONGOC_QUERY_SLAVE_OK;
}
break;
case MONGOC_TOPOLOGY_SHARDED:
_apply_read_preferences_mongos (read_prefs, query_bson, result);
break;
case MONGOC_TOPOLOGY_UNKNOWN:
case MONGOC_TOPOLOGY_DESCRIPTION_TYPES:
default:
/* must not call _apply_read_preferences with unknown topology type */
BSON_ASSERT (false);
}
EXIT;
}
void
-apply_read_prefs_result_cleanup (mongoc_apply_read_prefs_result_t *result)
+assemble_query_result_cleanup (mongoc_assemble_query_result_t *result)
{
ENTRY;
BSON_ASSERT (result);
if (result->query_owned) {
- bson_destroy (result->query_with_read_prefs);
+ bson_destroy (result->assembled_query);
}
EXIT;
}
/* Return true if @read_prefs is NULL or valid; otherwise set
 * MONGOC_ERROR_COMMAND_INVALID_ARG on @error and return false. */
bool
_mongoc_read_prefs_validate (const mongoc_read_prefs_t *read_prefs,
                             bson_error_t *error)
{
   if (!read_prefs || mongoc_read_prefs_is_valid (read_prefs)) {
      return true;
   }

   bson_set_error (error,
                   MONGOC_ERROR_COMMAND,
                   MONGOC_ERROR_COMMAND_INVALID_ARG,
                   "Invalid mongoc_read_prefs_t");
   return false;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-prefs.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-prefs.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-read-prefs.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-read-prefs.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rpc-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rpc-private.h
similarity index 82%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rpc-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rpc-private.h
index aadf1798..fd296fe0 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rpc-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rpc-private.h
@@ -1,163 +1,181 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_RPC_PRIVATE_H
#define MONGOC_RPC_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include <stddef.h>
#include "mongoc-array-private.h"
#include "mongoc-cmd-private.h"
#include "mongoc-iovec.h"
#include "mongoc-write-concern.h"
#include "mongoc-flags.h"
/* forward declaration */
struct _mongoc_cluster_t;
BSON_BEGIN_DECLS
+typedef struct _mongoc_rpc_section_t {
+ uint8_t payload_type;
+ union {
+ /* payload_type == 0 */
+ const uint8_t *bson_document;
+ /* payload_type == 1 */
+ struct {
+ int32_t size;
+ uint32_t size_le;
+ const char *identifier;
+ const uint8_t *bson_documents;
+ } sequence;
+ } payload;
+} mongoc_rpc_section_t;
#define RPC(_name, _code) \
typedef struct { \
_code \
} mongoc_rpc_##_name##_t;
#define ENUM_FIELD(_name) uint32_t _name;
#define INT32_FIELD(_name) int32_t _name;
#define UINT8_FIELD(_name) uint8_t _name;
+#define UINT32_FIELD(_name) uint32_t _name;
#define INT64_FIELD(_name) int64_t _name;
#define INT64_ARRAY_FIELD(_len, _name) \
int32_t _len; \
int64_t *_name;
#define CSTRING_FIELD(_name) const char *_name;
#define BSON_FIELD(_name) const uint8_t *_name;
#define BSON_ARRAY_FIELD(_name) \
const uint8_t *_name; \
int32_t _name##_len;
#define IOVEC_ARRAY_FIELD(_name) \
const mongoc_iovec_t *_name; \
int32_t n_##_name; \
mongoc_iovec_t _name##_recv;
+#define SECTION_ARRAY_FIELD(_name) \
+ mongoc_rpc_section_t _name[2]; \
+ int32_t n_##_name;
#define RAW_BUFFER_FIELD(_name) \
const uint8_t *_name; \
int32_t _name##_len;
#define BSON_OPTIONAL(_check, _code) _code
#pragma pack(1)
#include "op-delete.def"
#include "op-get-more.def"
#include "op-header.def"
#include "op-insert.def"
#include "op-kill-cursors.def"
-#include "op-msg.def"
#include "op-query.def"
#include "op-reply.def"
#include "op-reply-header.def"
#include "op-update.def"
#include "op-compressed.def"
/* restore default packing */
#pragma pack()
+#include "op-msg.def"
typedef union {
mongoc_rpc_delete_t delete_;
mongoc_rpc_get_more_t get_more;
mongoc_rpc_header_t header;
mongoc_rpc_insert_t insert;
mongoc_rpc_kill_cursors_t kill_cursors;
mongoc_rpc_msg_t msg;
mongoc_rpc_query_t query;
mongoc_rpc_reply_t reply;
mongoc_rpc_reply_header_t reply_header;
mongoc_rpc_update_t update;
mongoc_rpc_compressed_t compressed;
} mongoc_rpc_t;
-BSON_STATIC_ASSERT (sizeof (mongoc_rpc_header_t) == 16);
-BSON_STATIC_ASSERT (offsetof (mongoc_rpc_header_t, opcode) ==
- offsetof (mongoc_rpc_reply_t, opcode));
-BSON_STATIC_ASSERT (sizeof (mongoc_rpc_reply_header_t) == 36);
+BSON_STATIC_ASSERT2 (sizeof_rpc_header, sizeof (mongoc_rpc_header_t) == 16);
+BSON_STATIC_ASSERT2 (offsetof_rpc_header,
+ offsetof (mongoc_rpc_header_t, opcode) ==
+ offsetof (mongoc_rpc_reply_t, opcode));
+BSON_STATIC_ASSERT2 (sizeof_reply_header,
+ sizeof (mongoc_rpc_reply_header_t) == 36);
#undef RPC
#undef ENUM_FIELD
#undef UINT8_FIELD
+#undef UINT32_FIELD
#undef INT32_FIELD
#undef INT64_FIELD
#undef INT64_ARRAY_FIELD
#undef CSTRING_FIELD
#undef BSON_FIELD
#undef BSON_ARRAY_FIELD
#undef IOVEC_ARRAY_FIELD
+#undef SECTION_ARRAY_FIELD
#undef BSON_OPTIONAL
#undef RAW_BUFFER_FIELD
void
_mongoc_rpc_gather (mongoc_rpc_t *rpc, mongoc_array_t *array);
-bool
-_mongoc_rpc_needs_gle (mongoc_rpc_t *rpc,
- const mongoc_write_concern_t *write_concern);
void
_mongoc_rpc_swab_to_le (mongoc_rpc_t *rpc);
void
_mongoc_rpc_swab_from_le (mongoc_rpc_t *rpc);
void
_mongoc_rpc_printf (mongoc_rpc_t *rpc);
bool
_mongoc_rpc_scatter (mongoc_rpc_t *rpc, const uint8_t *buf, size_t buflen);
bool
_mongoc_rpc_scatter_reply_header_only (mongoc_rpc_t *rpc,
const uint8_t *buf,
size_t buflen);
bool
_mongoc_rpc_get_first_document (mongoc_rpc_t *rpc, bson_t *reply);
bool
_mongoc_rpc_reply_get_first (mongoc_rpc_reply_t *reply, bson_t *bson);
void
_mongoc_rpc_prep_command (mongoc_rpc_t *rpc,
const char *cmd_ns,
mongoc_cmd_t *cmd);
bool
_mongoc_rpc_check_ok (mongoc_rpc_t *rpc,
- bool is_command,
int32_t error_api_version,
bson_error_t *error /* OUT */,
bson_t *error_doc /* OUT */);
bool
_mongoc_cmd_check_ok (const bson_t *doc,
int32_t error_api_version,
bson_error_t *error);
bool
_mongoc_rpc_decompress (mongoc_rpc_t *rpc_le, uint8_t *buf, size_t buflen);
char *
_mongoc_rpc_compress (struct _mongoc_cluster_t *cluster,
int32_t compressor_id,
mongoc_rpc_t *rpc_le,
bson_error_t *error);
BSON_END_DECLS
#endif /* MONGOC_RPC_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rpc.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rpc.c
similarity index 78%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rpc.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rpc.c
index aebf8938..ce5ed5c5 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-rpc.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-rpc.c
@@ -1,1145 +1,1227 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bson.h>
#include "mongoc.h"
#include "mongoc-rpc-private.h"
+#include "mongoc-counters-private.h"
#include "mongoc-trace-private.h"
#include "mongoc-util-private.h"
#include "mongoc-compression-private.h"
#include "mongoc-cluster-private.h"
#define RPC(_name, _code) \
static void _mongoc_rpc_gather_##_name (mongoc_rpc_##_name##_t *rpc, \
mongoc_rpc_header_t *header, \
mongoc_array_t *array) \
{ \
mongoc_iovec_t iov; \
BSON_ASSERT (rpc); \
BSON_ASSERT (array); \
header->msg_len = 0; \
_code \
}
#define UINT8_FIELD(_name) \
iov.iov_base = (void *) &rpc->_name; \
iov.iov_len = 1; \
header->msg_len += (int32_t) iov.iov_len; \
_mongoc_array_append_val (array, iov);
+#define UINT32_FIELD(_name) \
+ iov.iov_base = (void *) &rpc->_name; \
+ iov.iov_len = 4; \
+ header->msg_len += (uint32_t) iov.iov_len; \
+ _mongoc_array_append_val (array, iov);
#define INT32_FIELD(_name) \
iov.iov_base = (void *) &rpc->_name; \
iov.iov_len = 4; \
header->msg_len += (int32_t) iov.iov_len; \
_mongoc_array_append_val (array, iov);
#define ENUM_FIELD INT32_FIELD
#define INT64_FIELD(_name) \
iov.iov_base = (void *) &rpc->_name; \
iov.iov_len = 8; \
header->msg_len += (int32_t) iov.iov_len; \
_mongoc_array_append_val (array, iov);
#define CSTRING_FIELD(_name) \
BSON_ASSERT (rpc->_name); \
iov.iov_base = (void *) rpc->_name; \
iov.iov_len = strlen (rpc->_name) + 1; \
header->msg_len += (int32_t) iov.iov_len; \
_mongoc_array_append_val (array, iov);
#define BSON_FIELD(_name) \
do { \
int32_t __l; \
memcpy (&__l, rpc->_name, 4); \
__l = BSON_UINT32_FROM_LE (__l); \
iov.iov_base = (void *) rpc->_name; \
iov.iov_len = __l; \
header->msg_len += (int32_t) iov.iov_len; \
_mongoc_array_append_val (array, iov); \
} while (0);
#define BSON_OPTIONAL(_check, _code) \
if (rpc->_check) { \
_code \
}
#define BSON_ARRAY_FIELD(_name) \
if (rpc->_name##_len) { \
iov.iov_base = (void *) rpc->_name; \
iov.iov_len = rpc->_name##_len; \
header->msg_len += (int32_t) iov.iov_len; \
_mongoc_array_append_val (array, iov); \
}
#define IOVEC_ARRAY_FIELD(_name) \
do { \
ssize_t _i; \
BSON_ASSERT (rpc->n_##_name); \
for (_i = 0; _i < rpc->n_##_name; _i++) { \
BSON_ASSERT (rpc->_name[_i].iov_len); \
header->msg_len += (int32_t) rpc->_name[_i].iov_len; \
_mongoc_array_append_val (array, rpc->_name[_i]); \
} \
} while (0);
+#define SECTION_ARRAY_FIELD(_name) \
+ do { \
+ ssize_t _i; \
+ BSON_ASSERT (rpc->n_##_name); \
+ for (_i = 0; _i < rpc->n_##_name; _i++) { \
+ int32_t __l; \
+ iov.iov_base = (void *) &rpc->_name[_i].payload_type; \
+ iov.iov_len = 1; \
+ header->msg_len += (int32_t) iov.iov_len; \
+ _mongoc_array_append_val (array, iov); \
+ switch (rpc->_name[_i].payload_type) { \
+ case 0: \
+ memcpy (&__l, rpc->_name[_i].payload.bson_document, 4); \
+ __l = BSON_UINT32_FROM_LE (__l); \
+ iov.iov_base = (void *) rpc->_name[_i].payload.bson_document; \
+ iov.iov_len = __l; \
+ break; \
+ case 1: \
+ rpc->_name[_i].payload.sequence.size_le = \
+ BSON_UINT32_TO_LE (rpc->_name[_i].payload.sequence.size); \
+ iov.iov_base = (void *) &rpc->_name[_i].payload.sequence.size_le; \
+ iov.iov_len = 4; \
+ header->msg_len += 4; \
+ _mongoc_array_append_val (array, iov); \
+ iov.iov_base = \
+ (void *) rpc->_name[_i].payload.sequence.identifier; \
+ iov.iov_len = \
+ strlen (rpc->_name[_i].payload.sequence.identifier) + 1; \
+ header->msg_len += (int32_t) iov.iov_len; \
+ _mongoc_array_append_val (array, iov); \
+ iov.iov_base = \
+ (void *) rpc->_name[_i].payload.sequence.bson_documents; \
+ iov.iov_len = \
+ rpc->_name[_i].payload.sequence.size - iov.iov_len - 4; \
+ break; \
+ default: \
+ MONGOC_ERROR ("Unknown Payload Type: %d", \
+ rpc->_name[_i].payload_type); \
+ BSON_ASSERT (0); \
+ } \
+ header->msg_len += (int32_t) iov.iov_len; \
+ _mongoc_array_append_val (array, iov); \
+ } \
+ } while (0);
#define RAW_BUFFER_FIELD(_name) \
iov.iov_base = (void *) rpc->_name; \
iov.iov_len = rpc->_name##_len; \
BSON_ASSERT (iov.iov_len); \
header->msg_len += (int32_t) iov.iov_len; \
_mongoc_array_append_val (array, iov);
#define INT64_ARRAY_FIELD(_len, _name) \
iov.iov_base = (void *) &rpc->_len; \
iov.iov_len = 4; \
header->msg_len += (int32_t) iov.iov_len; \
_mongoc_array_append_val (array, iov); \
iov.iov_base = (void *) rpc->_name; \
iov.iov_len = rpc->_len * 8; \
BSON_ASSERT (iov.iov_len); \
header->msg_len += (int32_t) iov.iov_len; \
_mongoc_array_append_val (array, iov);
#include "op-delete.def"
#include "op-get-more.def"
#include "op-insert.def"
#include "op-kill-cursors.def"
#include "op-msg.def"
#include "op-query.def"
#include "op-reply.def"
#include "op-compressed.def"
#include "op-update.def"
#undef RPC
#undef ENUM_FIELD
#undef UINT8_FIELD
+#undef UINT32_FIELD
#undef INT32_FIELD
#undef INT64_FIELD
#undef INT64_ARRAY_FIELD
#undef CSTRING_FIELD
#undef BSON_FIELD
#undef BSON_ARRAY_FIELD
#undef IOVEC_ARRAY_FIELD
+#undef SECTION_ARRAY_FIELD
#undef RAW_BUFFER_FIELD
#undef BSON_OPTIONAL
#if BSON_BYTE_ORDER == BSON_BIG_ENDIAN
#define RPC(_name, _code) \
static void _mongoc_rpc_swab_to_le_##_name (mongoc_rpc_##_name##_t *rpc) \
{ \
BSON_ASSERT (rpc); \
_code \
}
#define UINT8_FIELD(_name)
+#define UINT32_FIELD(_name)
#define INT32_FIELD(_name) rpc->_name = BSON_UINT32_FROM_LE (rpc->_name);
#define ENUM_FIELD INT32_FIELD
#define INT64_FIELD(_name) rpc->_name = BSON_UINT64_FROM_LE (rpc->_name);
#define CSTRING_FIELD(_name)
#define BSON_FIELD(_name)
#define BSON_ARRAY_FIELD(_name)
#define IOVEC_ARRAY_FIELD(_name)
+#define SECTION_ARRAY_FIELD(_name)
#define BSON_OPTIONAL(_check, _code) \
if (rpc->_check) { \
_code \
}
#define RAW_BUFFER_FIELD(_name)
#define INT64_ARRAY_FIELD(_len, _name) \
do { \
ssize_t i; \
for (i = 0; i < rpc->_len; i++) { \
rpc->_name[i] = BSON_UINT64_FROM_LE (rpc->_name[i]); \
} \
rpc->_len = BSON_UINT32_FROM_LE (rpc->_len); \
} while (0);
#include "op-delete.def"
#include "op-get-more.def"
#include "op-insert.def"
#include "op-kill-cursors.def"
#include "op-msg.def"
#include "op-query.def"
#include "op-reply.def"
#include "op-compressed.def"
#include "op-update.def"
#undef RPC
#undef INT64_ARRAY_FIELD
#define RPC(_name, _code) \
static void _mongoc_rpc_swab_from_le_##_name (mongoc_rpc_##_name##_t *rpc) \
{ \
BSON_ASSERT (rpc); \
_code \
}
#define INT64_ARRAY_FIELD(_len, _name) \
do { \
ssize_t i; \
rpc->_len = BSON_UINT32_FROM_LE (rpc->_len); \
for (i = 0; i < rpc->_len; i++) { \
rpc->_name[i] = BSON_UINT64_FROM_LE (rpc->_name[i]); \
} \
} while (0);
#include "op-delete.def"
#include "op-get-more.def"
#include "op-insert.def"
#include "op-kill-cursors.def"
#include "op-msg.def"
#include "op-query.def"
#include "op-reply.def"
#include "op-compressed.def"
#include "op-update.def"
#undef RPC
#undef ENUM_FIELD
#undef UINT8_FIELD
+#undef UINT32_FIELD
#undef INT32_FIELD
#undef INT64_FIELD
#undef INT64_ARRAY_FIELD
#undef CSTRING_FIELD
#undef BSON_FIELD
#undef BSON_ARRAY_FIELD
#undef IOVEC_ARRAY_FIELD
+#undef SECTION_ARRAY_FIELD
#undef BSON_OPTIONAL
#undef RAW_BUFFER_FIELD
#endif /* BSON_BYTE_ORDER == BSON_BIG_ENDIAN */
#define RPC(_name, _code) \
static void _mongoc_rpc_printf_##_name (mongoc_rpc_##_name##_t *rpc) \
{ \
BSON_ASSERT (rpc); \
_code \
}
#define UINT8_FIELD(_name) printf (" " #_name " : %u\n", rpc->_name);
+#define UINT32_FIELD(_name) printf (" " #_name " : %u\n", rpc->_name);
#define INT32_FIELD(_name) printf (" " #_name " : %d\n", rpc->_name);
#define ENUM_FIELD(_name) printf (" " #_name " : %u\n", rpc->_name);
#define INT64_FIELD(_name) \
printf (" " #_name " : %" PRIi64 "\n", (int64_t) rpc->_name);
#define CSTRING_FIELD(_name) printf (" " #_name " : %s\n", rpc->_name);
-#define BSON_FIELD(_name) \
- do { \
- bson_t b; \
- char *s; \
- int32_t __l; \
- memcpy (&__l, rpc->_name, 4); \
- __l = BSON_UINT32_FROM_LE (__l); \
- bson_init_static (&b, rpc->_name, __l); \
- s = bson_as_canonical_extended_json (&b, NULL); \
- printf (" " #_name " : %s\n", s); \
- bson_free (s); \
- bson_destroy (&b); \
+#define BSON_FIELD(_name) \
+ do { \
+ bson_t b; \
+ char *s; \
+ int32_t __l; \
+ memcpy (&__l, rpc->_name, 4); \
+ __l = BSON_UINT32_FROM_LE (__l); \
+ bson_init_static (&b, rpc->_name, __l); \
+ s = bson_as_relaxed_extended_json (&b, NULL); \
+ printf (" " #_name " : %s\n", s); \
+ bson_free (s); \
+ bson_destroy (&b); \
} while (0);
#define BSON_ARRAY_FIELD(_name) \
do { \
bson_reader_t *__r; \
bool __eof; \
const bson_t *__b; \
__r = bson_reader_new_from_data (rpc->_name, rpc->_name##_len); \
while ((__b = bson_reader_read (__r, &__eof))) { \
- char *s = bson_as_canonical_extended_json (__b, NULL); \
+ char *s = bson_as_relaxed_extended_json (__b, NULL); \
printf (" " #_name " : %s\n", s); \
bson_free (s); \
} \
bson_reader_destroy (__r); \
} while (0);
#define IOVEC_ARRAY_FIELD(_name) \
do { \
ssize_t _i; \
size_t _j; \
for (_i = 0; _i < rpc->n_##_name; _i++) { \
printf (" " #_name " : "); \
for (_j = 0; _j < rpc->_name[_i].iov_len; _j++) { \
uint8_t u; \
u = ((char *) rpc->_name[_i].iov_base)[_j]; \
printf (" %02x", u); \
} \
printf ("\n"); \
} \
} while (0);
+#define SECTION_ARRAY_FIELD(_name) \
+ do { \
+ ssize_t _i; \
+ printf (" " #_name " : %d\n", rpc->n_##_name); \
+ for (_i = 0; _i < rpc->n_##_name; _i++) { \
+ if (rpc->_name[_i].payload_type == 0) { \
+ do { \
+ bson_t b; \
+ char *s; \
+ int32_t __l; \
+ memcpy (&__l, rpc->_name[_i].payload.bson_document, 4); \
+ __l = BSON_UINT32_FROM_LE (__l); \
+ bson_init_static ( \
+ &b, rpc->_name[_i].payload.bson_document, __l); \
+ s = bson_as_relaxed_extended_json (&b, NULL); \
+ printf (" Type %d: %s\n", rpc->_name[_i].payload_type, s); \
+ bson_free (s); \
+ bson_destroy (&b); \
+ } while (0); \
+ } else if (rpc->_name[_i].payload_type == 1) { \
+ bson_reader_t *__r; \
+ int max = rpc->_name[_i].payload.sequence.size - \
+ strlen (rpc->_name[_i].payload.sequence.identifier) - \
+ 1 - sizeof (int32_t); \
+ bool __eof; \
+ const bson_t *__b; \
+ printf (" Identifier: %s\n", \
+ rpc->_name[_i].payload.sequence.identifier); \
+ printf (" Size: %d\n", max); \
+ __r = bson_reader_new_from_data ( \
+ rpc->_name[_i].payload.sequence.bson_documents, max); \
+ while ((__b = bson_reader_read (__r, &__eof))) { \
+ char *s = bson_as_relaxed_extended_json (__b, NULL); \
+ bson_free (s); \
+ } \
+ bson_reader_destroy (__r); \
+ } \
+ } \
+ } while (0);
#define BSON_OPTIONAL(_check, _code) \
if (rpc->_check) { \
_code \
}
#define RAW_BUFFER_FIELD(_name) \
{ \
ssize_t __i; \
printf (" " #_name " :"); \
for (__i = 0; __i < rpc->_name##_len; __i++) { \
uint8_t u; \
u = ((char *) rpc->_name)[__i]; \
printf (" %02x", u); \
} \
printf ("\n"); \
}
#define INT64_ARRAY_FIELD(_len, _name) \
do { \
ssize_t i; \
for (i = 0; i < rpc->_len; i++) { \
printf (" " #_name " : %" PRIi64 "\n", (int64_t) rpc->_name[i]); \
} \
rpc->_len = BSON_UINT32_FROM_LE (rpc->_len); \
} while (0);
#include "op-delete.def"
#include "op-get-more.def"
#include "op-insert.def"
#include "op-kill-cursors.def"
#include "op-msg.def"
#include "op-query.def"
#include "op-reply.def"
#include "op-compressed.def"
#include "op-update.def"
#undef RPC
#undef ENUM_FIELD
#undef UINT8_FIELD
+#undef UINT32_FIELD
#undef INT32_FIELD
#undef INT64_FIELD
#undef INT64_ARRAY_FIELD
#undef CSTRING_FIELD
#undef BSON_FIELD
#undef BSON_ARRAY_FIELD
#undef IOVEC_ARRAY_FIELD
+#undef SECTION_ARRAY_FIELD
#undef BSON_OPTIONAL
#undef RAW_BUFFER_FIELD
#define RPC(_name, _code) \
static bool _mongoc_rpc_scatter_##_name ( \
mongoc_rpc_##_name##_t *rpc, const uint8_t *buf, size_t buflen) \
{ \
BSON_ASSERT (rpc); \
BSON_ASSERT (buf); \
BSON_ASSERT (buflen); \
_code return true; \
}
#define UINT8_FIELD(_name) \
if (buflen < 1) { \
return false; \
} \
memcpy (&rpc->_name, buf, 1); \
buflen -= 1; \
buf += 1;
+#define UINT32_FIELD(_name) \
+ if (buflen < 4) { \
+ return false; \
+ } \
+ memcpy (&rpc->_name, buf, 4); \
+ buflen -= 4; \
+ buf += 4;
#define INT32_FIELD(_name) \
if (buflen < 4) { \
return false; \
} \
memcpy (&rpc->_name, buf, 4); \
buflen -= 4; \
buf += 4;
#define ENUM_FIELD INT32_FIELD
#define INT64_FIELD(_name) \
if (buflen < 8) { \
return false; \
} \
memcpy (&rpc->_name, buf, 8); \
buflen -= 8; \
buf += 8;
#define INT64_ARRAY_FIELD(_len, _name) \
do { \
size_t needed; \
if (buflen < 4) { \
return false; \
} \
memcpy (&rpc->_len, buf, 4); \
buflen -= 4; \
buf += 4; \
needed = BSON_UINT32_FROM_LE (rpc->_len) * 8; \
if (needed > buflen) { \
return false; \
} \
rpc->_name = (int64_t *) buf; \
buf += needed; \
buflen -= needed; \
} while (0);
#define CSTRING_FIELD(_name) \
do { \
size_t __i; \
bool found = false; \
for (__i = 0; __i < buflen; __i++) { \
if (!buf[__i]) { \
rpc->_name = (const char *) buf; \
buflen -= __i + 1; \
buf += __i + 1; \
found = true; \
break; \
} \
} \
if (!found) { \
return false; \
} \
} while (0);
#define BSON_FIELD(_name) \
do { \
uint32_t __l; \
if (buflen < 4) { \
return false; \
} \
memcpy (&__l, buf, 4); \
__l = BSON_UINT32_FROM_LE (__l); \
if (__l < 5 || __l > buflen) { \
return false; \
} \
rpc->_name = (uint8_t *) buf; \
buf += __l; \
buflen -= __l; \
} while (0);
#define BSON_ARRAY_FIELD(_name) \
rpc->_name = (uint8_t *) buf; \
rpc->_name##_len = (int32_t) buflen; \
buf = NULL; \
buflen = 0;
#define BSON_OPTIONAL(_check, _code) \
if (buflen) { \
_code \
}
#define IOVEC_ARRAY_FIELD(_name) \
rpc->_name##_recv.iov_base = (void *) buf; \
rpc->_name##_recv.iov_len = buflen; \
rpc->_name = &rpc->_name##_recv; \
rpc->n_##_name = 1; \
buf = NULL; \
buflen = 0;
+#define SECTION_ARRAY_FIELD(_name) \
+ do { \
+ uint32_t __l; \
+ mongoc_rpc_section_t *section = &rpc->_name[rpc->n_##_name]; \
+ section->payload_type = buf[0]; \
+ buf++; \
+ buflen -= 1; \
+ memcpy (&__l, buf, 4); \
+ __l = BSON_UINT32_FROM_LE (__l); \
+ section->payload.bson_document = (uint8_t *) buf; \
+ buf += __l; \
+ buflen -= __l; \
+ } while (0); \
+ rpc->n_##_name++;
#define RAW_BUFFER_FIELD(_name) \
rpc->_name = (void *) buf; \
rpc->_name##_len = (int32_t) buflen; \
buf = NULL; \
buflen = 0;
#include "op-delete.def"
#include "op-get-more.def"
#include "op-header.def"
#include "op-insert.def"
#include "op-kill-cursors.def"
#include "op-msg.def"
#include "op-query.def"
#include "op-reply.def"
#include "op-reply-header.def"
#include "op-compressed.def"
#include "op-update.def"
#undef RPC
#undef ENUM_FIELD
#undef UINT8_FIELD
+#undef UINT32_FIELD
#undef INT32_FIELD
#undef INT64_FIELD
#undef INT64_ARRAY_FIELD
#undef CSTRING_FIELD
#undef BSON_FIELD
#undef BSON_ARRAY_FIELD
#undef IOVEC_ARRAY_FIELD
+#undef SECTION_ARRAY_FIELD
#undef BSON_OPTIONAL
#undef RAW_BUFFER_FIELD
/*
*--------------------------------------------------------------------------
*
* _mongoc_rpc_gather --
*
* Takes a (native endian) rpc struct and gathers the buffer.
* Caller should swab to little endian after calling gather.
*
* Gather, swab, compress write.
* Read, scatter, uncompress, swab
*
*--------------------------------------------------------------------------
*/
void
_mongoc_rpc_gather (mongoc_rpc_t *rpc, mongoc_array_t *array)
{
+ mongoc_counter_op_egress_total_inc ();
switch ((mongoc_opcode_t) rpc->header.opcode) {
case MONGOC_OPCODE_REPLY:
_mongoc_rpc_gather_reply (&rpc->reply, &rpc->header, array);
return;
+
case MONGOC_OPCODE_MSG:
_mongoc_rpc_gather_msg (&rpc->msg, &rpc->header, array);
+ mongoc_counter_op_egress_msg_inc ();
return;
+
case MONGOC_OPCODE_UPDATE:
_mongoc_rpc_gather_update (&rpc->update, &rpc->header, array);
+ mongoc_counter_op_egress_update_inc ();
return;
+
case MONGOC_OPCODE_INSERT:
_mongoc_rpc_gather_insert (&rpc->insert, &rpc->header, array);
+ mongoc_counter_op_egress_insert_inc ();
return;
+
case MONGOC_OPCODE_QUERY:
_mongoc_rpc_gather_query (&rpc->query, &rpc->header, array);
+ mongoc_counter_op_egress_query_inc ();
return;
+
case MONGOC_OPCODE_GET_MORE:
_mongoc_rpc_gather_get_more (&rpc->get_more, &rpc->header, array);
+ mongoc_counter_op_egress_getmore_inc ();
return;
+
case MONGOC_OPCODE_DELETE:
_mongoc_rpc_gather_delete (&rpc->delete_, &rpc->header, array);
+ mongoc_counter_op_egress_delete_inc ();
return;
+
case MONGOC_OPCODE_KILL_CURSORS:
_mongoc_rpc_gather_kill_cursors (&rpc->kill_cursors, &rpc->header, array);
+ mongoc_counter_op_egress_killcursors_inc ();
return;
+
case MONGOC_OPCODE_COMPRESSED:
_mongoc_rpc_gather_compressed (&rpc->compressed, &rpc->header, array);
+ mongoc_counter_op_egress_compressed_inc ();
return;
+
default:
MONGOC_WARNING ("Unknown rpc type: 0x%08x", rpc->header.opcode);
+ BSON_ASSERT (false);
break;
}
}
void
_mongoc_rpc_swab_to_le (mongoc_rpc_t *rpc)
{
#if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN
mongoc_opcode_t opcode;
opcode = rpc->header.opcode;
switch (opcode) {
case MONGOC_OPCODE_REPLY:
_mongoc_rpc_swab_to_le_reply (&rpc->reply);
break;
case MONGOC_OPCODE_MSG:
_mongoc_rpc_swab_to_le_msg (&rpc->msg);
break;
case MONGOC_OPCODE_UPDATE:
_mongoc_rpc_swab_to_le_update (&rpc->update);
break;
case MONGOC_OPCODE_INSERT:
_mongoc_rpc_swab_to_le_insert (&rpc->insert);
break;
case MONGOC_OPCODE_QUERY:
_mongoc_rpc_swab_to_le_query (&rpc->query);
break;
case MONGOC_OPCODE_GET_MORE:
_mongoc_rpc_swab_to_le_get_more (&rpc->get_more);
break;
case MONGOC_OPCODE_DELETE:
_mongoc_rpc_swab_to_le_delete (&rpc->delete_);
break;
case MONGOC_OPCODE_KILL_CURSORS:
_mongoc_rpc_swab_to_le_kill_cursors (&rpc->kill_cursors);
break;
case MONGOC_OPCODE_COMPRESSED:
_mongoc_rpc_swab_to_le_compressed (&rpc->compressed);
break;
default:
MONGOC_WARNING ("Unknown rpc type: 0x%08x", opcode);
break;
}
#endif
#if 0
- _mongoc_rpc_printf (&rpc);
+ _mongoc_rpc_printf (rpc);
#endif
}
void
_mongoc_rpc_swab_from_le (mongoc_rpc_t *rpc)
{
#if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN
mongoc_opcode_t opcode;
opcode = BSON_UINT32_FROM_LE (rpc->header.opcode);
switch (opcode) {
case MONGOC_OPCODE_REPLY:
_mongoc_rpc_swab_from_le_reply (&rpc->reply);
break;
case MONGOC_OPCODE_MSG:
_mongoc_rpc_swab_from_le_msg (&rpc->msg);
break;
case MONGOC_OPCODE_UPDATE:
_mongoc_rpc_swab_from_le_update (&rpc->update);
break;
case MONGOC_OPCODE_INSERT:
_mongoc_rpc_swab_from_le_insert (&rpc->insert);
break;
case MONGOC_OPCODE_QUERY:
_mongoc_rpc_swab_from_le_query (&rpc->query);
break;
case MONGOC_OPCODE_GET_MORE:
_mongoc_rpc_swab_from_le_get_more (&rpc->get_more);
break;
case MONGOC_OPCODE_DELETE:
_mongoc_rpc_swab_from_le_delete (&rpc->delete_);
break;
case MONGOC_OPCODE_KILL_CURSORS:
_mongoc_rpc_swab_from_le_kill_cursors (&rpc->kill_cursors);
break;
case MONGOC_OPCODE_COMPRESSED:
_mongoc_rpc_swab_from_le_compressed (&rpc->compressed);
break;
default:
MONGOC_WARNING ("Unknown rpc type: 0x%08x", rpc->header.opcode);
break;
}
#endif
#if 0
- _mongoc_rpc_printf (&rpc);
+ _mongoc_rpc_printf (rpc);
#endif
}
void
_mongoc_rpc_printf (mongoc_rpc_t *rpc)
{
switch ((mongoc_opcode_t) rpc->header.opcode) {
case MONGOC_OPCODE_REPLY:
_mongoc_rpc_printf_reply (&rpc->reply);
break;
case MONGOC_OPCODE_MSG:
_mongoc_rpc_printf_msg (&rpc->msg);
break;
case MONGOC_OPCODE_UPDATE:
_mongoc_rpc_printf_update (&rpc->update);
break;
case MONGOC_OPCODE_INSERT:
_mongoc_rpc_printf_insert (&rpc->insert);
break;
case MONGOC_OPCODE_QUERY:
_mongoc_rpc_printf_query (&rpc->query);
break;
case MONGOC_OPCODE_GET_MORE:
_mongoc_rpc_printf_get_more (&rpc->get_more);
break;
case MONGOC_OPCODE_DELETE:
_mongoc_rpc_printf_delete (&rpc->delete_);
break;
case MONGOC_OPCODE_KILL_CURSORS:
_mongoc_rpc_printf_kill_cursors (&rpc->kill_cursors);
break;
case MONGOC_OPCODE_COMPRESSED:
_mongoc_rpc_printf_compressed (&rpc->compressed);
break;
default:
MONGOC_WARNING ("Unknown rpc type: 0x%08x", rpc->header.opcode);
break;
}
+ printf ("\n");
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_rpc_decompress --
*
* Takes a (little endian) rpc struct assumed to be OP_COMPRESSED
* and decompresses the opcode into its original opcode.
* The in-place updated rpc struct remains little endian.
*
* Side effects:
* Overwrites the RPC, along with the provided buf with the
* compressed results.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_rpc_decompress (mongoc_rpc_t *rpc_le, uint8_t *buf, size_t buflen)
{
size_t uncompressed_size =
BSON_UINT32_FROM_LE (rpc_le->compressed.uncompressed_size);
bool ok;
size_t msg_len = BSON_UINT32_TO_LE (buflen);
BSON_ASSERT (uncompressed_size <= buflen);
memcpy (buf, (void *) (&msg_len), 4);
memcpy (buf + 4, (void *) (&rpc_le->header.request_id), 4);
memcpy (buf + 8, (void *) (&rpc_le->header.response_to), 4);
memcpy (buf + 12, (void *) (&rpc_le->compressed.original_opcode), 4);
ok = mongoc_uncompress (rpc_le->compressed.compressor_id,
rpc_le->compressed.compressed_message,
rpc_le->compressed.compressed_message_len,
buf + 16,
&uncompressed_size);
if (ok) {
return _mongoc_rpc_scatter (rpc_le, buf, buflen);
}
return false;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_rpc_compress --
*
* Takes a (little endian) rpc struct and creates a OP_COMPRESSED
* compressed opcode based on the provided compressor_id.
* The in-place updated rpc struct remains little endian.
*
* Side effects:
* Overwrites the RPC, and clears and overwrites the cluster buffer
* with the compressed results.
*
*--------------------------------------------------------------------------
*/
char *
_mongoc_rpc_compress (struct _mongoc_cluster_t *cluster,
int32_t compressor_id,
mongoc_rpc_t *rpc_le,
bson_error_t *error)
{
char *output;
size_t output_length = 0;
size_t allocate = BSON_UINT32_FROM_LE (rpc_le->header.msg_len) - 16;
char *data;
int size;
int32_t compression_level = -1;
if (compressor_id == MONGOC_COMPRESSOR_ZLIB_ID) {
compression_level = mongoc_uri_get_option_as_int32 (
cluster->uri, MONGOC_URI_ZLIBCOMPRESSIONLEVEL, -1);
}
BSON_ASSERT (allocate > 0);
data = bson_malloc0 (allocate);
size = _mongoc_cluster_buffer_iovec (
cluster->iov.data, cluster->iov.len, 16, data);
BSON_ASSERT (size);
output_length =
mongoc_compressor_max_compressed_length (compressor_id, size);
if (!output_length) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
"Could not determine compression bounds for %s",
mongoc_compressor_id_to_name (compressor_id));
bson_free (data);
return NULL;
}
output = (char *) bson_malloc0 (output_length);
if (mongoc_compress (compressor_id,
compression_level,
data,
size,
output,
&output_length)) {
rpc_le->header.msg_len = 0;
rpc_le->compressed.original_opcode =
BSON_UINT32_FROM_LE (rpc_le->header.opcode);
rpc_le->header.opcode = MONGOC_OPCODE_COMPRESSED;
rpc_le->header.request_id =
BSON_UINT32_FROM_LE (rpc_le->header.request_id);
rpc_le->header.response_to =
BSON_UINT32_FROM_LE (rpc_le->header.response_to);
rpc_le->compressed.uncompressed_size = size;
rpc_le->compressed.compressor_id = compressor_id;
rpc_le->compressed.compressed_message = (const uint8_t *) output;
rpc_le->compressed.compressed_message_len = output_length;
bson_free (data);
_mongoc_array_destroy (&cluster->iov);
_mongoc_array_init (&cluster->iov, sizeof (mongoc_iovec_t));
_mongoc_rpc_gather (rpc_le, &cluster->iov);
_mongoc_rpc_swab_to_le (rpc_le);
return output;
} else {
MONGOC_WARNING ("Could not compress data with %s",
mongoc_compressor_id_to_name (compressor_id));
}
bson_free (data);
bson_free (output);
return NULL;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_rpc_scatter --
*
* Takes a (little endian) rpc struct and scatters the buffer.
* Caller should check if resulting opcode is OP_COMPRESSED
* BEFORE swabbing to native endianness.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_rpc_scatter (mongoc_rpc_t *rpc, const uint8_t *buf, size_t buflen)
{
mongoc_opcode_t opcode;
memset (rpc, 0, sizeof *rpc);
if (BSON_UNLIKELY (buflen < 16)) {
return false;
}
+ mongoc_counter_op_ingress_total_inc ();
if (!_mongoc_rpc_scatter_header (&rpc->header, buf, 16)) {
return false;
}
opcode = (mongoc_opcode_t) BSON_UINT32_FROM_LE (rpc->header.opcode);
switch (opcode) {
case MONGOC_OPCODE_COMPRESSED:
+ mongoc_counter_op_ingress_compressed_inc ();
return _mongoc_rpc_scatter_compressed (&rpc->compressed, buf, buflen);
+
case MONGOC_OPCODE_REPLY:
+ mongoc_counter_op_ingress_reply_inc ();
return _mongoc_rpc_scatter_reply (&rpc->reply, buf, buflen);
+
case MONGOC_OPCODE_MSG:
+ mongoc_counter_op_ingress_msg_inc ();
return _mongoc_rpc_scatter_msg (&rpc->msg, buf, buflen);
+
+
+ /* useless, we are never *getting* these opcodes */
case MONGOC_OPCODE_UPDATE:
return _mongoc_rpc_scatter_update (&rpc->update, buf, buflen);
+
case MONGOC_OPCODE_INSERT:
return _mongoc_rpc_scatter_insert (&rpc->insert, buf, buflen);
+
case MONGOC_OPCODE_QUERY:
return _mongoc_rpc_scatter_query (&rpc->query, buf, buflen);
+
case MONGOC_OPCODE_GET_MORE:
return _mongoc_rpc_scatter_get_more (&rpc->get_more, buf, buflen);
+
case MONGOC_OPCODE_DELETE:
return _mongoc_rpc_scatter_delete (&rpc->delete_, buf, buflen);
+
case MONGOC_OPCODE_KILL_CURSORS:
return _mongoc_rpc_scatter_kill_cursors (&rpc->kill_cursors, buf, buflen);
+
default:
MONGOC_WARNING ("Unknown rpc type: 0x%08x", opcode);
return false;
}
}
bool
_mongoc_rpc_scatter_reply_header_only (mongoc_rpc_t *rpc,
const uint8_t *buf,
size_t buflen)
{
if (BSON_UNLIKELY (buflen < sizeof (mongoc_rpc_reply_header_t))) {
return false;
}
return _mongoc_rpc_scatter_reply_header (&rpc->reply_header, buf, buflen);
}
bool
_mongoc_rpc_get_first_document (mongoc_rpc_t *rpc, bson_t *reply)
{
if (rpc->header.opcode == MONGOC_OPCODE_REPLY &&
_mongoc_rpc_reply_get_first (&rpc->reply, reply)) {
return true;
}
return false;
}
bool
_mongoc_rpc_reply_get_first (mongoc_rpc_reply_t *reply, bson_t *bson)
{
int32_t len;
if (!reply->documents || reply->documents_len < 4) {
return false;
}
memcpy (&len, reply->documents, 4);
len = BSON_UINT32_FROM_LE (len);
if (reply->documents_len < len) {
return false;
}
return bson_init_static (bson, reply->documents, len);
}
-/*
- *--------------------------------------------------------------------------
- *
- * _mongoc_rpc_needs_gle --
- *
- * Checks to see if an rpc requires a getlasterror command to
- * determine the success of the rpc.
- *
- * The write_concern is checked to ensure that the caller wants
- * to know about a failure.
- *
- * Returns:
- * true if a getlasterror should be delivered; otherwise false.
- *
- * Side effects:
- * None.
- *
- *--------------------------------------------------------------------------
- */
-
-bool
-_mongoc_rpc_needs_gle (mongoc_rpc_t *rpc,
- const mongoc_write_concern_t *write_concern)
-{
- switch (rpc->header.opcode) {
- case MONGOC_OPCODE_REPLY:
- case MONGOC_OPCODE_QUERY:
- case MONGOC_OPCODE_MSG:
- case MONGOC_OPCODE_GET_MORE:
- case MONGOC_OPCODE_KILL_CURSORS:
- case MONGOC_OPCODE_COMPRESSED:
- return false;
- case MONGOC_OPCODE_INSERT:
- case MONGOC_OPCODE_UPDATE:
- case MONGOC_OPCODE_DELETE:
- default:
- break;
- }
-
- if (!write_concern || !mongoc_write_concern_get_w (write_concern)) {
- return false;
- }
-
- return true;
-}
-
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_rpc_prep_command --
 *
 *       Prepare an RPC for mongoc_cluster_run_command_rpc, encoding the
 *       command @cmd as a legacy OP_QUERY against namespace @cmd_ns.
 *       Both @cmd_ns and @cmd are borrowed: they must not be freed or
 *       modified while the RPC is in use.
 *
 * Side effects:
 *       Overwrites every field of @rpc, leaving pointers into @cmd_ns
 *       and @cmd->command.  Header fields are zeroed; the sender is
 *       expected to assign msg_len/request_id before transmission.
 *
 *--------------------------------------------------------------------------
 */
void
_mongoc_rpc_prep_command (mongoc_rpc_t *rpc,
                          const char *cmd_ns,
                          mongoc_cmd_t *cmd)
{
   rpc->header.msg_len = 0;
   rpc->header.request_id = 0;
   rpc->header.response_to = 0;
   rpc->header.opcode = MONGOC_OPCODE_QUERY;
   rpc->query.collection = cmd_ns;
   rpc->query.skip = 0;
   /* -1: return a single batch containing the whole command reply */
   rpc->query.n_return = -1;
   rpc->query.fields = NULL;
   rpc->query.query = bson_get_data (cmd->command);
   /* Find, getMore And killCursors Commands Spec: "When sending a find command
    * rather than a legacy OP_QUERY find, only the slaveOk flag is honored."
    * For other cursor-typed commands like aggregate, only slaveOk can be set.
    * Clear bits except slaveOk; leave slaveOk set only if it is already.
    */
   rpc->query.flags = cmd->query_flags & MONGOC_QUERY_SLAVE_OK;
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_cmd_check_ok --
 *
 *       Inspect a server command reply document for an error.
 *       Does *not* check for writeConcernError.
 *
 * Returns:
 *       true if @doc reports success ("ok" is truthy), false otherwise.
 *
 * Side effects:
 *       On error, fills @error (if non-NULL) with a domain chosen by
 *       @error_api_version, the server's "code", and its "errmsg" or
 *       legacy "$err" message.
 *
 *--------------------------------------------------------------------------
 */
bool
_mongoc_cmd_check_ok (const bson_t *doc,
                      int32_t error_api_version,
                      bson_error_t *error)
{
   bson_iter_t iter;
   uint32_t code = MONGOC_ERROR_QUERY_FAILURE;
   const char *msg = "Unknown command error";
   mongoc_error_domain_t domain;

   ENTRY;

   BSON_ASSERT (doc);

   domain = error_api_version >= MONGOC_ERROR_API_VERSION_2
               ? MONGOC_ERROR_SERVER
               : MONGOC_ERROR_QUERY;

   /* a truthy "ok" field means the command succeeded */
   if (bson_iter_init_find (&iter, doc, "ok") && bson_iter_as_bool (&iter)) {
      RETURN (true);
   }

   if (bson_iter_init_find (&iter, doc, "code") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      code = (uint32_t) bson_iter_int32 (&iter);
   }

   /* normalize "protocol error" variants to COMMAND_NOT_FOUND */
   if (code == MONGOC_ERROR_PROTOCOL_ERROR || code == 13390) {
      code = MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND;
   }

   /* prefer the modern "errmsg" field, then the legacy "$err" */
   if (bson_iter_init_find (&iter, doc, "errmsg") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      msg = bson_iter_utf8 (&iter, NULL);
   } else if (bson_iter_init_find (&iter, doc, "$err") &&
              BSON_ITER_HOLDS_UTF8 (&iter)) {
      msg = bson_iter_utf8 (&iter, NULL);
   }

   bson_set_error (error, domain, code, "%s", msg);

   /* there was a command error */
   RETURN (false);
}
/* Translate an OP_QUERY error reply document into @error.
 * Reads the server's "code" and legacy "$err" fields, falling back to
 * MONGOC_ERROR_QUERY_FAILURE / "Unknown query failure" when absent. */
static void
_mongoc_populate_query_error (const bson_t *doc,
                              int32_t error_api_version,
                              bson_error_t *error)
{
   bson_iter_t iter;
   uint32_t code = MONGOC_ERROR_QUERY_FAILURE;
   const char *msg = "Unknown query failure";
   mongoc_error_domain_t domain;

   ENTRY;

   BSON_ASSERT (doc);

   domain = error_api_version >= MONGOC_ERROR_API_VERSION_2
               ? MONGOC_ERROR_SERVER
               : MONGOC_ERROR_QUERY;

   if (bson_iter_init_find (&iter, doc, "code") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      code = (uint32_t) bson_iter_int32 (&iter);
   }

   if (bson_iter_init_find (&iter, doc, "$err") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      msg = bson_iter_utf8 (&iter, NULL);
   }

   bson_set_error (error, domain, code, "%s", msg);

   EXIT;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_rpc_check_ok --
*
* Check if a server OP_REPLY is an error message.
* Optionally fill out a bson_error_t from the server error.
* @error_document must be an initialized bson_t or NULL.
* Does *not* check for writeConcernError.
*
* Returns:
* false if the reply is an error message, true otherwise.
*
* Side effects:
* If rpc is an error reply and @error is not NULL, set its
* domain, code, and message.
*
* If rpc is an error reply and @error_document is not NULL,
* it is reinitialized with the server reply.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_rpc_check_ok (mongoc_rpc_t *rpc,
- bool is_command,
int32_t error_api_version,
bson_error_t *error /* OUT */,
bson_t *error_doc /* OUT */)
{
bson_t b;
- bool r;
ENTRY;
BSON_ASSERT (rpc);
if (rpc->header.opcode != MONGOC_OPCODE_REPLY) {
bson_set_error (error,
MONGOC_ERROR_PROTOCOL,
MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
"Received rpc other than OP_REPLY.");
RETURN (false);
}
- if (is_command) {
- if (rpc->reply.n_returned != 1) {
- bson_set_error (error,
- MONGOC_ERROR_PROTOCOL,
- MONGOC_ERROR_PROTOCOL_INVALID_REPLY,
- "Expected only one reply document, got %d",
- rpc->reply.n_returned);
- RETURN (false);
- }
-
- if (_mongoc_rpc_get_first_document (rpc, &b)) {
- r = _mongoc_cmd_check_ok (&b, error_api_version, error);
- if (!r && error_doc) {
- bson_destroy (error_doc);
- bson_copy_to (&b, error_doc);
- }
-
- bson_destroy (&b);
- RETURN (r);
- } else {
- bson_set_error (error,
- MONGOC_ERROR_BSON,
- MONGOC_ERROR_BSON_INVALID,
- "Failed to decode document from the server.");
- RETURN (false);
- }
- } else if (rpc->reply.flags & MONGOC_REPLY_QUERY_FAILURE) {
+ if (rpc->reply.flags & MONGOC_REPLY_QUERY_FAILURE) {
if (_mongoc_rpc_get_first_document (rpc, &b)) {
_mongoc_populate_query_error (&b, error_api_version, error);
if (error_doc) {
bson_destroy (error_doc);
bson_copy_to (&b, error_doc);
}
bson_destroy (&b);
} else {
bson_set_error (error,
MONGOC_ERROR_QUERY,
MONGOC_ERROR_QUERY_FAILURE,
"Unknown query failure.");
}
RETURN (false);
} else if (rpc->reply.flags & MONGOC_REPLY_CURSOR_NOT_FOUND) {
bson_set_error (error,
MONGOC_ERROR_CURSOR,
MONGOC_ERROR_CURSOR_INVALID_CURSOR,
"The cursor is invalid or has expired.");
RETURN (false);
}
RETURN (true);
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sasl-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sasl-private.h
similarity index 85%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sasl-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sasl-private.h
index 5e3d8e70..bbc91918 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sasl-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sasl-private.h
@@ -1,66 +1,61 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_SASL_PRIVATE_H
#define MONGOC_SASL_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-uri.h"
#include "mongoc-stream-private.h"
#include "mongoc-stream.h"
#include "mongoc-stream-socket.h"
BSON_BEGIN_DECLS
typedef struct {
char *user;
char *pass;
char *service_name;
char *service_host;
bool canonicalize_host_name;
char *mechanism;
} mongoc_sasl_t;
void
_mongoc_sasl_set_pass (mongoc_sasl_t *sasl, const char *pass);
void
_mongoc_sasl_set_user (mongoc_sasl_t *sasl, const char *user);
-bool
-_mongoc_sasl_set_mechanism (mongoc_sasl_t *sasl,
- const char *mechanism,
- bson_error_t *error);
void
_mongoc_sasl_set_service_name (mongoc_sasl_t *sasl, const char *service_name);
void
_mongoc_sasl_set_service_host (mongoc_sasl_t *sasl, const char *service_host);
void
_mongoc_sasl_set_properties (mongoc_sasl_t *sasl, const mongoc_uri_t *uri);
bool
_mongoc_sasl_get_canonicalized_name (mongoc_stream_t *node_stream, /* IN */
char *name, /* OUT */
- size_t namelen, /* IN */
- bson_error_t *error); /* OUT */
+ size_t namelen); /* IN */
BSON_END_DECLS
#endif /* MONGOC_SASL_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sasl.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sasl.c
similarity index 92%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sasl.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sasl.c
index 266c4fb7..5c7afceb 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sasl.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sasl.c
@@ -1,193 +1,182 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#ifdef MONGOC_ENABLE_SASL
#include "mongoc-sasl-private.h"
#include "mongoc-util-private.h"
#include "mongoc-trace-private.h"
+#include "mongoc-change-stream-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "SASL"
/* Store a copy of @user in @sasl, releasing any previous value.
 * Passing NULL clears the field. */
void
_mongoc_sasl_set_user (mongoc_sasl_t *sasl, const char *user)
{
   BSON_ASSERT (sasl);

   bson_free (sasl->user);
   sasl->user = user != NULL ? bson_strdup (user) : NULL;
}
/* Store a copy of @pass in @sasl, releasing any previous value.
 * Passing NULL clears the field. */
void
_mongoc_sasl_set_pass (mongoc_sasl_t *sasl, const char *pass)
{
   BSON_ASSERT (sasl);

   bson_free (sasl->pass);
   sasl->pass = pass == NULL ? NULL : bson_strdup (pass);
}
/* Replace @sasl's GSSAPI service host with a copy of @service_host
 * (NULL clears it).  The previous value is freed. */
void
_mongoc_sasl_set_service_host (mongoc_sasl_t *sasl, const char *service_host)
{
   BSON_ASSERT (sasl);

   bson_free (sasl->service_host);
   sasl->service_host =
      service_host != NULL ? bson_strdup (service_host) : NULL;
}
/* Replace @sasl's GSSAPI service name with a copy of @service_name
 * (NULL clears it).  The previous value is freed. */
void
_mongoc_sasl_set_service_name (mongoc_sasl_t *sasl, const char *service_name)
{
   BSON_ASSERT (sasl);

   bson_free (sasl->service_name);
   sasl->service_name =
      service_name == NULL ? NULL : bson_strdup (service_name);
}
/*
 * Populate @sasl's GSSAPI-related settings (service name and hostname
 * canonicalization) from @uri's options and "authMechanismProperties".
 * For both settings, the newer authMechanismProperties syntax takes
 * precedence over the older dedicated URI option.
 */
void
_mongoc_sasl_set_properties (mongoc_sasl_t *sasl, const mongoc_uri_t *uri)
{
   const bson_t *options;
   bson_iter_t iter;
   bson_t properties;
   const char *service_name = NULL;
   bool canonicalize = false;

   options = mongoc_uri_get_options (uri);

   /* If the URI carries no mechanism properties, fall back to an empty
    * document so the lookups below simply find nothing. */
   if (!mongoc_uri_get_mechanism_properties (uri, &properties)) {
      bson_init (&properties);
   }

   /* older dedicated "gssapiServiceName" URI option */
   if (bson_iter_init_find_case (
          &iter, options, MONGOC_URI_GSSAPISERVICENAME) &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      service_name = bson_iter_utf8 (&iter, NULL);
   }

   if (bson_iter_init_find_case (&iter, &properties, "SERVICE_NAME") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      /* newer "authMechanismProperties" URI syntax takes precedence */
      service_name = bson_iter_utf8 (&iter, NULL);
   }

   _mongoc_sasl_set_service_name (sasl, service_name);

   /*
    * Driver Authentication Spec: "Drivers MAY allow the user to request
    * canonicalization of the hostname. This might be required when the hosts
    * report different hostnames than what is used in the kerberos database.
    * The default is "false".
    *
    * Some underlying GSSAPI layers will do this for us, but can be disabled in
    * their config (krb.conf).
    *
    * See CDRIVER-323 for more information.
    */
   if (bson_iter_init_find_case (
          &iter, options, MONGOC_URI_CANONICALIZEHOSTNAME) &&
       BSON_ITER_HOLDS_BOOL (&iter)) {
      canonicalize = bson_iter_bool (&iter);
   }

   if (bson_iter_init_find_case (
          &iter, &properties, "CANONICALIZE_HOST_NAME") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      /* newer "authMechanismProperties" URI syntax takes precedence;
       * note the property value is a UTF-8 string, not a boolean */
      canonicalize = !strcasecmp (bson_iter_utf8 (&iter, NULL), "true");
   }

   sasl->canonicalize_host_name = canonicalize;

   bson_destroy (&properties);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_sasl_get_canonicalized_name --
*
* Query the node to get the canonicalized name. This may happen if
* the node has been accessed via an alias.
*
* The gssapi code will use this if canonicalizeHostname is true.
*
* Some underlying layers of krb might do this for us, but they can
* be disabled in krb.conf.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_sasl_get_canonicalized_name (mongoc_stream_t *node_stream, /* IN */
char *name, /* OUT */
- size_t namelen, /* IN */
- bson_error_t *error) /* OUT */
+ size_t namelen) /* OUT */
{
mongoc_stream_t *stream;
- mongoc_stream_t *tmp;
mongoc_socket_t *sock = NULL;
char *canonicalized;
ENTRY;
BSON_ASSERT (node_stream);
BSON_ASSERT (name);
- /*
- * Find the underlying socket used in the stream chain.
- */
- for (stream = node_stream; stream;) {
- if ((tmp = mongoc_stream_get_base_stream (stream))) {
- stream = tmp;
- continue;
- }
- break;
- }
-
+ stream = mongoc_stream_get_root_stream (node_stream);
BSON_ASSERT (stream);
if (stream->type == MONGOC_STREAM_SOCKET) {
sock =
mongoc_stream_socket_get_socket ((mongoc_stream_socket_t *) stream);
if (sock) {
canonicalized = mongoc_socket_getnameinfo (sock);
if (canonicalized) {
bson_snprintf (name, namelen, "%s", canonicalized);
bson_free (canonicalized);
RETURN (true);
}
}
}
RETURN (false);
}
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-scram-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-scram-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-scram-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-scram-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-scram.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-scram.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-scram.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-scram.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-secure-channel-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-secure-channel-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-secure-channel-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-secure-channel-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-secure-channel.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-secure-channel.c
similarity index 98%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-secure-channel.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-secure-channel.c
index fbd4ea5d..104998b0 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-secure-channel.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-secure-channel.c
@@ -1,962 +1,962 @@
/*
* Copyright 2016 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#ifdef MONGOC_ENABLE_SSL_SECURE_CHANNEL
#include <bson.h>
#include "mongoc-log.h"
#include "mongoc-trace-private.h"
#include "mongoc-ssl.h"
#include "mongoc-stream-tls.h"
#include "mongoc-stream-tls-private.h"
#include "mongoc-secure-channel-private.h"
#include "mongoc-stream-tls-secure-channel-private.h"
#include "mongoc-errno-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "stream-secure-channel"
/* mingw doesn't define this */
#ifndef SECBUFFER_ALERT
#define SECBUFFER_ALERT 17
#endif
PCCERT_CONTEXT
mongoc_secure_channel_setup_certificate_from_file (const char *filename)
{
char *pem;
FILE *file;
bool success;
HCRYPTKEY hKey;
long pem_length;
HCRYPTPROV provider;
CERT_BLOB public_blob;
const char *pem_public;
const char *pem_private;
LPBYTE blob_private = NULL;
PCCERT_CONTEXT cert = NULL;
DWORD blob_private_len = 0;
HCERTSTORE cert_store = NULL;
DWORD encrypted_cert_len = 0;
LPBYTE encrypted_cert = NULL;
DWORD encrypted_private_len = 0;
LPBYTE encrypted_private = NULL;
file = fopen (filename, "rb");
if (!file) {
MONGOC_ERROR ("Couldn't open file '%s'", filename);
return false;
}
fseek (file, 0, SEEK_END);
pem_length = ftell (file);
fseek (file, 0, SEEK_SET);
if (pem_length < 1) {
MONGOC_ERROR ("Couldn't determine file size of '%s'", filename);
return false;
}
pem = (char *) bson_malloc0 (pem_length);
fread ((void *) pem, 1, pem_length, file);
fclose (file);
pem_public = strstr (pem, "-----BEGIN CERTIFICATE-----");
pem_private = strstr (pem, "-----BEGIN ENCRYPTED PRIVATE KEY-----");
if (pem_private) {
MONGOC_ERROR ("Detected unsupported encrypted private key");
goto fail;
}
pem_private = strstr (pem, "-----BEGIN RSA PRIVATE KEY-----");
if (!pem_private) {
pem_private = strstr (pem, "-----BEGIN PRIVATE KEY-----");
}
if (!pem_private) {
MONGOC_ERROR ("Can't find private key in '%s'", filename);
goto fail;
}
public_blob.cbData = (DWORD) strlen (pem_public);
public_blob.pbData = (BYTE *) pem_public;
/* https://msdn.microsoft.com/en-us/library/windows/desktop/aa380264%28v=vs.85%29.aspx
*/
CryptQueryObject (
CERT_QUERY_OBJECT_BLOB, /* dwObjectType, blob or file */
&public_blob, /* pvObject, Unicode filename */
CERT_QUERY_CONTENT_FLAG_ALL, /* dwExpectedContentTypeFlags */
CERT_QUERY_FORMAT_FLAG_ALL, /* dwExpectedFormatTypeFlags */
0, /* dwFlags, reserved for "future use" */
NULL, /* pdwMsgAndCertEncodingType, OUT, unused */
NULL, /* pdwContentType (dwExpectedContentTypeFlags), OUT, unused */
NULL, /* pdwFormatType (dwExpectedFormatTypeFlags,), OUT, unused */
NULL, /* phCertStore, OUT, HCERTSTORE.., unused, for now */
NULL, /* phMsg, OUT, HCRYPTMSG, only for PKC7, unused */
(const void **) &cert /* ppvContext, OUT, the Certificate Context */
);
if (!cert) {
MONGOC_ERROR ("Failed to extract public key from '%s'. Error 0x%.8X",
filename,
GetLastError ());
goto fail;
}
/* https://msdn.microsoft.com/en-us/library/windows/desktop/aa380285%28v=vs.85%29.aspx
*/
success =
CryptStringToBinaryA (pem_private, /* pszString */
0, /* cchString */
CRYPT_STRING_BASE64HEADER, /* dwFlags */
NULL, /* pbBinary */
&encrypted_private_len, /* pcBinary, IN/OUT */
NULL, /* pdwSkip */
NULL); /* pdwFlags */
if (!success) {
MONGOC_ERROR ("Failed to convert base64 private key. Error 0x%.8X",
GetLastError ());
goto fail;
}
encrypted_private = (LPBYTE) bson_malloc0 (encrypted_private_len);
success = CryptStringToBinaryA (pem_private,
0,
CRYPT_STRING_BASE64HEADER,
encrypted_private,
&encrypted_private_len,
NULL,
NULL);
if (!success) {
MONGOC_ERROR ("Failed to convert base64 private key. Error 0x%.8X",
GetLastError ());
goto fail;
}
/* https://msdn.microsoft.com/en-us/library/windows/desktop/aa379912%28v=vs.85%29.aspx
*/
success = CryptDecodeObjectEx (
X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, /* dwCertEncodingType */
PKCS_RSA_PRIVATE_KEY, /* lpszStructType */
encrypted_private, /* pbEncoded */
encrypted_private_len, /* cbEncoded */
0, /* dwFlags */
NULL, /* pDecodePara */
NULL, /* pvStructInfo */
&blob_private_len); /* pcbStructInfo */
if (!success) {
LPTSTR msg = NULL;
FormatMessage (FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_ARGUMENT_ARRAY,
NULL,
GetLastError (),
LANG_NEUTRAL,
(LPTSTR) &msg,
0,
NULL);
MONGOC_ERROR (
"Failed to parse private key. %s (0x%.8X)", msg, GetLastError ());
LocalFree (msg);
goto fail;
}
blob_private = (LPBYTE) bson_malloc0 (blob_private_len);
success = CryptDecodeObjectEx (X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
PKCS_RSA_PRIVATE_KEY,
encrypted_private,
encrypted_private_len,
0,
NULL,
blob_private,
&blob_private_len);
if (!success) {
MONGOC_ERROR ("Failed to parse private key. Error 0x%.8X",
GetLastError ());
goto fail;
}
/* https://msdn.microsoft.com/en-us/library/windows/desktop/aa379886%28v=vs.85%29.aspx
*/
success = CryptAcquireContext (&provider, /* phProv */
NULL, /* pszContainer */
MS_ENHANCED_PROV, /* pszProvider */
PROV_RSA_FULL, /* dwProvType */
CRYPT_VERIFYCONTEXT); /* dwFlags */
if (!success) {
MONGOC_ERROR ("CryptAcquireContext failed with error 0x%.8X",
GetLastError ());
goto fail;
}
/* https://msdn.microsoft.com/en-us/library/windows/desktop/aa380207%28v=vs.85%29.aspx
*/
success = CryptImportKey (provider, /* hProv */
blob_private, /* pbData */
blob_private_len, /* dwDataLen */
0, /* hPubKey */
0, /* dwFlags */
&hKey); /* phKey, OUT */
if (!success) {
MONGOC_ERROR ("CryptImportKey for private key failed with error 0x%.8X",
GetLastError ());
goto fail;
}
/* https://msdn.microsoft.com/en-us/library/windows/desktop/aa376573%28v=vs.85%29.aspx
*/
success = CertSetCertificateContextProperty (
cert, /* pCertContext */
CERT_KEY_PROV_HANDLE_PROP_ID, /* dwPropId */
0, /* dwFlags */
(const void *) provider); /* pvData */
if (success) {
- TRACE ("Successfully loaded client certificate");
+ TRACE ("%s", "Successfully loaded client certificate");
return cert;
}
MONGOC_ERROR ("Can't associate private key with public key: 0x%.8X",
GetLastError ());
fail:
SecureZeroMemory (pem, pem_length);
bson_free (pem);
if (encrypted_private) {
SecureZeroMemory (encrypted_private, encrypted_private_len);
bson_free (encrypted_private);
}
if (blob_private) {
SecureZeroMemory (blob_private, blob_private_len);
bson_free (blob_private);
}
return NULL;
}
/* Load the client certificate named by @opt->pem_file.
 * @secure_channel is unused here; the parameter exists to match the
 * other mongoc_secure_channel_setup_* signatures. */
PCCERT_CONTEXT
mongoc_secure_channel_setup_certificate (
   mongoc_stream_tls_secure_channel_t *secure_channel, mongoc_ssl_opt_t *opt)
{
   return mongoc_secure_channel_setup_certificate_from_file (opt->pem_file);
}
/* Append "<label><value>" to @retval when @cert's subject contains an
 * attribute with the given @oid; append nothing when it is absent. */
void
_bson_append_szoid (bson_string_t *retval,
                    PCCERT_CONTEXT cert,
                    const char *label,
                    void *oid)
{
   char *value;
   DWORD len;

   /* first call sizes the buffer; a result of 1 is just the NUL
    * terminator, i.e. the attribute is missing */
   len = CertGetNameString (cert, CERT_NAME_ATTR_TYPE, 0, oid, NULL, 0);
   if (len <= 1) {
      return;
   }

   value = bson_malloc0 (len);
   CertGetNameString (cert, CERT_NAME_ATTR_TYPE, 0, oid, value, len);
   bson_string_append_printf (retval, "%s%s", label, value);
   bson_free (value);
}
/*
 * Build an RFC-4514-style subject string ("C=..,ST=..,...") from the
 * certificate stored in the PEM file @filename.
 *
 * @passphrase is currently unused: encrypted private keys are rejected
 * by mongoc_secure_channel_setup_certificate_from_file.
 *
 * Returns a heap-allocated string owned by the caller, or NULL when the
 * certificate cannot be loaded.
 *
 * Fixes vs. previous revision: releases the certificate context (it was
 * leaked before) and drops a stray empty statement.
 */
char *
_mongoc_secure_channel_extract_subject (const char *filename,
                                        const char *passphrase)
{
   bson_string_t *retval;
   PCCERT_CONTEXT cert;

   cert = mongoc_secure_channel_setup_certificate_from_file (filename);
   if (!cert) {
      return NULL;
   }

   retval = bson_string_new ("");
   _bson_append_szoid (retval, cert, "C=", szOID_COUNTRY_NAME);
   _bson_append_szoid (retval, cert, ",ST=", szOID_STATE_OR_PROVINCE_NAME);
   _bson_append_szoid (retval, cert, ",L=", szOID_LOCALITY_NAME);
   _bson_append_szoid (retval, cert, ",O=", szOID_ORGANIZATION_NAME);
   _bson_append_szoid (retval, cert, ",OU=", szOID_ORGANIZATIONAL_UNIT_NAME);
   _bson_append_szoid (retval, cert, ",CN=", szOID_COMMON_NAME);
   _bson_append_szoid (retval, cert, ",STREET=", szOID_STREET_ADDRESS);

   /* release the context acquired above (was previously leaked) */
   CertFreeCertificateContext (cert);

   return bson_string_free (retval, false);
}
bool
mongoc_secure_channel_setup_ca (
mongoc_stream_tls_secure_channel_t *secure_channel, mongoc_ssl_opt_t *opt)
{
FILE *file;
long length;
const char *pem_key;
HCERTSTORE cert_store = NULL;
PCCERT_CONTEXT cert = NULL;
DWORD encrypted_cert_len = 0;
LPBYTE encrypted_cert = NULL;
file = fopen (opt->ca_file, "rb");
if (!file) {
MONGOC_WARNING ("Couldn't open file '%s'", opt->ca_file);
return false;
}
fseek (file, 0, SEEK_END);
length = ftell (file);
fseek (file, 0, SEEK_SET);
if (length < 1) {
MONGOC_WARNING ("Couldn't determine file size of '%s'", opt->ca_file);
return false;
}
pem_key = (const char *) bson_malloc0 (length);
fread ((void *) pem_key, 1, length, file);
fclose (file);
/* If we have private keys or other fuzz, seek to the good stuff */
pem_key = strstr (pem_key, "-----BEGIN CERTIFICATE-----");
/*printf ("%s\n", pem_key);*/
if (!pem_key) {
MONGOC_WARNING ("Couldn't find certificate in '%d'", opt->ca_file);
return false;
}
if (!CryptStringToBinaryA (pem_key,
0,
CRYPT_STRING_BASE64HEADER,
NULL,
&encrypted_cert_len,
NULL,
NULL)) {
MONGOC_ERROR ("Failed to convert BASE64 public key. Error 0x%.8X",
GetLastError ());
return false;
}
encrypted_cert = (LPBYTE) LocalAlloc (0, encrypted_cert_len);
if (!CryptStringToBinaryA (pem_key,
0,
CRYPT_STRING_BASE64HEADER,
encrypted_cert,
&encrypted_cert_len,
NULL,
NULL)) {
MONGOC_ERROR ("Failed to convert BASE64 public key. Error 0x%.8X",
GetLastError ());
return false;
}
cert = CertCreateCertificateContext (
X509_ASN_ENCODING, encrypted_cert, encrypted_cert_len);
if (!cert) {
MONGOC_WARNING ("Could not convert certificate");
return false;
}
cert_store = CertOpenStore (
CERT_STORE_PROV_SYSTEM, /* provider */
X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, /* certificate encoding */
0, /* unused */
CERT_SYSTEM_STORE_LOCAL_MACHINE, /* dwFlags */
L"Root"); /* system store name. "My" or "Root" */
if (cert_store == NULL) {
MONGOC_ERROR ("Error opening certificate store");
return false;
}
if (CertAddCertificateContextToStore (
cert_store, cert, CERT_STORE_ADD_USE_EXISTING, NULL)) {
- TRACE ("Added the certificate !");
+ TRACE ("%s", "Added the certificate !");
CertCloseStore (cert_store, 0);
return true;
}
MONGOC_WARNING ("Failed adding the cert");
CertCloseStore (cert_store, 0);
return false;
}
bool
mongoc_secure_channel_setup_crl (
mongoc_stream_tls_secure_channel_t *secure_channel, mongoc_ssl_opt_t *opt)
{
HCERTSTORE cert_store = NULL;
PCCERT_CONTEXT cert = NULL;
LPWSTR str;
int chars;
chars = MultiByteToWideChar (CP_ACP, 0, opt->crl_file, -1, NULL, 0);
if (chars < 1) {
MONGOC_WARNING ("Can't determine opt->crl_file length");
return false;
}
str = (LPWSTR) bson_malloc0 (chars);
MultiByteToWideChar (CP_ACP, 0, opt->crl_file, -1, str, chars);
/* https://msdn.microsoft.com/en-us/library/windows/desktop/aa380264%28v=vs.85%29.aspx
*/
CryptQueryObject (
CERT_QUERY_OBJECT_FILE, /* dwObjectType, blob or file */
str, /* pvObject, Unicode filename */
CERT_QUERY_CONTENT_FLAG_CRL, /* dwExpectedContentTypeFlags */
CERT_QUERY_FORMAT_FLAG_ALL, /* dwExpectedFormatTypeFlags */
0, /* dwFlags, reserved for "future use" */
NULL, /* pdwMsgAndCertEncodingType, OUT, unused */
NULL, /* pdwContentType (dwExpectedContentTypeFlags), OUT, unused */
NULL, /* pdwFormatType (dwExpectedFormatTypeFlags,), OUT, unused */
NULL, /* phCertStore, OUT, HCERTSTORE.., unused, for now */
NULL, /* phMsg, OUT, HCRYPTMSG, only for PKC7, unused */
(const void **) &cert /* ppvContext, OUT, the Certificate Context */
);
bson_free (str);
if (!cert) {
MONGOC_WARNING ("Can't extract CRL from '%s'", opt->crl_file);
return false;
}
cert_store = CertOpenStore (
CERT_STORE_PROV_SYSTEM, /* provider */
X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, /* certificate encoding */
0, /* unused */
CERT_SYSTEM_STORE_LOCAL_MACHINE, /* dwFlags */
L"Root"); /* system store name. "My" or "Root" */
if (cert_store == NULL) {
MONGOC_ERROR ("Error opening certificate store");
CertFreeCertificateContext (cert);
return false;
}
if (CertAddCertificateContextToStore (
cert_store, cert, CERT_STORE_ADD_USE_EXISTING, NULL)) {
- TRACE ("Added the certificate !");
+ TRACE ("%s", "Added the certificate !");
CertFreeCertificateContext (cert);
CertCloseStore (cert_store, 0);
return true;
}
MONGOC_WARNING ("Failed adding the cert");
CertFreeCertificateContext (cert);
CertCloseStore (cert_store, 0);
return false;
}
/*
 * Read up to @data_length bytes of (encrypted) TLS data from the stream
 * wrapped by @tls into @data.
 *
 * Returns the number of bytes read, or 0 on error/EOF; on failure errno
 * is whatever the underlying mongoc_stream_read left there.
 *
 * Fix: the TRACE calls passed size_t/ssize_t arguments for "%d", which
 * is undefined behavior where those types are wider than int (64-bit
 * Windows).  Cast explicitly to int for the trace output.
 */
size_t
mongoc_secure_channel_read (mongoc_stream_tls_t *tls,
                            void *data,
                            size_t data_length)
{
   ssize_t length;

   errno = 0;
   TRACE ("Wanting to read: %d", (int) data_length);
   /* 4th argument is minimum bytes, while the data_length is the
    * size of the buffer. We are totally fine with just one TLS record (few
    *bytes)
    **/
   length = mongoc_stream_read (
      tls->base_stream, data, data_length, 0, tls->timeout_msec);
   TRACE ("Got %d", (int) length);

   if (length > 0) {
      return length;
   }

   return 0;
}
/*
 * Write @data_length bytes from @data to the stream wrapped by @tls.
 *
 * Returns the underlying stream's result converted to size_t.
 * NOTE(review): a negative ssize_t error return wraps to a huge size_t
 * here, matching the pre-existing behavior callers rely on — confirm
 * before tightening.
 *
 * Fix: the TRACE calls passed size_t/ssize_t arguments for "%d", which
 * is undefined behavior where those types are wider than int (64-bit
 * Windows).  Cast explicitly to int for the trace output.
 */
size_t
mongoc_secure_channel_write (mongoc_stream_tls_t *tls,
                             const void *data,
                             size_t data_length)
{
   ssize_t length;

   errno = 0;
   TRACE ("Wanting to write: %d", (int) data_length);

   length = mongoc_stream_write (
      tls->base_stream, (void *) data, data_length, tls->timeout_msec);
   TRACE ("Wrote: %d", (int) length);

   return length;
}
/**
* The follow functions comes from one of my favorite project, cURL!
* Thank you so much for having gone through the Secure Channel pain for me.
*
*
* Copyright (C) 2012 - 2015, Marc Hoersken, <info@marc-hoersken.de>
* Copyright (C) 2012, Mark Salisbury, <mark.salisbury@hp.com>
* Copyright (C) 2012 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/*
* Based upon the PolarSSL implementation in polarssl.c and polarssl.h:
* Copyright (C) 2010, 2011, Hoi-Ho Chan, <hoiho.chan@gmail.com>
*
* Based upon the CyaSSL implementation in cyassl.c and cyassl.h:
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* Thanks for code and inspiration!
*/
/* Fill out a single SSPI SecBuffer with the given type, data pointer,
 * and byte count.  No memory is allocated or copied; @buf_data_ptr is
 * borrowed and must outlive the buffer's use. */
void
_mongoc_secure_channel_init_sec_buffer (SecBuffer *buffer,
                                        unsigned long buf_type,
                                        void *buf_data_ptr,
                                        unsigned long buf_byte_size)
{
   buffer->cbBuffer = buf_byte_size;
   buffer->BufferType = buf_type;
   buffer->pvBuffer = buf_data_ptr;
}
/* Point an SSPI SecBufferDesc at an existing array of @buffer_count
 * SecBuffers, stamping it with the current SECBUFFER_VERSION.  The
 * array is borrowed, not copied. */
void
_mongoc_secure_channel_init_sec_buffer_desc (SecBufferDesc *desc,
                                             SecBuffer *buffer_array,
                                             unsigned long buffer_count)
{
   desc->ulVersion = SECBUFFER_VERSION;
   desc->pBuffers = buffer_array;
   desc->cBuffers = buffer_count;
}
/*
 * First of three Secure Channel handshake steps: create the SSPI
 * security context for @hostname and send the initial handshake bytes
 * over the wrapped stream.
 *
 * Returns true and advances secure_channel->connecting_state to
 * ssl_connect_2 on success; returns false if InitializeSecurityContext
 * fails or the handshake data cannot be written in full.
 */
bool
mongoc_secure_channel_handshake_step_1 (mongoc_stream_tls_t *tls,
                                        char *hostname)
{
   SecBuffer outbuf;
   ssize_t written = -1;
   SecBufferDesc outbuf_desc;
   SECURITY_STATUS sspi_status = SEC_E_OK;
   mongoc_stream_tls_secure_channel_t *secure_channel =
      (mongoc_stream_tls_secure_channel_t *) tls->ctx;

   TRACE ("SSL/TLS connection with '%s' (step 1/3)", hostname);

   /* setup output buffer: with ISC_REQ_ALLOCATE_MEMORY below, SSPI
    * allocates the buffer and we release it via FreeContextBuffer */
   _mongoc_secure_channel_init_sec_buffer (&outbuf, SECBUFFER_EMPTY, NULL, 0);
   _mongoc_secure_channel_init_sec_buffer_desc (&outbuf_desc, &outbuf, 1);

   /* setup request flags */
   secure_channel->req_flags = ISC_REQ_SEQUENCE_DETECT | ISC_REQ_REPLAY_DETECT |
                               ISC_REQ_CONFIDENTIALITY |
                               ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_STREAM;

   /* allocate memory for the security context handle */
   secure_channel->ctxt = (mongoc_secure_channel_ctxt *) bson_malloc0 (
      sizeof (mongoc_secure_channel_ctxt));

   /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa375924.aspx */
   sspi_status = InitializeSecurityContext (
      &secure_channel->cred->cred_handle, /* phCredential */
      NULL,                               /* phContext */
      hostname,                           /* pszTargetName */
      secure_channel->req_flags,          /* fContextReq */
      0,                                  /* Reserved1, must be 0 */
      0,                                  /* TargetDataRep, unused */
      NULL,                               /* pInput */
      0,                                  /* Reserved2, must be 0 */
      &secure_channel->ctxt->ctxt_handle, /* phNewContext OUT param */
      &outbuf_desc,                       /* pOutput OUT param */
      &secure_channel->ret_flags,         /* pfContextAttr OUT param */
      &secure_channel->ctxt->time_stamp   /* ptsExpiry OUT param */
      );

   /* a fresh handshake always requires at least one more round trip */
   if (sspi_status != SEC_I_CONTINUE_NEEDED) {
      MONGOC_ERROR ("initial InitializeSecurityContext failed: %d",
                    sspi_status);
      return false;
   }

   TRACE ("sending initial handshake data: sending %lu bytes...",
          outbuf.cbBuffer);

   /* send initial handshake data which is now stored in output buffer */
   written =
      mongoc_secure_channel_write (tls, outbuf.pvBuffer, outbuf.cbBuffer);

   FreeContextBuffer (outbuf.pvBuffer);

   if (outbuf.cbBuffer != (size_t) written) {
      MONGOC_ERROR ("failed to send initial handshake data: "
                    "sent %zd of %lu bytes",
                    written,
                    outbuf.cbBuffer);
      return false;
   }

   TRACE ("sent initial handshake data: sent %zd bytes", written);

   /* reset per-connection receive-state flags for the new handshake */
   secure_channel->recv_unrecoverable_err = 0;
   secure_channel->recv_sspi_close_notify = false;
   secure_channel->recv_connection_closed = false;

   /* continue to second handshake step */
   secure_channel->connecting_state = ssl_connect_2;

   return true;
}
bool
mongoc_secure_channel_handshake_step_2 (mongoc_stream_tls_t *tls,
char *hostname)
{
mongoc_stream_tls_secure_channel_t *secure_channel =
(mongoc_stream_tls_secure_channel_t *) tls->ctx;
SECURITY_STATUS sspi_status = SEC_E_OK;
unsigned char *reallocated_buffer;
ssize_t nread = -1, written = -1;
size_t reallocated_length;
SecBufferDesc outbuf_desc;
SecBufferDesc inbuf_desc;
SecBuffer outbuf[3];
SecBuffer inbuf[2];
bool doread;
int i;
doread = (secure_channel->connecting_state != ssl_connect_2_writing) ? true
: false;
- TRACE ("SSL/TLS connection with endpoint (step 2/3)");
+ TRACE ("%s", "SSL/TLS connection with endpoint (step 2/3)");
if (!secure_channel->cred || !secure_channel->ctxt) {
return false;
}
/* buffer to store previously received and decrypted data */
if (secure_channel->decdata_buffer == NULL) {
secure_channel->decdata_offset = 0;
secure_channel->decdata_length = MONGOC_SCHANNEL_BUFFER_INIT_SIZE;
secure_channel->decdata_buffer =
bson_malloc0 (secure_channel->decdata_length);
}
/* buffer to store previously received and encrypted data */
if (secure_channel->encdata_buffer == NULL) {
secure_channel->encdata_offset = 0;
secure_channel->encdata_length = MONGOC_SCHANNEL_BUFFER_INIT_SIZE;
secure_channel->encdata_buffer =
bson_malloc0 (secure_channel->encdata_length);
}
/* if we need a bigger buffer to read a full message, increase buffer now */
if (secure_channel->encdata_length - secure_channel->encdata_offset <
MONGOC_SCHANNEL_BUFFER_FREE_SIZE) {
/* increase internal encrypted data buffer */
reallocated_length =
secure_channel->encdata_offset + MONGOC_SCHANNEL_BUFFER_FREE_SIZE;
reallocated_buffer =
bson_realloc (secure_channel->encdata_buffer, reallocated_length);
secure_channel->encdata_buffer = reallocated_buffer;
secure_channel->encdata_length = reallocated_length;
}
for (;;) {
if (doread) {
/* read encrypted handshake data from socket */
nread = mongoc_secure_channel_read (
tls,
(char *) (secure_channel->encdata_buffer +
secure_channel->encdata_offset),
secure_channel->encdata_length - secure_channel->encdata_offset);
if (!nread) {
if (MONGOC_ERRNO_IS_AGAIN (errno)) {
if (secure_channel->connecting_state != ssl_connect_2_writing) {
secure_channel->connecting_state = ssl_connect_2_reading;
}
- TRACE ("failed to receive handshake, need more data");
+ TRACE ("%s", "failed to receive handshake, need more data");
return true;
}
MONGOC_ERROR (
"failed to receive handshake, SSL/TLS connection failed");
return false;
}
/* increase encrypted data buffer offset */
secure_channel->encdata_offset += nread;
}
TRACE ("encrypted data buffer: offset %zu length %zu",
secure_channel->encdata_offset,
secure_channel->encdata_length);
/* setup input buffers */
_mongoc_secure_channel_init_sec_buffer (
&inbuf[0],
SECBUFFER_TOKEN,
malloc (secure_channel->encdata_offset),
(unsigned long) (secure_channel->encdata_offset &
(size_t) 0xFFFFFFFFUL));
_mongoc_secure_channel_init_sec_buffer (
&inbuf[1], SECBUFFER_EMPTY, NULL, 0);
_mongoc_secure_channel_init_sec_buffer_desc (&inbuf_desc, inbuf, 2);
/* setup output buffers */
_mongoc_secure_channel_init_sec_buffer (
&outbuf[0], SECBUFFER_TOKEN, NULL, 0);
_mongoc_secure_channel_init_sec_buffer (
&outbuf[1], SECBUFFER_ALERT, NULL, 0);
_mongoc_secure_channel_init_sec_buffer (
&outbuf[2], SECBUFFER_EMPTY, NULL, 0);
_mongoc_secure_channel_init_sec_buffer_desc (&outbuf_desc, outbuf, 3);
if (inbuf[0].pvBuffer == NULL) {
MONGOC_ERROR ("unable to allocate memory");
return false;
}
/* copy received handshake data into input buffer */
memcpy (inbuf[0].pvBuffer,
secure_channel->encdata_buffer,
secure_channel->encdata_offset);
/* https://msdn.microsoft.com/en-us/library/windows/desktop/aa375924.aspx
*/
sspi_status =
InitializeSecurityContext (&secure_channel->cred->cred_handle,
&secure_channel->ctxt->ctxt_handle,
hostname,
secure_channel->req_flags,
0,
0,
&inbuf_desc,
0,
NULL,
&outbuf_desc,
&secure_channel->ret_flags,
&secure_channel->ctxt->time_stamp);
/* free buffer for received handshake data */
free (inbuf[0].pvBuffer);
/* check if the handshake was incomplete */
if (sspi_status == SEC_E_INCOMPLETE_MESSAGE) {
secure_channel->connecting_state = ssl_connect_2_reading;
- TRACE ("received incomplete message, need more data");
+ TRACE ("%s", "received incomplete message, need more data");
return true;
}
/* If the server has requested a client certificate, attempt to continue
* the handshake without one. This will allow connections to servers which
* request a client certificate but do not require it. */
if (sspi_status == SEC_I_INCOMPLETE_CREDENTIALS &&
!(secure_channel->req_flags & ISC_REQ_USE_SUPPLIED_CREDS)) {
secure_channel->req_flags |= ISC_REQ_USE_SUPPLIED_CREDS;
secure_channel->connecting_state = ssl_connect_2_writing;
- TRACE ("a client certificate has been requested");
+ TRACE ("%s", "A client certificate has been requested");
return true;
}
/* check if the handshake needs to be continued */
if (sspi_status == SEC_I_CONTINUE_NEEDED || sspi_status == SEC_E_OK) {
for (i = 0; i < 3; i++) {
/* search for handshake tokens that need to be send */
if (outbuf[i].BufferType == SECBUFFER_TOKEN &&
outbuf[i].cbBuffer > 0) {
TRACE ("sending next handshake data: sending %lu bytes...",
outbuf[i].cbBuffer);
/* send handshake token to server */
written = mongoc_secure_channel_write (
tls, outbuf[i].pvBuffer, outbuf[i].cbBuffer);
if (outbuf[i].cbBuffer != (size_t) written) {
MONGOC_ERROR ("failed to send next handshake data: "
"sent %zd of %lu bytes",
written,
outbuf[i].cbBuffer);
return false;
}
}
/* free obsolete buffer */
if (outbuf[i].pvBuffer != NULL) {
FreeContextBuffer (outbuf[i].pvBuffer);
}
}
} else {
switch (sspi_status) {
case SEC_E_WRONG_PRINCIPAL:
MONGOC_ERROR ("SSL Certification verification failed: hostname "
"doesn't match certificate");
break;
case SEC_E_UNTRUSTED_ROOT:
MONGOC_ERROR ("SSL Certification verification failed: Untrusted "
"root certificate");
break;
case SEC_E_CERT_EXPIRED:
MONGOC_ERROR ("SSL Certification verification failed: certificate "
"has expired");
break;
case CRYPT_E_NO_REVOCATION_CHECK:
/* This seems to be raised also when hostname doesn't match the
* certificate */
MONGOC_ERROR ("SSL Certification verification failed: failed "
"revocation/hostname check");
break;
case SEC_E_INSUFFICIENT_MEMORY:
case SEC_E_INTERNAL_ERROR:
case SEC_E_INVALID_HANDLE:
case SEC_E_INVALID_TOKEN:
case SEC_E_LOGON_DENIED:
case SEC_E_NO_AUTHENTICATING_AUTHORITY:
case SEC_E_NO_CREDENTIALS:
case SEC_E_TARGET_UNKNOWN:
case SEC_E_UNSUPPORTED_FUNCTION:
#ifdef SEC_E_APPLICATION_PROTOCOL_MISMATCH
/* Not available in VS2010 */
case SEC_E_APPLICATION_PROTOCOL_MISMATCH:
#endif
default: {
LPTSTR msg = NULL;
FormatMessage (FORMAT_MESSAGE_ALLOCATE_BUFFER |
FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_ARGUMENT_ARRAY,
NULL,
GetLastError (),
LANG_NEUTRAL,
(LPTSTR) &msg,
0,
NULL);
MONGOC_ERROR ("Failed to initialize security context, error code: "
"0x%04X%04X: %s",
(sspi_status >> 16) & 0xffff,
sspi_status & 0xffff,
msg);
LocalFree (msg);
}
}
return false;
}
/* check if there was additional remaining encrypted data */
if (inbuf[1].BufferType == SECBUFFER_EXTRA && inbuf[1].cbBuffer > 0) {
TRACE ("encrypted data length: %lu", inbuf[1].cbBuffer);
/*
* There are two cases where we could be getting extra data here:
* 1) If we're renegotiating a connection and the handshake is already
* complete (from the server perspective), it can encrypted app data
* (not handshake data) in an extra buffer at this point.
* 2) (sspi_status == SEC_I_CONTINUE_NEEDED) We are negotiating a
* connection and this extra data is part of the handshake.
* We should process the data immediately; waiting for the socket to
* be ready may fail since the server is done sending handshake data.
*/
/* check if the remaining data is less than the total amount
* and therefore begins after the already processed data */
if (secure_channel->encdata_offset > inbuf[1].cbBuffer) {
memmove (secure_channel->encdata_buffer,
(secure_channel->encdata_buffer +
secure_channel->encdata_offset) -
inbuf[1].cbBuffer,
inbuf[1].cbBuffer);
secure_channel->encdata_offset = inbuf[1].cbBuffer;
if (sspi_status == SEC_I_CONTINUE_NEEDED) {
doread = FALSE;
continue;
}
}
} else {
secure_channel->encdata_offset = 0;
}
break;
}
/* check if the handshake needs to be continued */
if (sspi_status == SEC_I_CONTINUE_NEEDED) {
secure_channel->connecting_state = ssl_connect_2_reading;
return true;
}
/* check if the handshake is complete */
if (sspi_status == SEC_E_OK) {
secure_channel->connecting_state = ssl_connect_3;
- TRACE ("SSL/TLS handshake complete\n");
+ TRACE ("%s", "SSL/TLS handshake complete");
}
return true;
}
bool
mongoc_secure_channel_handshake_step_3 (mongoc_stream_tls_t *tls,
char *hostname)
{
mongoc_stream_tls_secure_channel_t *secure_channel =
(mongoc_stream_tls_secure_channel_t *) tls->ctx;
BSON_ASSERT (ssl_connect_3 == secure_channel->connecting_state);
- TRACE ("SSL/TLS connection with %s (step 3/3)\n", hostname);
+ TRACE ("SSL/TLS connection with %s (step 3/3)", hostname);
if (!secure_channel->cred) {
return false;
}
/* check if the required context attributes are met */
if (secure_channel->ret_flags != secure_channel->req_flags) {
MONGOC_ERROR ("Failed handshake");
return false;
}
secure_channel->connecting_state = ssl_connect_done;
return true;
}
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-secure-transport-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-secure-transport-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-secure-transport-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-secure-transport-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-secure-transport.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-secure-transport.c
similarity index 96%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-secure-transport.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-secure-transport.c
index 27209303..a3e9d17b 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-secure-transport.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-secure-transport.c
@@ -1,474 +1,473 @@
/*
* Copyright 2016 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#ifdef MONGOC_ENABLE_SSL_SECURE_TRANSPORT
#include <bson.h>
#include "mongoc-log.h"
#include "mongoc-trace-private.h"
#include "mongoc-ssl.h"
#include "mongoc-stream-tls.h"
#include "mongoc-stream-tls-private.h"
#include "mongoc-secure-transport-private.h"
#include "mongoc-stream-tls-secure-transport-private.h"
#include <CoreFoundation/CoreFoundation.h>
#include <Security/Security.h>
#include <Security/SecKey.h>
#include <Security/SecureTransport.h>
#include <CommonCrypto/CommonDigest.h>
#include <Security/Security.h>
#include <Security/SecureTransport.h>
#include <CoreFoundation/CoreFoundation.h>
/* Jailbreak Darwin Private API */
SecIdentityRef
SecIdentityCreate (CFAllocatorRef allocator,
SecCertificateRef certificate,
SecKeyRef privateKey);
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "stream-secure_transport"
void
_bson_append_cftyperef (bson_string_t *retval, const char *label, CFTypeRef str)
{
if (str && CFGetTypeID (str) == CFStringGetTypeID ()) {
CFIndex length = CFStringGetLength (str);
CFStringEncoding encoding = kCFStringEncodingASCII;
CFIndex maxSize =
CFStringGetMaximumSizeForEncoding (length, encoding) + 1;
char *cs = bson_malloc ((size_t) maxSize);
if (CFStringGetCString (str, cs, maxSize, encoding)) {
bson_string_append_printf (retval, "%s%s", label, cs);
} else {
bson_string_append_printf (retval, "%s(null)", label);
}
bson_free (cs);
}
}
CFTypeRef
_mongoc_secure_transport_dict_get (CFArrayRef values, CFStringRef label)
{
if (!values || CFGetTypeID (values) != CFArrayGetTypeID ()) {
return NULL;
}
for (CFIndex i = 0; i < CFArrayGetCount (values); ++i) {
CFStringRef item_label;
CFDictionaryRef item = CFArrayGetValueAtIndex (values, i);
if (CFGetTypeID (item) != CFDictionaryGetTypeID ()) {
continue;
}
item_label = CFDictionaryGetValue (item, kSecPropertyKeyLabel);
if (item_label &&
CFStringCompare (item_label, label, 0) == kCFCompareEqualTo) {
return CFDictionaryGetValue (item, kSecPropertyKeyValue);
}
}
return NULL;
}
char *
_mongoc_secure_transport_RFC2253_from_cert (SecCertificateRef cert)
{
CFTypeRef value;
bson_string_t *retval;
CFTypeRef subject_name;
CFDictionaryRef cert_dict;
cert_dict = SecCertificateCopyValues (cert, NULL, NULL);
if (!cert_dict) {
return NULL;
}
subject_name = CFDictionaryGetValue (cert_dict, kSecOIDX509V1SubjectName);
if (!subject_name) {
CFRelease (cert_dict);
return NULL;
}
subject_name = CFDictionaryGetValue (subject_name, kSecPropertyKeyValue);
if (!subject_name) {
CFRelease (cert_dict);
return NULL;
}
retval = bson_string_new ("");
;
value = _mongoc_secure_transport_dict_get (subject_name, kSecOIDCountryName);
_bson_append_cftyperef (retval, "C=", value);
value = _mongoc_secure_transport_dict_get (subject_name,
kSecOIDStateProvinceName);
_bson_append_cftyperef (retval, ",ST=", value);
value =
_mongoc_secure_transport_dict_get (subject_name, kSecOIDLocalityName);
_bson_append_cftyperef (retval, ",L=", value);
value =
_mongoc_secure_transport_dict_get (subject_name, kSecOIDOrganizationName);
_bson_append_cftyperef (retval, ",O=", value);
value = _mongoc_secure_transport_dict_get (subject_name,
kSecOIDOrganizationalUnitName);
if (value) {
/* Can be either one unit name, or array of unit names */
if (CFGetTypeID (value) == CFStringGetTypeID ()) {
_bson_append_cftyperef (retval, ",OU=", value);
} else if (CFGetTypeID (value) == CFArrayGetTypeID ()) {
CFIndex len = CFArrayGetCount (value);
if (len > 0) {
_bson_append_cftyperef (
retval, ",OU=", CFArrayGetValueAtIndex (value, 0));
}
if (len > 1) {
_bson_append_cftyperef (
retval, ",", CFArrayGetValueAtIndex (value, 1));
}
if (len > 2) {
_bson_append_cftyperef (
retval, ",", CFArrayGetValueAtIndex (value, 2));
}
}
}
value = _mongoc_secure_transport_dict_get (subject_name, kSecOIDCommonName);
_bson_append_cftyperef (retval, ",CN=", value);
value =
_mongoc_secure_transport_dict_get (subject_name, kSecOIDStreetAddress);
_bson_append_cftyperef (retval, ",STREET", value);
CFRelease (cert_dict);
return bson_string_free (retval, false);
}
bool
_mongoc_secure_transport_import_pem (const char *filename,
const char *passphrase,
CFArrayRef *items,
SecExternalItemType *type)
{
SecExternalFormat format = kSecFormatPEMSequence;
SecItemImportExportKeyParameters params;
SecTransformRef sec_transform;
CFReadStreamRef read_stream;
CFDataRef dataref;
CFErrorRef error;
CFURLRef url;
OSStatus res;
if (!filename) {
- MONGOC_INFO ("%s", "No certificate provided");
+ TRACE ("%s", "No certificate provided");
return false;
}
params.version = SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION;
params.flags = 0;
params.passphrase = NULL;
params.alertTitle = NULL;
params.alertPrompt = NULL;
params.accessRef = NULL;
params.keyUsage = NULL;
params.keyAttributes = NULL;
if (passphrase) {
params.passphrase = CFStringCreateWithCString (
kCFAllocatorDefault, passphrase, kCFStringEncodingUTF8);
}
url = CFURLCreateFromFileSystemRepresentation (
kCFAllocatorDefault, (const UInt8 *) filename, strlen (filename), false);
read_stream = CFReadStreamCreateWithFile (kCFAllocatorDefault, url);
sec_transform = SecTransformCreateReadTransformWithReadStream (read_stream);
dataref = SecTransformExecute (sec_transform, &error);
if (error) {
CFStringRef str = CFErrorCopyDescription (error);
MONGOC_ERROR (
"Failed importing PEM '%s': %s",
filename,
CFStringGetCStringPtr (str, CFStringGetFastestEncoding (str)));
CFRelease (str);
CFRelease (sec_transform);
CFRelease (read_stream);
CFRelease (url);
if (passphrase) {
CFRelease (params.passphrase);
}
return false;
}
res = SecItemImport (
dataref, CFSTR (".pem"), &format, type, 0, &params, NULL, items);
CFRelease (dataref);
CFRelease (sec_transform);
CFRelease (read_stream);
CFRelease (url);
if (passphrase) {
CFRelease (params.passphrase);
}
if (res) {
MONGOC_ERROR ("Failed importing PEM '%s' (code: %d)", filename, res);
return false;
}
return true;
}
char *
_mongoc_secure_transport_extract_subject (const char *filename,
const char *passphrase)
{
bool success;
char *retval = NULL;
CFArrayRef items = NULL;
SecExternalItemType type = kSecItemTypeCertificate;
success =
_mongoc_secure_transport_import_pem (filename, passphrase, &items, &type);
if (!success) {
return NULL;
}
if (type == kSecItemTypeAggregate) {
for (CFIndex i = 0; i < CFArrayGetCount (items); ++i) {
CFTypeID item_id = CFGetTypeID (CFArrayGetValueAtIndex (items, i));
if (item_id == SecCertificateGetTypeID ()) {
retval = _mongoc_secure_transport_RFC2253_from_cert (
(SecCertificateRef) CFArrayGetValueAtIndex (items, i));
break;
}
}
} else if (type == kSecItemTypeCertificate) {
retval =
_mongoc_secure_transport_RFC2253_from_cert ((SecCertificateRef) items);
}
if (items) {
CFRelease (items);
}
return retval;
}
bool
mongoc_secure_transport_setup_certificate (
mongoc_stream_tls_secure_transport_t *secure_transport,
mongoc_ssl_opt_t *opt)
{
bool success;
CFArrayRef items;
SecIdentityRef id;
SecKeyRef key = NULL;
SecCertificateRef cert = NULL;
SecExternalItemType type = kSecItemTypeCertificate;
if (!opt->pem_file) {
- MONGOC_INFO (
- "No private key provided, the server won't be able to verify us");
+ TRACE ("%s",
+ "No private key provided, the server won't be able to verify us");
return false;
}
success = _mongoc_secure_transport_import_pem (
opt->pem_file, opt->pem_pwd, &items, &type);
if (!success) {
MONGOC_ERROR ("Can't find certificate in: '%s'", opt->pem_file);
return false;
}
if (type != kSecItemTypeAggregate) {
MONGOC_ERROR ("Cannot work with keys of type \"%d\". Please file a JIRA",
type);
CFRelease (items);
return false;
}
for (CFIndex i = 0; i < CFArrayGetCount (items); ++i) {
CFTypeID item_id = CFGetTypeID (CFArrayGetValueAtIndex (items, i));
if (item_id == SecCertificateGetTypeID ()) {
cert = (SecCertificateRef) CFArrayGetValueAtIndex (items, i);
} else if (item_id == SecKeyGetTypeID ()) {
key = (SecKeyRef) CFArrayGetValueAtIndex (items, i);
}
}
if (!cert || !key) {
MONGOC_ERROR ("Couldn't find valid private key");
CFRelease (items);
return false;
}
id = SecIdentityCreate (kCFAllocatorDefault, cert, key);
secure_transport->my_cert =
CFArrayCreateMutableCopy (kCFAllocatorDefault, (CFIndex) 2, items);
CFArraySetValueAtIndex (secure_transport->my_cert, 0, id);
CFArraySetValueAtIndex (secure_transport->my_cert, 1, cert);
/*
* Secure Transport assumes the following:
* * The certificate references remain valid for the lifetime of the
* session.
* * The identity specified in certRefs[0] is capable of signing.
*/
success = !SSLSetCertificate (secure_transport->ssl_ctx_ref,
secure_transport->my_cert);
- MONGOC_DEBUG ("Setting client certificate %s",
- success ? "succeeded" : "failed");
+ TRACE ("Setting client certificate %s", success ? "succeeded" : "failed");
CFRelease (items);
return true;
}
bool
mongoc_secure_transport_setup_ca (
mongoc_stream_tls_secure_transport_t *secure_transport,
mongoc_ssl_opt_t *opt)
{
if (opt->ca_file) {
CFArrayRef items;
SecExternalItemType type = kSecItemTypeCertificate;
bool success = _mongoc_secure_transport_import_pem (
opt->ca_file, NULL, &items, &type);
if (!success) {
MONGOC_ERROR ("Can't find certificate in \"%s\"", opt->ca_file);
return false;
}
if (type == kSecItemTypeAggregate) {
CFMutableArrayRef anchors = CFArrayCreateMutable (
kCFAllocatorDefault, 0, &kCFTypeArrayCallBacks);
for (CFIndex i = 0; i < CFArrayGetCount (items); ++i) {
CFTypeID item_id = CFGetTypeID (CFArrayGetValueAtIndex (items, i));
if (item_id == SecCertificateGetTypeID ()) {
CFArrayAppendValue (anchors, CFArrayGetValueAtIndex (items, i));
}
}
secure_transport->anchors = CFRetain (anchors);
CFRelease (items);
} else if (type == kSecItemTypeCertificate) {
secure_transport->anchors = CFRetain (items);
}
/* This should be SSLSetCertificateAuthorities But the /TLS/ tests fail
* when it is */
success = !SSLSetTrustedRoots (
secure_transport->ssl_ctx_ref, secure_transport->anchors, true);
- MONGOC_DEBUG ("Setting certificate authority %s (%s)",
- success ? "succeeded" : "failed",
- opt->ca_file);
+ TRACE ("Setting certificate authority %s (%s)",
+ success ? "succeeded" : "failed",
+ opt->ca_file);
return true;
}
- MONGOC_INFO ("No CA provided, using defaults");
+ TRACE ("%s", "No CA provided, using defaults");
return false;
}
OSStatus
mongoc_secure_transport_read (SSLConnectionRef connection,
void *data,
size_t *data_length)
{
mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) connection;
ssize_t length;
ENTRY;
errno = 0;
/* 4 arguments is *min_bytes* -- This is not a negotiation.
* Secure Transport wants all or nothing. We must continue reading until
* we get this amount, or timeout */
length = mongoc_stream_read (
tls->base_stream, data, *data_length, *data_length, tls->timeout_msec);
if (length > 0) {
*data_length = length;
RETURN (noErr);
}
if (length == 0) {
RETURN (errSSLClosedGraceful);
}
switch (errno) {
case ENOENT:
RETURN (errSSLClosedGraceful);
break;
case ECONNRESET:
RETURN (errSSLClosedAbort);
break;
case EAGAIN:
RETURN (errSSLWouldBlock);
break;
default:
RETURN (-36); /* ioErr */
break;
}
}
OSStatus
mongoc_secure_transport_write (SSLConnectionRef connection,
const void *data,
size_t *data_length)
{
mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) connection;
ssize_t length;
ENTRY;
errno = 0;
length = mongoc_stream_write (
tls->base_stream, (void *) data, *data_length, tls->timeout_msec);
if (length >= 0) {
*data_length = length;
RETURN (noErr);
}
switch (errno) {
case EAGAIN:
RETURN (errSSLWouldBlock);
break;
default:
RETURN (-36); /* ioErr */
break;
}
}
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-description-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-description-private.h
similarity index 98%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-description-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-description-private.h
index 35c24fa6..647d4040 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-description-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-description-private.h
@@ -1,138 +1,138 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_SERVER_DESCRIPTION_PRIVATE_H
#define MONGOC_SERVER_DESCRIPTION_PRIVATE_H
#include "mongoc-server-description.h"
#define MONGOC_DEFAULT_WIRE_VERSION 0
#define MONGOC_DEFAULT_WRITE_BATCH_SIZE 1000
#define MONGOC_DEFAULT_BSON_OBJ_SIZE 16 * 1024 * 1024
#define MONGOC_DEFAULT_MAX_MSG_SIZE 48000000
+#define MONGOC_NO_SESSIONS -1
#define MONGOC_IDLE_WRITE_PERIOD_MS 10 * 1000
/* represent a server or topology with no replica set config version */
#define MONGOC_NO_SET_VERSION -1
typedef enum {
MONGOC_SERVER_UNKNOWN,
MONGOC_SERVER_STANDALONE,
MONGOC_SERVER_MONGOS,
MONGOC_SERVER_POSSIBLE_PRIMARY,
MONGOC_SERVER_RS_PRIMARY,
MONGOC_SERVER_RS_SECONDARY,
MONGOC_SERVER_RS_ARBITER,
MONGOC_SERVER_RS_OTHER,
MONGOC_SERVER_RS_GHOST,
MONGOC_SERVER_DESCRIPTION_TYPES,
} mongoc_server_description_type_t;
struct _mongoc_server_description_t {
uint32_t id;
mongoc_host_list_t host;
int64_t round_trip_time_msec;
int64_t last_update_time_usec;
bson_t last_is_master;
bool has_is_master;
const char *connection_address;
const char *me;
/* whether an APM server-opened callback has been fired before */
bool opened;
const char *set_name;
bson_error_t error;
mongoc_server_description_type_t type;
int32_t min_wire_version;
int32_t max_wire_version;
int32_t max_msg_size;
int32_t max_bson_obj_size;
int32_t max_write_batch_size;
+ int64_t session_timeout_minutes;
bson_t hosts;
bson_t passives;
bson_t arbiters;
bson_t tags;
const char *current_primary;
int64_t set_version;
bson_oid_t election_id;
int64_t last_write_date_ms;
-#ifdef MONGOC_ENABLE_COMPRESSION
bson_t compressors;
-#endif
};
void
mongoc_server_description_init (mongoc_server_description_t *sd,
const char *address,
uint32_t id);
bool
mongoc_server_description_has_rs_member (
mongoc_server_description_t *description, const char *address);
bool
mongoc_server_description_has_set_version (
mongoc_server_description_t *description);
bool
mongoc_server_description_has_election_id (
mongoc_server_description_t *description);
void
mongoc_server_description_cleanup (mongoc_server_description_t *sd);
void
mongoc_server_description_reset (mongoc_server_description_t *sd);
void
mongoc_server_description_set_state (mongoc_server_description_t *description,
mongoc_server_description_type_t type);
void
mongoc_server_description_set_set_version (
mongoc_server_description_t *description, int64_t set_version);
void
mongoc_server_description_set_election_id (
mongoc_server_description_t *description, const bson_oid_t *election_id);
void
mongoc_server_description_update_rtt (mongoc_server_description_t *server,
int64_t rtt_msec);
void
mongoc_server_description_handle_ismaster (mongoc_server_description_t *sd,
const bson_t *reply,
int64_t rtt_msec,
const bson_error_t *error /* IN */);
void
mongoc_server_description_filter_stale (mongoc_server_description_t **sds,
size_t sds_len,
mongoc_server_description_t *primary,
int64_t heartbeat_frequency_ms,
const mongoc_read_prefs_t *read_prefs);
void
mongoc_server_description_filter_tags (
mongoc_server_description_t **descriptions,
size_t description_len,
const mongoc_read_prefs_t *read_prefs);
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-description.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-description.c
similarity index 97%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-description.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-description.c
index 6762aa54..2bb1ed5a 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-description.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-description.c
@@ -1,956 +1,961 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#include "mongoc-host-list.h"
#include "mongoc-host-list-private.h"
#include "mongoc-read-prefs.h"
#include "mongoc-read-prefs-private.h"
#include "mongoc-server-description-private.h"
#include "mongoc-trace-private.h"
#include "mongoc-uri.h"
#include "mongoc-util-private.h"
#include "mongoc-compression-private.h"
#include <stdio.h>
#define ALPHA 0.2
static bson_oid_t kObjectIdZero = {{0}};
static bool
_match_tag_set (const mongoc_server_description_t *sd,
bson_iter_t *tag_set_iter);
/* Destroy allocated resources within @description, but don't free it */
void
mongoc_server_description_cleanup (mongoc_server_description_t *sd)
{
BSON_ASSERT (sd);
bson_destroy (&sd->last_is_master);
}
/* Reset fields inside this sd, but keep same id, host information, and RTT,
and leave ismaster in empty inited state */
void
mongoc_server_description_reset (mongoc_server_description_t *sd)
{
BSON_ASSERT (sd);
memset (&sd->error, 0, sizeof sd->error);
sd->set_name = NULL;
sd->type = MONGOC_SERVER_UNKNOWN;
sd->min_wire_version = MONGOC_DEFAULT_WIRE_VERSION;
sd->max_wire_version = MONGOC_DEFAULT_WIRE_VERSION;
sd->max_msg_size = MONGOC_DEFAULT_MAX_MSG_SIZE;
sd->max_bson_obj_size = MONGOC_DEFAULT_BSON_OBJ_SIZE;
sd->max_write_batch_size = MONGOC_DEFAULT_WRITE_BATCH_SIZE;
+ sd->session_timeout_minutes = MONGOC_NO_SESSIONS;
sd->last_write_date_ms = -1;
/* always leave last ismaster in an init-ed state until we destroy sd */
bson_destroy (&sd->last_is_master);
bson_init (&sd->last_is_master);
sd->has_is_master = false;
sd->last_update_time_usec = bson_get_monotonic_time ();
bson_init (&sd->hosts);
bson_init (&sd->passives);
bson_init (&sd->arbiters);
bson_init (&sd->tags);
-#ifdef MONGOC_ENABLE_COMPRESSION
bson_init (&sd->compressors);
-#endif
sd->me = NULL;
sd->current_primary = NULL;
sd->set_version = MONGOC_NO_SET_VERSION;
bson_oid_copy_unsafe (&kObjectIdZero, &sd->election_id);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_server_description_init --
*
* Initialize a new server_description_t.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_server_description_init (mongoc_server_description_t *sd,
const char *address,
uint32_t id)
{
ENTRY;
BSON_ASSERT (sd);
BSON_ASSERT (address);
sd->id = id;
sd->type = MONGOC_SERVER_UNKNOWN;
sd->round_trip_time_msec = -1;
if (!_mongoc_host_list_from_string (&sd->host, address)) {
MONGOC_WARNING ("Failed to parse uri for %s", address);
return;
}
sd->connection_address = sd->host.host_and_port;
bson_init (&sd->last_is_master);
mongoc_server_description_reset (sd);
EXIT;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_server_description_destroy --
*
* Destroy allocated resources within @description and free
* @description.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_server_description_destroy (mongoc_server_description_t *description)
{
ENTRY;
mongoc_server_description_cleanup (description);
bson_free (description);
EXIT;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_has_rs_member --
 *
 *       Check whether @address appears in any of the server's replica-set
 *       member lists: hosts, arbiters, or passives. Comparison is
 *       case-insensitive. An Unknown server never matches.
 *
 * Returns:
 *       true, false
 *
 * Side effects:
 *       None
 *
 *--------------------------------------------------------------------------
 */
bool
mongoc_server_description_has_rs_member (mongoc_server_description_t *server,
                                         const char *address)
{
   const bson_t *member_lists[3];
   bson_iter_t iter;
   int idx;

   if (server->type == MONGOC_SERVER_UNKNOWN) {
      return false;
   }

   member_lists[0] = &server->hosts;
   member_lists[1] = &server->arbiters;
   member_lists[2] = &server->passives;

   for (idx = 0; idx < 3; idx++) {
      bson_iter_init (&iter, member_lists[idx]);
      while (bson_iter_next (&iter)) {
         if (0 == strcasecmp (address, bson_iter_utf8 (&iter, NULL))) {
            return true;
         }
      }
   }

   return false;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_has_set_version --
 *
 *       Did this server's ismaster response include a "setVersion" field?
 *
 * Returns:
 *       True if setVersion has been recorded on the description.
 *
 *--------------------------------------------------------------------------
 */
bool
mongoc_server_description_has_set_version (
   mongoc_server_description_t *description)
{
   return MONGOC_NO_SET_VERSION != description->set_version;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_has_election_id --
 *
 *       Did this server's ismaster response include an "electionId" field?
 *       An all-zero ObjectId is the "not set" sentinel.
 *
 * Returns:
 *       True if the description's electionId is non-zero.
 *
 *--------------------------------------------------------------------------
 */
bool
mongoc_server_description_has_election_id (
   mongoc_server_description_t *description)
{
   return bson_oid_compare (&description->election_id, &kObjectIdZero) != 0;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_id --
 *
 *       Accessor for the client-assigned numeric id of this server.
 *
 * Returns:
 *       Server's id.
 *
 *--------------------------------------------------------------------------
 */
uint32_t
mongoc_server_description_id (const mongoc_server_description_t *description)
{
   return description->id;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_server_description_host --
*
* Return a reference to the host associated with this server description.
*
* Returns:
* This server description's host, a mongoc_host_list_t * you must
* not modify or free.
*
*--------------------------------------------------------------------------
*/
mongoc_host_list_t *
mongoc_server_description_host (const mongoc_server_description_t *description)
{
return &((mongoc_server_description_t *) description)->host;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_round_trip_time --
 *
 *       Accessor for the client's moving-average measurement of the
 *       duration of this server's "ismaster" command.
 *
 * Returns:
 *       The round trip time in milliseconds (-1 when unmeasured).
 *
 *--------------------------------------------------------------------------
 */
int64_t
mongoc_server_description_round_trip_time (
   const mongoc_server_description_t *description)
{
   return description->round_trip_time_msec;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_type --
 *
 *       Map this server's type to its name as defined in the Server
 *       Discovery And Monitoring Spec. An out-of-range value logs an
 *       error and yields "Invalid".
 *
 * Returns:
 *       A static string; do not free.
 *
 *--------------------------------------------------------------------------
 */
const char *
mongoc_server_description_type (const mongoc_server_description_t *description)
{
   switch (description->type) {
   case MONGOC_SERVER_UNKNOWN:
      return "Unknown";
   case MONGOC_SERVER_STANDALONE:
      return "Standalone";
   case MONGOC_SERVER_MONGOS:
      return "Mongos";
   case MONGOC_SERVER_POSSIBLE_PRIMARY:
      return "PossiblePrimary";
   case MONGOC_SERVER_RS_PRIMARY:
      return "RSPrimary";
   case MONGOC_SERVER_RS_SECONDARY:
      return "RSSecondary";
   case MONGOC_SERVER_RS_ARBITER:
      return "RSArbiter";
   case MONGOC_SERVER_RS_OTHER:
      return "RSOther";
   case MONGOC_SERVER_RS_GHOST:
      return "RSGhost";
   case MONGOC_SERVER_DESCRIPTION_TYPES:
   default:
      break;
   }

   MONGOC_ERROR ("Invalid mongoc_server_description_t type");
   return "Invalid";
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_ismaster --
 *
 *       Accessor for the most recent "ismaster" command response stored
 *       on this description.
 *
 * Returns:
 *       A BSON document owned by the description; do not modify or free.
 *
 *--------------------------------------------------------------------------
 */
const bson_t *
mongoc_server_description_ismaster (
   const mongoc_server_description_t *description)
{
   return &description->last_is_master;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_set_state --
 *
 *       Overwrite the description's SDAM server type with @type.
 *
 *--------------------------------------------------------------------------
 */
void
mongoc_server_description_set_state (mongoc_server_description_t *description,
                                     mongoc_server_description_type_t type)
{
   description->type = type;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_set_set_version --
 *
 *       Record the replica set version reported by this server.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
void
mongoc_server_description_set_set_version (
   mongoc_server_description_t *description, int64_t set_version)
{
   description->set_version = set_version;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_set_election_id --
 *
 *       Record this server's election_id. A NULL @election_id resets the
 *       description's election_id to the all-zero sentinel.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
void
mongoc_server_description_set_election_id (
   mongoc_server_description_t *description, const bson_oid_t *election_id)
{
   const bson_oid_t *src = election_id ? election_id : &kObjectIdZero;

   bson_oid_copy_unsafe (src, &description->election_id);
}
/*
 *-------------------------------------------------------------------------
 *
 * mongoc_server_description_update_rtt --
 *
 *       Fold a new round-trip sample into the server's RTT estimate with
 *       an exponentially-weighted moving average (weight ALPHA on the new
 *       sample). The very first sample is taken verbatim.
 *
 * Side effects:
 *       None.
 *
 *-------------------------------------------------------------------------
 */
void
mongoc_server_description_update_rtt (mongoc_server_description_t *server,
                                      int64_t rtt_msec)
{
   int64_t prev = server->round_trip_time_msec;

   if (prev == -1) {
      /* no prior measurement: seed the average with this sample */
      server->round_trip_time_msec = rtt_msec;
      return;
   }

   server->round_trip_time_msec =
      (int64_t) (ALPHA * rtt_msec + (1 - ALPHA) * prev);
}
/* Record @error on @sd, substituting a generic connect error when @error is
 * NULL or has no code, and null out the RTT per the SDAM spec. */
static void
_mongoc_server_description_set_error (mongoc_server_description_t *sd,
                                      const bson_error_t *error)
{
   if (!error || !error->code) {
      bson_set_error (&sd->error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_CONNECT,
                      "unknown error calling ismaster");
   } else {
      memcpy (&sd->error, error, sizeof (bson_error_t));
   }

   /* Server Discovery and Monitoring Spec: if the server type changes from a
    * known type to Unknown its RTT is set to null. */
   sd->round_trip_time_msec = -1;
}
/*
 *-------------------------------------------------------------------------
 *
 * mongoc_server_description_handle_ismaster --
 *
 *       Apply @ismaster_response to @sd: parse every recognized field and
 *       derive the server's SDAM type. Called during SDAM, from topology
 *       description's ismaster handler, or when handshaking a connection
 *       in _mongoc_cluster_stream_for_server.
 *
 *       If @ismaster_response is empty, @error must say why ismaster
 *       failed; the description is then reset and marked with that error.
 *       Any field of unexpected BSON type aborts parsing and leaves the
 *       server Unknown ("goto failure").
 *
 *-------------------------------------------------------------------------
 */
void
mongoc_server_description_handle_ismaster (mongoc_server_description_t *sd,
                                           const bson_t *ismaster_response,
                                           int64_t rtt_msec,
                                           const bson_error_t *error /* IN */)
{
   bson_iter_t iter;
   bson_iter_t child;
   bool is_master = false;
   bool is_shard = false;
   bool is_secondary = false;
   bool is_arbiter = false;
   bool is_replicaset = false;
   bool is_hidden = false;
   const uint8_t *bytes;
   uint32_t len;
   int num_keys = 0;

   ENTRY;

   BSON_ASSERT (sd);

   /* clear any state from a previous response before reparsing */
   mongoc_server_description_reset (sd);
   if (!ismaster_response) {
      _mongoc_server_description_set_error (sd, error);
      EXIT;
   }

   /* keep our own copy of the response; the static bson views below
    * (hosts, passives, arbiters, tags) point into this copy */
   bson_destroy (&sd->last_is_master);
   bson_copy_to (ismaster_response, &sd->last_is_master);
   sd->has_is_master = true;

   bson_iter_init (&iter, &sd->last_is_master);

   while (bson_iter_next (&iter)) {
      num_keys++;
      if (strcmp ("ok", bson_iter_key (&iter)) == 0) {
         /* ismaster responses never have ok: 0, but spec requires we check */
         if (!bson_iter_as_bool (&iter))
            goto failure;
      } else if (strcmp ("ismaster", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_BOOL (&iter))
            goto failure;
         is_master = bson_iter_bool (&iter);
      } else if (strcmp ("me", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter))
            goto failure;
         /* borrowed pointer into sd->last_is_master */
         sd->me = bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("maxMessageSizeBytes", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_INT32 (&iter))
            goto failure;
         sd->max_msg_size = bson_iter_int32 (&iter);
      } else if (strcmp ("maxBsonObjectSize", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_INT32 (&iter))
            goto failure;
         sd->max_bson_obj_size = bson_iter_int32 (&iter);
      } else if (strcmp ("maxWriteBatchSize", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_INT32 (&iter))
            goto failure;
         sd->max_write_batch_size = bson_iter_int32 (&iter);
+      } else if (strcmp ("logicalSessionTimeoutMinutes",
+                         bson_iter_key (&iter)) == 0) {
+         if (BSON_ITER_HOLDS_NUMBER (&iter)) {
+            sd->session_timeout_minutes = bson_iter_as_int64 (&iter);
+         } else if (BSON_ITER_HOLDS_NULL (&iter)) {
+            /* this arises executing standard JSON tests */
+            sd->session_timeout_minutes = MONGOC_NO_SESSIONS;
+         } else {
+            goto failure;
+         }
      } else if (strcmp ("minWireVersion", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_INT32 (&iter))
            goto failure;
         sd->min_wire_version = bson_iter_int32 (&iter);
      } else if (strcmp ("maxWireVersion", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_INT32 (&iter))
            goto failure;
         sd->max_wire_version = bson_iter_int32 (&iter);
      } else if (strcmp ("msg", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter))
            goto failure;
         /* any "msg" string marks a mongos (value is "isdbgrid") */
         is_shard = !!bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("setName", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter))
            goto failure;
         sd->set_name = bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("setVersion", bson_iter_key (&iter)) == 0) {
         mongoc_server_description_set_set_version (sd,
                                                    bson_iter_as_int64 (&iter));
      } else if (strcmp ("electionId", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_OID (&iter))
            goto failure;
         mongoc_server_description_set_election_id (sd, bson_iter_oid (&iter));
      } else if (strcmp ("secondary", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_BOOL (&iter))
            goto failure;
         is_secondary = bson_iter_bool (&iter);
      } else if (strcmp ("hosts", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter))
            goto failure;
         bson_iter_array (&iter, &len, &bytes);
         /* static view into sd->last_is_master; no copy */
         bson_init_static (&sd->hosts, bytes, len);
      } else if (strcmp ("passives", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter))
            goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->passives, bytes, len);
      } else if (strcmp ("arbiters", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter))
            goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->arbiters, bytes, len);
      } else if (strcmp ("primary", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_UTF8 (&iter))
            goto failure;
         sd->current_primary = bson_iter_utf8 (&iter, NULL);
      } else if (strcmp ("arbiterOnly", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_BOOL (&iter))
            goto failure;
         is_arbiter = bson_iter_bool (&iter);
      } else if (strcmp ("isreplicaset", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_BOOL (&iter))
            goto failure;
         is_replicaset = bson_iter_bool (&iter);
      } else if (strcmp ("tags", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter))
            goto failure;
         bson_iter_document (&iter, &len, &bytes);
         bson_init_static (&sd->tags, bytes, len);
      } else if (strcmp ("hidden", bson_iter_key (&iter)) == 0) {
         is_hidden = bson_iter_bool (&iter);
      } else if (strcmp ("lastWrite", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_DOCUMENT (&iter) ||
             !bson_iter_recurse (&iter, &child) ||
             !bson_iter_find (&child, "lastWriteDate") ||
             !BSON_ITER_HOLDS_DATE_TIME (&child)) {
            goto failure;
         }
         sd->last_write_date_ms = bson_iter_date_time (&child);
      } else if (strcmp ("idleWritePeriodMillis", bson_iter_key (&iter)) == 0) {
         /* NOTE(review): this overwrites last_write_date_ms, the same field
          * the "lastWrite" branch sets — looks like a copy/paste of that
          * branch; confirm the intended destination field. */
         sd->last_write_date_ms = bson_iter_as_int64 (&iter);
-#ifdef MONGOC_ENABLE_COMPRESSION
      } else if (strcmp ("compression", bson_iter_key (&iter)) == 0) {
         if (!BSON_ITER_HOLDS_ARRAY (&iter))
            goto failure;
         bson_iter_array (&iter, &len, &bytes);
         bson_init_static (&sd->compressors, bytes, len);
-#endif
      }
   }

   /* derive the SDAM server type from the parsed flags */
   if (is_shard) {
      sd->type = MONGOC_SERVER_MONGOS;
   } else if (sd->set_name) {
      if (is_hidden) {
         sd->type = MONGOC_SERVER_RS_OTHER;
      } else if (is_master) {
         sd->type = MONGOC_SERVER_RS_PRIMARY;
      } else if (is_secondary) {
         sd->type = MONGOC_SERVER_RS_SECONDARY;
      } else if (is_arbiter) {
         sd->type = MONGOC_SERVER_RS_ARBITER;
      } else {
         sd->type = MONGOC_SERVER_RS_OTHER;
      }
   } else if (is_replicaset) {
      sd->type = MONGOC_SERVER_RS_GHOST;
   } else if (num_keys > 0) {
      sd->type = MONGOC_SERVER_STANDALONE;
   } else {
      sd->type = MONGOC_SERVER_UNKNOWN;
   }

   if (!num_keys) {
      /* empty reply means ismaster failed */
      _mongoc_server_description_set_error (sd, error);
   }

   mongoc_server_description_update_rtt (sd, rtt_msec);

   EXIT;

failure:
   /* malformed response: mark the server Unknown with a null RTT */
   sd->type = MONGOC_SERVER_UNKNOWN;
   sd->round_trip_time_msec = -1;

   EXIT;
}
/*
 *-------------------------------------------------------------------------
 *
 * mongoc_server_description_new_copy --
 *
 *       Deep-copy @description. The copy's RTT starts unmeasured; if the
 *       original has an ismaster response it is re-applied to the copy,
 *       and the original's error is preserved either way.
 *
 * Returns:
 *       A copy you must destroy with mongoc_server_description_destroy,
 *       or NULL if @description is NULL.
 *
 *-------------------------------------------------------------------------
 */
mongoc_server_description_t *
mongoc_server_description_new_copy (
   const mongoc_server_description_t *description)
{
   mongoc_server_description_t *copy;

   if (!description) {
      return NULL;
   }

   copy = (mongoc_server_description_t *) bson_malloc0 (sizeof (*copy));

   copy->id = description->id;
   copy->opened = description->opened;
   memcpy (&copy->host, &description->host, sizeof (copy->host));
   copy->round_trip_time_msec = -1;
   copy->connection_address = copy->host.host_and_port;

   bson_init (&copy->last_is_master);

   if (description->has_is_master) {
      /* calls mongoc_server_description_reset */
      mongoc_server_description_handle_ismaster (
         copy,
         &description->last_is_master,
         description->round_trip_time_msec,
         &description->error);
   } else {
      mongoc_server_description_reset (copy);
   }

   /* whichever branch ran above, carry the original's error forward */
   memcpy (&copy->error, &description->error, sizeof copy->error);

   return copy;
}
/*
 *-------------------------------------------------------------------------
 *
 * mongoc_server_description_filter_stale --
 *
 *       Estimate servers' staleness according to the Server Selection Spec.
 *       Determines the number of eligible servers, and sets any servers
 *       that are too stale to NULL in the descriptions set.
 *
 *       Only entries of type RSSecondary are ever filtered; a NULL
 *       read_prefs or absent maxStalenessSeconds disables filtering.
 *
 *-------------------------------------------------------------------------
 */
void
mongoc_server_description_filter_stale (mongoc_server_description_t **sds,
                                        size_t sds_len,
                                        mongoc_server_description_t *primary,
                                        int64_t heartbeat_frequency_ms,
                                        const mongoc_read_prefs_t *read_prefs)
{
   int64_t max_staleness_seconds;
   size_t i;
   int64_t heartbeat_frequency_usec;
   int64_t max_last_write_date_usec;
   int64_t staleness_usec;
   int64_t max_staleness_usec;

   if (!read_prefs) {
      /* NULL read_prefs is PRIMARY, no maxStalenessSeconds to filter by */
      return;
   }

   max_staleness_seconds =
      mongoc_read_prefs_get_max_staleness_seconds (read_prefs);

   if (max_staleness_seconds == MONGOC_NO_MAX_STALENESS) {
      return;
   }

   BSON_ASSERT (max_staleness_seconds > 0);
   /* convert everything to microseconds before comparing */
   max_staleness_usec = max_staleness_seconds * 1000 * 1000;
   heartbeat_frequency_usec = heartbeat_frequency_ms * 1000;

   if (primary) {
      /* with a known primary, measure each secondary against it */
      for (i = 0; i < sds_len; i++) {
         if (!sds[i] || sds[i]->type != MONGOC_SERVER_RS_SECONDARY) {
            continue;
         }

         /* See max-staleness.rst for explanation of these formulae. */
         staleness_usec =
            primary->last_write_date_ms * 1000 +
            (sds[i]->last_update_time_usec - primary->last_update_time_usec) -
            sds[i]->last_write_date_ms * 1000 + heartbeat_frequency_usec;

         if (staleness_usec > max_staleness_usec) {
            TRACE ("Rejected stale RSSecondary [%s]",
                   sds[i]->host.host_and_port);
            sds[i] = NULL;
         }
      }
   } else {
      /* no primary: find max last_write_date among the secondaries */
      max_last_write_date_usec = 0;
      for (i = 0; i < sds_len; i++) {
         if (sds[i] && sds[i]->type == MONGOC_SERVER_RS_SECONDARY) {
            max_last_write_date_usec = BSON_MAX (
               max_last_write_date_usec, sds[i]->last_write_date_ms * 1000);
         }
      }

      /* use max last_write_date to estimate each secondary's staleness */
      for (i = 0; i < sds_len; i++) {
         if (!sds[i] || sds[i]->type != MONGOC_SERVER_RS_SECONDARY) {
            continue;
         }

         staleness_usec = max_last_write_date_usec -
                          sds[i]->last_write_date_ms * 1000 +
                          heartbeat_frequency_usec;

         if (staleness_usec > max_staleness_usec) {
            TRACE ("Rejected stale RSSecondary [%s]",
                   sds[i]->host.host_and_port);
            sds[i] = NULL;
         }
      }
   }
}
/*
 *-------------------------------------------------------------------------
 *
 * mongoc_server_description_filter_tags --
 *
 *       Given a set of server descriptions, set to NULL any that don't
 *       match the read preference's tag sets.
 *
 *       Tag sets are tried in order; the first tag set matched by at
 *       least one server wins, and all non-matching servers are NULLed.
 *       If no tag set matches anything, every remaining server is NULLed.
 *
 *       https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst#tag-set
 *
 *-------------------------------------------------------------------------
 */
void
mongoc_server_description_filter_tags (
   mongoc_server_description_t **descriptions,
   size_t description_len,
   const mongoc_read_prefs_t *read_prefs)
{
   const bson_t *rp_tags;
   bson_iter_t rp_tagset_iter;
   bson_iter_t tag_set_iter;
   bool *sd_matched = NULL;
   bool found;
   size_t i;

   if (!read_prefs) {
      /* NULL read_prefs is PRIMARY, no tags to filter by */
      return;
   }

   rp_tags = mongoc_read_prefs_get_tags (read_prefs);

   if (bson_count_keys (rp_tags) == 0) {
      /* no tags to filter by */
      return;
   }

   sd_matched = (bool *) bson_malloc0 (sizeof (bool) * description_len);

   bson_iter_init (&rp_tagset_iter, rp_tags);

   /* for each read preference tag set */
   while (bson_iter_next (&rp_tagset_iter)) {
      found = false;

      for (i = 0; i < description_len; i++) {
         if (!descriptions[i]) {
            /* NULLed earlier in mongoc_topology_description_suitable_servers */
            continue;
         }

         bson_iter_recurse (&rp_tagset_iter, &tag_set_iter);
         sd_matched[i] = _match_tag_set (descriptions[i], &tag_set_iter);

         if (sd_matched[i]) {
            found = true;
         }
      }

      if (found) {
         for (i = 0; i < description_len; i++) {
            if (!sd_matched[i] && descriptions[i]) {
               TRACE ("Rejected [%s] [%s], doesn't match tags",
                      mongoc_server_description_type (descriptions[i]),
                      descriptions[i]->host.host_and_port);
               descriptions[i] = NULL;
            }
         }

         goto CLEANUP;
      }
   }

   /* tried each tag set without a match: reject all remaining servers.
    * Fix: guard descriptions[i] for NULL, as the loop above does — entries
    * NULLed before this call have sd_matched[i] == false, and TRACE would
    * otherwise dereference a NULL description here. */
   for (i = 0; i < description_len; i++) {
      if (!sd_matched[i] && descriptions[i]) {
         TRACE ("Rejected [%s] [%s], reached end of tags array without match",
                mongoc_server_description_type (descriptions[i]),
                descriptions[i]->host.host_and_port);
         descriptions[i] = NULL;
      }
   }

CLEANUP:
   bson_free (sd_matched);
}
/*
 *-------------------------------------------------------------------------
 *
 * _match_tag_set --
 *
 *       Check if a server's tags satisfy one tag set, like
 *       {'tag1': 'value1', 'tag2': 'value2'}: every key in the tag set
 *       must be present on the server with a byte-identical value.
 *
 *-------------------------------------------------------------------------
 */
static bool
_match_tag_set (const mongoc_server_description_t *sd,
                bson_iter_t *tag_set_iter)
{
   bson_iter_t sd_iter;
   uint32_t read_pref_tag_len;
   uint32_t sd_len;
   const char *read_pref_tag;
   const char *read_pref_val;
   const char *server_val;

   while (bson_iter_next (tag_set_iter)) {
      /* one {'tag': 'value'} pair from the read preference's tag set */
      read_pref_tag = bson_iter_key (tag_set_iter);
      read_pref_val = bson_iter_utf8 (tag_set_iter, &read_pref_tag_len);

      if (!bson_iter_init_find (&sd_iter, &sd->tags, read_pref_tag)) {
         /* the server doesn't have this tag at all: no match */
         return false;
      }

      /* the server has this tag - does it have the right value? */
      server_val = bson_iter_utf8 (&sd_iter, &sd_len);

      if (sd_len != read_pref_tag_len ||
          memcmp (read_pref_val, server_val, read_pref_tag_len) != 0) {
         return false;
      }
   }

   return true;
}
/* NOTE(review): this span is diff residue — it still carries '-'/'+' marker
 * lines from a patch (old "return 0" vs new "return -1", and the removed
 * MONGOC_ENABLE_COMPRESSION guards). Left byte-identical; resolve the diff
 * before compiling. */
-#ifdef MONGOC_ENABLE_COMPRESSION
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_description_compressor_id --
 *
 *       Get the compressor id if compression was negotiated.
 *
 * Returns:
- *       The compressor ID, or 0 if none was negotiated.
+ *       The compressor ID, or -1 if none was negotiated.
 *
 *--------------------------------------------------------------------------
 */
int32_t
mongoc_server_description_compressor_id (
   const mongoc_server_description_t *description)
{
   int id;
   bson_iter_t iter;

   /* return the first negotiated compressor the client also supports */
   bson_iter_init (&iter, &description->compressors);

   while (bson_iter_next (&iter)) {
      id = mongoc_compressor_name_to_id (bson_iter_utf8 (&iter, NULL));
      if (id != -1) {
         return id;
      }
   }

-   return 0;
+   return -1;
}
-#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-description.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-description.h
similarity index 97%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-description.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-description.h
index 972f3487..9e9264ed 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-description.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-description.h
@@ -1,62 +1,60 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_SERVER_DESCRIPTION_H
#define MONGOC_SERVER_DESCRIPTION_H
#include <bson.h>
#include "mongoc-macros.h"
#include "mongoc-read-prefs.h"
#include "mongoc-host-list.h"
BSON_BEGIN_DECLS
typedef struct _mongoc_server_description_t mongoc_server_description_t;
MONGOC_EXPORT (void)
mongoc_server_description_destroy (mongoc_server_description_t *description);
MONGOC_EXPORT (mongoc_server_description_t *)
mongoc_server_description_new_copy (
const mongoc_server_description_t *description);
MONGOC_EXPORT (uint32_t)
mongoc_server_description_id (const mongoc_server_description_t *description);
MONGOC_EXPORT (mongoc_host_list_t *)
mongoc_server_description_host (const mongoc_server_description_t *description);
MONGOC_EXPORT (int64_t)
mongoc_server_description_round_trip_time (
const mongoc_server_description_t *description);
MONGOC_EXPORT (const char *)
mongoc_server_description_type (const mongoc_server_description_t *description);
MONGOC_EXPORT (const bson_t *)
mongoc_server_description_ismaster (
const mongoc_server_description_t *description);
-#ifdef MONGOC_ENABLE_COMPRESSION
MONGOC_EXPORT (int32_t)
mongoc_server_description_compressor_id (
const mongoc_server_description_t *description);
-#endif
BSON_END_DECLS
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-stream-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-stream-private.h
similarity index 93%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-stream-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-stream-private.h
index a07380cb..834d61d4 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-stream-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-stream-private.h
@@ -1,62 +1,63 @@
/*
* Copyright 2015 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_SERVER_STREAM_H
#define MONGOC_SERVER_STREAM_H
#include "mongoc-config.h"
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-topology-description-private.h"
#include "mongoc-server-description-private.h"
#include "mongoc-stream.h"
BSON_BEGIN_DECLS
typedef struct _mongoc_server_stream_t {
mongoc_topology_description_type_t topology_type;
mongoc_server_description_t *sd; /* owned */
+ bson_t cluster_time; /* owned */
mongoc_stream_t *stream; /* borrowed */
} mongoc_server_stream_t;
mongoc_server_stream_t *
-mongoc_server_stream_new (mongoc_topology_description_type_t topology_type,
+mongoc_server_stream_new (const mongoc_topology_description_t *td,
mongoc_server_description_t *sd,
mongoc_stream_t *stream);
int32_t
mongoc_server_stream_max_bson_obj_size (mongoc_server_stream_t *server_stream);
int32_t
mongoc_server_stream_max_msg_size (mongoc_server_stream_t *server_stream);
int32_t
mongoc_server_stream_max_write_batch_size (
mongoc_server_stream_t *server_stream);
void
mongoc_server_stream_cleanup (mongoc_server_stream_t *server_stream);
BSON_END_DECLS
#endif /* MONGOC_SERVER_STREAM_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-stream.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-stream.c
similarity index 92%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-stream.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-stream.c
index abc709e5..305db509 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-server-stream.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-server-stream.c
@@ -1,102 +1,104 @@
/*
* Copyright 2015 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-cluster-private.h"
#include "mongoc-server-stream-private.h"
#include "mongoc-util-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "server-stream"
/* Create a server stream: wraps @stream with the server description @sd
 * (ownership of @sd transfers to the stream; @stream is borrowed).
 * NOTE(review): diff residue below — both the old signature (taking a
 * topology type) and the new one (taking a topology description) are
 * present with '-'/'+' markers; resolve before compiling. */
mongoc_server_stream_t *
-mongoc_server_stream_new (mongoc_topology_description_type_t topology_type,
+mongoc_server_stream_new (const mongoc_topology_description_t *td,
                          mongoc_server_description_t *sd,
                          mongoc_stream_t *stream)
{
   mongoc_server_stream_t *server_stream;

   BSON_ASSERT (sd);
   BSON_ASSERT (stream);

   server_stream = bson_malloc (sizeof (mongoc_server_stream_t));
-   server_stream->topology_type = topology_type;
+   server_stream->topology_type = td->type;
+   bson_copy_to (&td->cluster_time, &server_stream->cluster_time);
   server_stream->sd = sd; /* becomes owned */
   server_stream->stream = stream; /* merely borrowed */

   return server_stream;
}
/* Destroy a server stream: frees the owned server description and the
 * stream struct itself; the wrapped mongoc_stream_t is NOT destroyed
 * (it is borrowed). Safe to call with NULL.
 * NOTE(review): the '+'-marked line is diff residue; resolve before
 * compiling. */
void
mongoc_server_stream_cleanup (mongoc_server_stream_t *server_stream)
{
   if (server_stream) {
      mongoc_server_description_destroy (server_stream->sd);
+      bson_destroy (&server_stream->cluster_time);
      bson_free (server_stream);
   }
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_stream_max_bson_obj_size --
 *
 *       Max BSON object size for this stream's server, falling back to
 *       the library default when the server didn't report one.
 *
 *--------------------------------------------------------------------------
 */
int32_t
mongoc_server_stream_max_bson_obj_size (mongoc_server_stream_t *server_stream)
{
   int32_t reported = server_stream->sd->max_bson_obj_size;

   return COALESCE (reported, MONGOC_DEFAULT_BSON_OBJ_SIZE);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_stream_max_msg_size --
 *
 *       Max wire message size for this stream's server, falling back to
 *       the library default when the server didn't report one.
 *
 *--------------------------------------------------------------------------
 */
int32_t
mongoc_server_stream_max_msg_size (mongoc_server_stream_t *server_stream)
{
   int32_t reported = server_stream->sd->max_msg_size;

   return COALESCE (reported, MONGOC_DEFAULT_MAX_MSG_SIZE);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_server_stream_max_write_batch_size --
 *
 *       Max write batch size for this stream's server, falling back to
 *       the library default when the server didn't report one.
 *
 *--------------------------------------------------------------------------
 */
int32_t
mongoc_server_stream_max_write_batch_size (
   mongoc_server_stream_t *server_stream)
{
   int32_t reported = server_stream->sd->max_write_batch_size;

   return COALESCE (reported, MONGOC_DEFAULT_WRITE_BATCH_SIZE);
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-set-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-set-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-set-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-set-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-set.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-set.c
similarity index 95%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-set.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-set.c
index 840adf10..6c05b32d 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-set.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-set.c
@@ -1,221 +1,225 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bson.h>
#include "mongoc-set-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "set"
/* Allocate a set with room for @nitems entries. @dtor (with @dtor_ctx) is
 * invoked on each item when it is removed or the set is destroyed. */
mongoc_set_t *
mongoc_set_new (size_t nitems, mongoc_set_item_dtor dtor, void *dtor_ctx)
{
   mongoc_set_t *set;

   set = (mongoc_set_t *) bson_malloc (sizeof (*set));
   set->items_allocated = nitems;
   set->items_len = 0;
   set->items = (mongoc_set_item_t *) bson_malloc (sizeof (*set->items) *
                                                   set->items_allocated);
   set->dtor = dtor;
   set->dtor_ctx = dtor_ctx;

   return set;
}
/* qsort/bsearch comparator: order mongoc_set_item_t by ascending id. */
static int
mongoc_set_id_cmp (const void *a_, const void *b_)
{
   const mongoc_set_item_t *a = (const mongoc_set_item_t *) a_;
   const mongoc_set_item_t *b = (const mongoc_set_item_t *) b_;

   if (a->id < b->id) {
      return -1;
   }

   return a->id == b->id ? 0 : 1;
}
/* Append (@id, @item), growing the backing array geometrically as needed,
 * and re-sort only when the new id breaks the ascending order that bsearch
 * relies on. */
void
mongoc_set_add (mongoc_set_t *set, uint32_t id, void *item)
{
   mongoc_set_item_t *slot;

   if (set->items_len >= set->items_allocated) {
      set->items_allocated *= 2;
      set->items = (mongoc_set_item_t *) bson_realloc (
         set->items, sizeof (*set->items) * set->items_allocated);
   }

   slot = &set->items[set->items_len];
   slot->id = id;
   slot->item = item;
   set->items_len++;

   if (set->items_len > 1 && set->items[set->items_len - 2].id > id) {
      qsort (
         set->items, set->items_len, sizeof (*set->items), mongoc_set_id_cmp);
   }
}
/* Remove the item with @id, if present: run the destructor on it and close
 * the gap in the sorted array. No-op when @id is absent.
 * NOTE(review): the '-'/'+'-marked lines are diff residue (the new version
 * makes the dtor optional); resolve before compiling. */
void
mongoc_set_rm (mongoc_set_t *set, uint32_t id)
{
   mongoc_set_item_t *ptr;
   mongoc_set_item_t key;
   int i;

   key.id = id;

   /* items are kept sorted by id, so bsearch is valid here */
   ptr = (mongoc_set_item_t *) bsearch (
      &key, set->items, set->items_len, sizeof (key), mongoc_set_id_cmp);

   if (ptr) {
-      set->dtor (ptr->item, set->dtor_ctx);
+      if (set->dtor) {
+         set->dtor (ptr->item, set->dtor_ctx);
+      }
      i = ptr - set->items;

      if (i != set->items_len - 1) {
         /* shift the tail left over the removed slot */
         memmove (set->items + i,
                  set->items + i + 1,
                  (set->items_len - (i + 1)) * sizeof (key));
      }

      set->items_len--;
   }
}
/* Look up the item stored under @id, or NULL when absent. */
void *
mongoc_set_get (mongoc_set_t *set, uint32_t id)
{
   mongoc_set_item_t key;
   mongoc_set_item_t *found;

   key.id = id;

   /* items are kept sorted by id (see mongoc_set_add) */
   found = (mongoc_set_item_t *) bsearch (
      &key, set->items, set->items_len, sizeof (key), mongoc_set_id_cmp);

   if (!found) {
      return NULL;
   }

   return found->item;
}
/* Return the item at positional index @idx (array position, not id). */
void *
mongoc_set_get_item (mongoc_set_t *set, int idx)
{
   BSON_ASSERT (set);
   BSON_ASSERT (idx < set->items_len);

   return set->items[idx].item;
}
/* Return the item at positional index @idx and store its id in @id. */
void *
mongoc_set_get_item_and_id (mongoc_set_t *set, int idx, uint32_t *id /* OUT */)
{
   BSON_ASSERT (set);
   BSON_ASSERT (id);
   BSON_ASSERT (idx < set->items_len);

   *id = set->items[idx].id;
   return set->items[idx].item;
}
/*
 * Free @set and its backing array, first running the destructor (when one
 * was supplied) on every stored item.  (This block contained unresolved
 * diff markers in the source; the applied version guards a NULL dtor.)
 */
void
mongoc_set_destroy (mongoc_set_t *set)
{
   int i;

   /* dtor may be NULL for sets that do not own their items */
   if (set->dtor) {
      for (i = 0; i < set->items_len; i++) {
         set->dtor (set->items[i].item, set->dtor_ctx);
      }
   }

   bson_free (set->items);
   bson_free (set);
}
/*
 * Invoke @cb (with @ctx) on every item until it returns false.  Iteration
 * runs over a snapshot of the entries, so the callback may add or remove
 * items without invalidating the traversal.
 */
void
mongoc_set_for_each (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx)
{
   mongoc_set_item_t *snapshot;
   size_t n;
   size_t i;

   n = set->items_len;

   /* prevent undefined behavior of memcpy(NULL) */
   if (n == 0) {
      return;
   }

   snapshot = (mongoc_set_item_t *) bson_malloc (sizeof (*snapshot) * n);
   memcpy (snapshot, set->items, sizeof (*snapshot) * n);

   for (i = 0; i < n; i++) {
      if (!cb (snapshot[i].item, ctx)) {
         break;
      }
   }

   bson_free (snapshot);
}
/* Return the first entry for which @cb returns true, or NULL if none does. */
static mongoc_set_item_t *
_mongoc_set_find (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx)
{
   size_t idx;
   size_t count;
   mongoc_set_item_t *candidate;

   /* length is captured once, matching the original traversal bounds */
   count = set->items_len;

   for (idx = 0; idx < count; idx++) {
      candidate = &set->items[idx];
      if (cb (candidate->item, ctx)) {
         return candidate;
      }
   }

   return NULL;
}
/* Find the first item matching @cb; returns the stored pointer or NULL. */
void *
mongoc_set_find_item (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx)
{
   mongoc_set_item_t *match = _mongoc_set_find (set, cb, ctx);

   return match ? match->item : NULL;
}
/* Find the first item matching @cb; returns its id, or 0 when no match. */
uint32_t
mongoc_set_find_id (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx)
{
   mongoc_set_item_t *match = _mongoc_set_find (set, cb, ctx);

   return match ? match->id : 0;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-socket-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-socket-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-socket-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-socket-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-socket.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-socket.c
similarity index 99%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-socket.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-socket.c
index a3c78a11..4da230b6 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-socket.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-socket.c
@@ -1,1536 +1,1542 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <string.h>
#include "mongoc-counters-private.h"
#include "mongoc-errno-private.h"
#include "mongoc-socket-private.h"
#include "mongoc-host-list.h"
#include "mongoc-socket-private.h"
#include "mongoc-trace-private.h"
#ifdef _WIN32
#include <Mstcpip.h>
+#include <process.h>
#endif
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "socket"
#define OPERATION_EXPIRED(expire_at) \
((expire_at >= 0) && (expire_at < (bson_get_monotonic_time ())))
/* either struct sockaddr or void, depending on platform */
typedef MONGOC_SOCKET_ARG2 mongoc_sockaddr_t;
/*
*--------------------------------------------------------------------------
*
* _mongoc_socket_capture_errno --
*
* Save the errno state for contextual use.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static void
_mongoc_socket_capture_errno (mongoc_socket_t *sock) /* IN */
{
#ifdef _WIN32
   /* WinSock reports errors via WSAGetLastError(); mirror it into errno too
    * so shared code paths can inspect either */
   errno = sock->errno_ = WSAGetLastError ();
#else
   sock->errno_ = errno;
#endif

   TRACE ("setting errno: %d %s", sock->errno_, strerror (sock->errno_));
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_socket_setnonblock --
*
* A helper to set a socket in nonblocking mode.
*
* Returns:
* true if successful; otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Put @sd into non-blocking mode; returns true on success. */
static bool
#ifdef _WIN32
_mongoc_socket_setnonblock (SOCKET sd)
#else
_mongoc_socket_setnonblock (int sd)
#endif
{
#ifdef _WIN32
   u_long io_mode = 1;
   return (NO_ERROR == ioctlsocket (sd, FIONBIO, &io_mode));
#else
   int flags;

   /* F_GETFL takes no third argument (the original passed `sd` by mistake),
    * and a failed F_GETFL returns -1, which must not be OR'ed into the
    * flags we write back. */
   flags = fcntl (sd, F_GETFL, 0);
   if (flags == -1) {
      return false;
   }

   return (-1 != fcntl (sd, F_SETFL, (flags | O_NONBLOCK)));
#endif
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_socket_wait --
*
* A single socket poll helper.
*
* @events: in most cases should be POLLIN or POLLOUT.
*
* @expire_at should be an absolute time at which to expire using
* the monotonic clock (bson_get_monotonic_time(), which is in
* microseconds). Or zero to not block at all. Or -1 to block
* forever.
*
* Returns:
* true if an event matched. otherwise false.
* a timeout will return false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static bool
_mongoc_socket_wait (mongoc_socket_t *sock, /* IN */
                     int events,            /* IN */
                     int64_t expire_at)     /* IN */
{
#ifdef _WIN32
   fd_set read_fds;
   fd_set write_fds;
   fd_set error_fds;
   struct timeval timeout_tv;
#else
   struct pollfd pfd;
#endif
   int ret;
   int timeout;
   int64_t now;

   ENTRY;

   BSON_ASSERT (sock);
   BSON_ASSERT (events);

#ifdef _WIN32
   FD_ZERO (&read_fds);
   FD_ZERO (&write_fds);
   FD_ZERO (&error_fds);

   if (events & POLLIN) {
      FD_SET (sock->sd, &read_fds);
   }

   if (events & POLLOUT) {
      FD_SET (sock->sd, &write_fds);
   }

   FD_SET (sock->sd, &error_fds);
#else
   pfd.fd = sock->sd;
   pfd.events = events | POLLERR | POLLHUP;
   pfd.revents = 0;
#endif
   now = bson_get_monotonic_time ();

   for (;;) {
      /* expire_at < 0: block forever; expire_at == 0: poll without blocking;
       * otherwise convert the absolute monotonic deadline into a relative
       * millisecond timeout, clamped at zero once the deadline has passed */
      if (expire_at < 0) {
         timeout = -1;
      } else if (expire_at == 0) {
         timeout = 0;
      } else {
         timeout = (int) ((expire_at - now) / 1000L);
         if (timeout < 0) {
            timeout = 0;
         }
      }

#ifdef _WIN32
      if (timeout == -1) {
         /* not WSAPoll: daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken */
         ret = select (0 /*unused*/, &read_fds, &write_fds, &error_fds, NULL);
      } else {
         timeout_tv.tv_sec = timeout / 1000;
         timeout_tv.tv_usec = (timeout % 1000) * 1000;
         ret = select (
            0 /*unused*/, &read_fds, &write_fds, &error_fds, &timeout_tv);
      }
      if (ret == SOCKET_ERROR) {
         _mongoc_socket_capture_errno (sock);
         ret = -1;
      } else if (FD_ISSET (sock->sd, &error_fds)) {
         /* an exceptional condition on the socket is treated as a reset */
         errno = WSAECONNRESET;
         ret = -1;
      }
#else
      ret = poll (&pfd, 1, timeout);
#endif

      if (ret > 0) {
         /* Something happened, so return that */
#ifdef _WIN32
         return (FD_ISSET (sock->sd, &read_fds) ||
                 FD_ISSET (sock->sd, &write_fds));
#else
         RETURN (0 != (pfd.revents & events));
#endif
      } else if (ret < 0) {
         /* poll itself failed */

         TRACE ("errno is: %d", errno);
         if (MONGOC_ERRNO_IS_AGAIN (errno)) {
            /* transient failure (e.g. interrupted): retry until the
             * deadline passes */
            now = bson_get_monotonic_time ();

            if (expire_at < now) {
               _mongoc_socket_capture_errno (sock);
               RETURN (false);
            } else {
               continue;
            }
         } else {
            /* poll failed for some non-transient reason */
            _mongoc_socket_capture_errno (sock);
            RETURN (false);
         }
      } else {
         /* ret == 0, poll timed out */
#ifdef _WIN32
         sock->errno_ = timeout ? WSAETIMEDOUT : EAGAIN;
#else
         sock->errno_ = timeout ? ETIMEDOUT : EAGAIN;
#endif
         RETURN (false);
      }
   }
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_poll --
*
* A multi-socket poll helper.
*
* @expire_at should be an absolute time at which to expire using
* the monotonic clock (bson_get_monotonic_time(), which is in
* microseconds). Or zero to not block at all. Or -1 to block
* forever.
*
* Returns:
* The number of sockets ready.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
ssize_t
mongoc_socket_poll (mongoc_socket_poll_t *sds, /* IN */
                    size_t nsds,               /* IN */
                    int32_t timeout)           /* IN */
{
#ifdef _WIN32
   fd_set read_fds;
   fd_set write_fds;
   fd_set error_fds;
   struct timeval timeout_tv;
#else
   struct pollfd *pfds;
#endif
   int ret;
   int i;

   ENTRY;

   BSON_ASSERT (sds);

#ifdef _WIN32
   FD_ZERO (&read_fds);
   FD_ZERO (&write_fds);
   FD_ZERO (&error_fds);

   for (i = 0; i < nsds; i++) {
      if (sds[i].events & POLLIN) {
         FD_SET (sds[i].socket->sd, &read_fds);
      }

      if (sds[i].events & POLLOUT) {
         FD_SET (sds[i].socket->sd, &write_fds);
      }

      FD_SET (sds[i].socket->sd, &error_fds);
   }

   timeout_tv.tv_sec = timeout / 1000;
   timeout_tv.tv_usec = (timeout % 1000) * 1000;

   /* not WSAPoll: daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken */
   ret = select (0 /*unused*/, &read_fds, &write_fds, &error_fds, &timeout_tv);
   if (ret == SOCKET_ERROR) {
      errno = WSAGetLastError ();
      return -1;
   }

   /* translate select() results back into poll-style revents; exactly one
    * of POLLIN/POLLOUT/POLLHUP is reported per socket, in that priority
    * order */
   for (i = 0; i < nsds; i++) {
      if (FD_ISSET (sds[i].socket->sd, &read_fds)) {
         sds[i].revents = POLLIN;
      } else if (FD_ISSET (sds[i].socket->sd, &write_fds)) {
         sds[i].revents = POLLOUT;
      } else if (FD_ISSET (sds[i].socket->sd, &error_fds)) {
         sds[i].revents = POLLHUP;
      } else {
         sds[i].revents = 0;
      }
   }
#else
   pfds = (struct pollfd *) bson_malloc (sizeof (*pfds) * nsds);

   for (i = 0; i < nsds; i++) {
      pfds[i].fd = sds[i].socket->sd;
      pfds[i].events = sds[i].events | POLLERR | POLLHUP;
      pfds[i].revents = 0;
   }

   ret = poll (pfds, nsds, timeout);

   /* copy results back into the caller-visible structures */
   for (i = 0; i < nsds; i++) {
      sds[i].revents = pfds[i].revents;
   }

   bson_free (pfds);
#endif

   return ret;
}
/* https://jira.mongodb.org/browse/CDRIVER-2176 */
#define MONGODB_KEEPALIVEINTVL 10
#define MONGODB_KEEPIDLE 300
#define MONGODB_KEEPALIVECNT 9
#ifdef _WIN32
/*
 * Enable TCP keepalive on @sd via SIO_KEEPALIVE_VALS, lowering the
 * keepalive time/interval to the driver targets unless the registry (or
 * its documented defaults) already specifies smaller values.
 */
static void
_mongoc_socket_setkeepalive_windows (SOCKET sd)
{
   struct tcp_keepalive keepalive;
   DWORD lpcbBytesReturned = 0;
   HKEY hKey;
   DWORD type;
   DWORD data;
   DWORD data_size = sizeof data;
   const char *reg_key =
      "SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters";

   keepalive.onoff = true;
   keepalive.keepalivetime = MONGODB_KEEPIDLE * 1000;
   keepalive.keepaliveinterval = MONGODB_KEEPALIVEINTVL * 1000;
   /*
    * Windows hardcodes probes to 10:
    * https://msdn.microsoft.com/en-us/library/windows/desktop/dd877220(v=vs.85).aspx
    * "On Windows Vista and later, the number of keep-alive probes (data
    * retransmissions) is set to 10 and cannot be changed."
    *
    * Note that win2k (and seeminly all versions thereafter) do not set the
    * registry value by default so there is no way to derive the default value
    * programmatically. It is however listed in the docs. A user can however
    * change the default value by setting the registry values.
    */

   if (RegOpenKeyExA (HKEY_LOCAL_MACHINE, reg_key, 0, KEY_QUERY_VALUE, &hKey) ==
       ERROR_SUCCESS) {
      /* https://technet.microsoft.com/en-us/library/cc957549.aspx */
      DWORD default_keepalivetime = 7200000; /* 2 hours */
      /* https://technet.microsoft.com/en-us/library/cc957548.aspx */
      DWORD default_keepaliveinterval = 1000; /* 1 second */

      if (RegQueryValueEx (
             hKey, "KeepAliveTime", NULL, &type, (LPBYTE) &data, &data_size) ==
          ERROR_SUCCESS) {
         if (type == REG_DWORD && data < keepalive.keepalivetime) {
            keepalive.keepalivetime = data;
         }
      } else if (default_keepalivetime < keepalive.keepalivetime) {
         keepalive.keepalivetime = default_keepalivetime;
      }

      if (RegQueryValueEx (hKey,
                           "KeepAliveInterval",
                           NULL,
                           &type,
                           (LPBYTE) &data,
                           &data_size) == ERROR_SUCCESS) {
         if (type == REG_DWORD && data < keepalive.keepaliveinterval) {
            keepalive.keepaliveinterval = data;
         }
      } else if (default_keepaliveinterval < keepalive.keepaliveinterval) {
         keepalive.keepaliveinterval = default_keepaliveinterval;
      }

      RegCloseKey (hKey);
   }

   if (WSAIoctl (sd,
                 SIO_KEEPALIVE_VALS,
                 &keepalive,
                 sizeof keepalive,
                 NULL,
                 0,
                 &lpcbBytesReturned,
                 NULL,
                 NULL) == SOCKET_ERROR) {
      TRACE ("%s", "Could not set keepalive values");
   } else {
      TRACE ("%s", "KeepAlive values updated");
      TRACE ("KeepAliveTime: %d", keepalive.keepalivetime);
      TRACE ("KeepAliveInterval: %d", keepalive.keepaliveinterval);
   }
}
#else
#ifdef MONGOC_TRACE
/* Map a TCP_KEEP* socket-option constant to its name, for trace output. */
static const char *
_mongoc_socket_sockopt_value_to_name (int value)
{
   switch (value) {
#ifdef TCP_KEEPIDLE
   case TCP_KEEPIDLE:
      return "TCP_KEEPIDLE";
#endif
#ifdef TCP_KEEPALIVE
   case TCP_KEEPALIVE:
      return "TCP_KEEPALIVE";
#endif
#ifdef TCP_KEEPINTVL
   case TCP_KEEPINTVL:
      return "TCP_KEEPINTVL";
#endif
#ifdef TCP_KEEPCNT
   case TCP_KEEPCNT:
      return "TCP_KEEPCNT";
#endif
   default:
      MONGOC_WARNING ("Don't know what socketopt %d is", value);
      return "Unknown option name";
   }
}
#endif
/* Lower TCP option @name on @sd to @value, but only when the currently
 * configured value is larger (never loosen a stricter setting). */
static void
_mongoc_socket_set_sockopt_if_less (int sd, int name, int value)
{
   int current = 1;
   mongoc_socklen_t len = sizeof current;

   if (getsockopt (sd, IPPROTO_TCP, name, (char *) &current, &len)) {
      TRACE ("Getting '%s' failed, errno: %d",
             _mongoc_socket_sockopt_value_to_name (name),
             errno);
      return;
   }

   TRACE ("'%s' is %d, target value is %d",
          _mongoc_socket_sockopt_value_to_name (name),
          current,
          value);

   if (current <= value) {
      return;
   }

   current = value;
   if (setsockopt (sd, IPPROTO_TCP, name, (char *) &current, sizeof current)) {
      TRACE ("Setting '%s' failed, errno: %d",
             _mongoc_socket_sockopt_value_to_name (name),
             errno);
   } else {
      TRACE ("'%s' value changed to %d",
             _mongoc_socket_sockopt_value_to_name (name),
             current);
   }
}
/* Apply the driver's keepalive timing targets on POSIX platforms, using
 * whichever TCP_KEEP* options this platform exposes. */
static void
_mongoc_socket_setkeepalive_nix (int sd)
{
#if defined(TCP_KEEPIDLE)
   _mongoc_socket_set_sockopt_if_less (sd, TCP_KEEPIDLE, MONGODB_KEEPIDLE);
#elif defined(TCP_KEEPALIVE)
   /* some platforms (e.g. macOS) name the idle-time option TCP_KEEPALIVE */
   _mongoc_socket_set_sockopt_if_less (sd, TCP_KEEPALIVE, MONGODB_KEEPIDLE);
#else
   TRACE ("%s", "Neither TCP_KEEPIDLE nor TCP_KEEPALIVE available");
#endif

#ifdef TCP_KEEPINTVL
   _mongoc_socket_set_sockopt_if_less (
      sd, TCP_KEEPINTVL, MONGODB_KEEPALIVEINTVL);
#else
   TRACE ("%s", "TCP_KEEPINTVL not available");
#endif

#ifdef TCP_KEEPCNT
   _mongoc_socket_set_sockopt_if_less (sd, TCP_KEEPCNT, MONGODB_KEEPALIVECNT);
#else
   TRACE ("%s", "TCP_KEEPCNT not available");
#endif
}
#endif
/*
 * Enable SO_KEEPALIVE on @sd and tighten the platform-specific keepalive
 * timings.  The original nested a second `#ifdef SO_KEEPALIVE` inside the
 * first, leaving its #else TRACE branch unreachable; one conditional with
 * a live fallback branch is sufficient.
 */
static void
#ifdef _WIN32
_mongoc_socket_setkeepalive (SOCKET sd) /* IN */
#else
_mongoc_socket_setkeepalive (int sd) /* IN */
#endif
{
#ifdef SO_KEEPALIVE
   int optval = 1;

   ENTRY;
   if (!setsockopt (
          sd, SOL_SOCKET, SO_KEEPALIVE, (char *) &optval, sizeof optval)) {
      TRACE ("%s", "Setting SO_KEEPALIVE");
#ifdef _WIN32
      _mongoc_socket_setkeepalive_windows (sd);
#else
      _mongoc_socket_setkeepalive_nix (sd);
#endif
   } else {
      TRACE ("%s", "Failed setting SO_KEEPALIVE");
   }
   EXIT;
#else
   TRACE ("%s", "SO_KEEPALIVE not available");
#endif
}
/* Disable Nagle's algorithm (TCP_NODELAY) on @sd; returns true on success. */
static bool
#ifdef _WIN32
_mongoc_socket_setnodelay (SOCKET sd) /* IN */
#else
_mongoc_socket_setnodelay (int sd) /* IN */
#endif
{
#ifdef _WIN32
   BOOL optval = 1;
#else
   int optval = 1;
#endif
   int ret;

   ENTRY;

   errno = 0;
   ret = setsockopt (
      sd, IPPROTO_TCP, TCP_NODELAY, (char *) &optval, sizeof optval);

#ifdef _WIN32
   if (ret == SOCKET_ERROR) {
      MONGOC_WARNING ("WSAGetLastError(): %d", (int) WSAGetLastError ());
   }
#endif

   RETURN (ret == 0);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_errno --
*
* Returns the last error on the socket.
*
* Returns:
* An integer errno, or 0 on no error.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
int
mongoc_socket_errno (mongoc_socket_t *sock) /* IN */
{
   BSON_ASSERT (sock);
   TRACE ("Current errno: %d", sock->errno_);
   /* errno_ is refreshed after each operation by _mongoc_socket_capture_errno */
   return sock->errno_;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_socket_errno_is_again --
*
* Check to see if we should attempt to make further progress
* based on the error of the last operation.
*
* Returns:
* true if we should try again. otherwise false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static bool
_mongoc_socket_errno_is_again (mongoc_socket_t *sock) /* IN */
{
   TRACE ("errno is: %d", sock->errno_);
   /* true for EAGAIN/EWOULDBLOCK-style transient errors worth retrying */
   return MONGOC_ERRNO_IS_AGAIN (sock->errno_);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_accept --
*
* Wrapper for BSD socket accept(). Handles portability between
* BSD sockets and WinSock2 on Windows Vista and newer.
*
* Returns:
* NULL upon failure to accept or timeout.
* A newly allocated mongoc_socket_t on success.
*
* Side effects:
* *port contains the client port number.
*
*--------------------------------------------------------------------------
*/
mongoc_socket_t *
mongoc_socket_accept (mongoc_socket_t *sock, /* IN */
                      int64_t expire_at) /* IN */
{
   /* public wrapper: same as mongoc_socket_accept_ex without the port
    * out-parameter */
   return mongoc_socket_accept_ex (sock, expire_at, NULL);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_accept_ex --
*
* Private synonym for mongoc_socket_accept, returning client port.
*
* Returns:
* NULL upon failure to accept or timeout.
* A newly allocated mongoc_socket_t on success.
*
* Side effects:
* *port contains the client port number.
*
*--------------------------------------------------------------------------
*/
mongoc_socket_t *
mongoc_socket_accept_ex (mongoc_socket_t *sock, /* IN */
                         int64_t expire_at, /* IN */
                         uint16_t *port) /* OUT */
{
   mongoc_socket_t *client;
   /* NOTE(review): sockaddr_in-sized storage and the sin_port read below
    * assume AF_INET — confirm IPv6 listeners are handled elsewhere */
   struct sockaddr_in addr = {0};
   mongoc_socklen_t addrlen = sizeof addr;
   bool try_again = false;
   bool failed = false;
#ifdef _WIN32
   SOCKET sd;
#else
   int sd;
#endif

   ENTRY;

   BSON_ASSERT (sock);

again:
   errno = 0;
   sd = accept (sock->sd, (mongoc_sockaddr_t *) &addr, &addrlen);

   _mongoc_socket_capture_errno (sock);
#ifdef _WIN32
   failed = (sd == INVALID_SOCKET);
#else
   failed = (sd == -1);
#endif
   try_again = (failed && _mongoc_socket_errno_is_again (sock));

   if (failed && try_again) {
      /* no pending connection yet: wait for readability and retry until the
       * deadline passes */
      if (_mongoc_socket_wait (sock, POLLIN, expire_at)) {
         GOTO (again);
      }
      RETURN (NULL);
   } else if (failed) {
      RETURN (NULL);
   } else if (!_mongoc_socket_setnonblock (sd)) {
      /* cannot use a blocking descriptor: close it and report failure */
#ifdef _WIN32
      closesocket (sd);
#else
      close (sd);
#endif
      RETURN (NULL);
   }

   client = (mongoc_socket_t *) bson_malloc0 (sizeof *client);
   client->sd = sd;

   if (port) {
      *port = ntohs (addr.sin_port);
   }

   if (!_mongoc_socket_setnodelay (client->sd)) {
      MONGOC_WARNING ("Failed to enable TCP_NODELAY.");
   }

   RETURN (client);
}
/*
*--------------------------------------------------------------------------
*
* mongo_socket_bind --
*
* A wrapper around bind().
*
* Returns:
* 0 on success, -1 on failure and errno is set.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
int
mongoc_socket_bind (mongoc_socket_t *sock, /* IN */
                    const struct sockaddr *addr, /* IN */
                    mongoc_socklen_t addrlen) /* IN */
{
   int ret;

   ENTRY;

   BSON_ASSERT (sock);
   BSON_ASSERT (addr);
   BSON_ASSERT (addrlen);

   /* thin wrapper over bind(); error state is captured so callers can
    * consult mongoc_socket_errno() on failure */
   ret = bind (sock->sd, addr, addrlen);

   _mongoc_socket_capture_errno (sock);

   RETURN (ret);
}
/*
 * Shut down and close @sock's descriptor.  shutdown() is only issued by the
 * process that created the socket (pid recorded in mongoc_socket_new), so a
 * forked child closing its copy does not tear down the parent's connection.
 * Returns 0 on success, -1 on failure (error captured on @sock).  This
 * block contained unresolved diff markers; the applied version uses
 * _getpid() on Windows.
 */
int
mongoc_socket_close (mongoc_socket_t *sock) /* IN */
{
   bool owned;

   ENTRY;

   BSON_ASSERT (sock);

#ifdef _WIN32
   owned = (sock->pid == (int) _getpid ());

   if (sock->sd != INVALID_SOCKET) {
      if (owned) {
         shutdown (sock->sd, SD_BOTH);
      }

      if (0 == closesocket (sock->sd)) {
         sock->sd = INVALID_SOCKET;
      } else {
         _mongoc_socket_capture_errno (sock);
         RETURN (-1);
      }
   }

   RETURN (0);
#else
   owned = (sock->pid == (int) getpid ());

   if (sock->sd != -1) {
      if (owned) {
         shutdown (sock->sd, SHUT_RDWR);
      }

      if (0 == close (sock->sd)) {
         sock->sd = -1;
      } else {
         _mongoc_socket_capture_errno (sock);
         RETURN (-1);
      }
   }

   RETURN (0);
#endif
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_connect --
*
* Performs a socket connection but will fail if @expire_at is
* reached by the monotonic clock.
*
* Returns:
* 0 if success, otherwise -1 and errno is set.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
int
mongoc_socket_connect (mongoc_socket_t *sock, /* IN */
                       const struct sockaddr *addr, /* IN */
                       mongoc_socklen_t addrlen, /* IN */
                       int64_t expire_at) /* IN */
{
   bool try_again = false;
   bool failed = false;
   int ret;
   int optval;
   /* getsockopt parameter types vary, we check in CheckCompiler.m4 */
   mongoc_socklen_t optlen = (mongoc_socklen_t) sizeof optval;

   ENTRY;

   BSON_ASSERT (sock);
   BSON_ASSERT (addr);
   BSON_ASSERT (addrlen);

   ret = connect (sock->sd, addr, addrlen);

#ifdef _WIN32
   if (ret == SOCKET_ERROR) {
#else
   if (ret == -1) {
#endif
      _mongoc_socket_capture_errno (sock);

      failed = true;
      try_again = _mongoc_socket_errno_is_again (sock);
   }

   if (failed && try_again) {
      /* non-blocking connect in progress: wait until the socket becomes
       * writable, then read SO_ERROR to learn whether it actually
       * succeeded */
      if (_mongoc_socket_wait (sock, POLLOUT, expire_at)) {
         optval = -1;
         ret = getsockopt (
            sock->sd, SOL_SOCKET, SO_ERROR, (char *) &optval, &optlen);

         if ((ret == 0) && (optval == 0)) {
            RETURN (0);
         } else {
            /* propagate the deferred connect error */
            errno = sock->errno_ = optval;
         }
      }

      RETURN (-1);
   } else if (failed) {
      RETURN (-1);
   } else {
      RETURN (0);
   }
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_destroy --
*
* Cleanup after a mongoc_socket_t structure, possibly closing
* underlying sockets.
*
* Returns:
* None.
*
* Side effects:
* @sock is freed and should be considered invalid.
*
*--------------------------------------------------------------------------
*/
void
mongoc_socket_destroy (mongoc_socket_t *sock) /* IN */
{
   /* NULL-safe: close the descriptor (if any) and free the wrapper */
   if (sock) {
      mongoc_socket_close (sock);
      bson_free (sock);
   }
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_socket_listen --
 *
 *       Listen for incoming requests with a backlog up to @backlog.
 *
 *       If @backlog is zero, a sensible default will be chosen.
 *
 * Returns:
 *       0 if successful; otherwise -1 (listen()'s return value; the
 *       previous comment incorrectly said true/false).
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
int
mongoc_socket_listen (mongoc_socket_t *sock, /* IN */
                      unsigned int backlog) /* IN */
{
   int ret;

   ENTRY;

   BSON_ASSERT (sock);

   if (backlog == 0) {
      backlog = 10; /* default backlog when the caller does not care */
   }

   ret = listen (sock->sd, backlog);

   _mongoc_socket_capture_errno (sock);

   RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_new --
*
* Create a new socket and store the current process id on it.
*
* Free the result with mongoc_socket_destroy().
*
* Returns:
* A newly allocated socket.
* NULL on failure.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
mongoc_socket_t *
mongoc_socket_new (int domain, /* IN */
int type, /* IN */
int protocol) /* IN */
{
mongoc_socket_t *sock;
#ifdef _WIN32
SOCKET sd;
#else
int sd;
#endif
ENTRY;
sd = socket (domain, type, protocol);
#ifdef _WIN32
if (sd == INVALID_SOCKET) {
#else
if (sd == -1) {
#endif
RETURN (NULL);
}
if (!_mongoc_socket_setnonblock (sd)) {
GOTO (fail);
}
if (domain != AF_UNIX) {
if (!_mongoc_socket_setnodelay (sd)) {
MONGOC_WARNING ("Failed to enable TCP_NODELAY.");
}
_mongoc_socket_setkeepalive (sd);
}
sock = (mongoc_socket_t *) bson_malloc0 (sizeof *sock);
sock->sd = sd;
sock->domain = domain;
+#ifdef _WIN32
+ sock->pid = (int) _getpid ();
+#else
sock->pid = (int) getpid ();
+#endif
RETURN (sock);
fail:
#ifdef _WIN32
closesocket (sd);
#else
close (sd);
#endif
RETURN (NULL);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_recv --
*
* A portable wrapper around recv() that also respects an absolute
* timeout.
*
* @expire_at is 0 for no blocking, -1 for infinite blocking,
* or a time using the monotonic clock to expire. Calculate this
* using bson_get_monotonic_time() + N_MICROSECONDS.
*
* Returns:
* The number of bytes received on success.
* 0 on end of stream.
* -1 on failure.
*
* Side effects:
* @buf will be read into.
*
*--------------------------------------------------------------------------
*/
ssize_t
mongoc_socket_recv (mongoc_socket_t *sock, /* IN */
                    void *buf, /* OUT */
                    size_t buflen, /* IN */
                    int flags, /* IN */
                    int64_t expire_at) /* IN */
{
   ssize_t ret = 0;
   bool failed = false;

   ENTRY;

   BSON_ASSERT (sock);
   BSON_ASSERT (buf);
   BSON_ASSERT (buflen);

again:
   sock->errno_ = 0;
#ifdef _WIN32
   ret = recv (sock->sd, (char *) buf, (int) buflen, flags);
   failed = (ret == SOCKET_ERROR);
#else
   ret = recv (sock->sd, buf, buflen, flags);
   failed = (ret == -1);
#endif
   if (failed) {
      _mongoc_socket_capture_errno (sock);
      /* on EAGAIN/EWOULDBLOCK wait for readability until the deadline and
       * retry; any other error (or a timed-out wait) falls through to -1 */
      if (_mongoc_socket_errno_is_again (sock) &&
          _mongoc_socket_wait (sock, POLLIN, expire_at)) {
         GOTO (again);
      }
   }

   if (failed) {
      RETURN (-1);
   }

   mongoc_counter_streams_ingress_add (ret);

   RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_setsockopt --
*
* A wrapper around setsockopt().
*
* Returns:
* 0 on success, -1 on failure.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
int
mongoc_socket_setsockopt (mongoc_socket_t *sock, /* IN */
                          int level, /* IN */
                          int optname, /* IN */
                          const void *optval, /* IN */
                          mongoc_socklen_t optlen) /* IN */
{
   int ret;

   ENTRY;

   BSON_ASSERT (sock);

   /* thin wrapper; error state is captured for mongoc_socket_errno() */
   ret = setsockopt (sock->sd, level, optname, optval, optlen);

   _mongoc_socket_capture_errno (sock);

   RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_send --
*
* A simplified wrapper around mongoc_socket_sendv().
*
* @expire_at is 0 for no blocking, -1 for infinite blocking,
* or a time using the monotonic clock to expire. Calculate this
* using bson_get_monotonic_time() + N_MICROSECONDS.
*
* Returns:
* -1 on failure. number of bytes written on success.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/*
 * Send @buflen bytes from @buf over @sock, blocking up to @expire_at.
 * Convenience shim: wraps the buffer in a single iovec and delegates to
 * mongoc_socket_sendv().  Returns bytes written, or -1 on failure.
 */
ssize_t
mongoc_socket_send (mongoc_socket_t *sock, /* IN */
                    const void *buf, /* IN */
                    size_t buflen, /* IN */
                    int64_t expire_at) /* IN */
{
   mongoc_iovec_t single;

   BSON_ASSERT (sock);
   BSON_ASSERT (buf);
   BSON_ASSERT (buflen);

   single.iov_base = (void *) buf;
   single.iov_len = buflen;

   return mongoc_socket_sendv (sock, &single, 1, expire_at);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_socket_try_sendv_slow --
*
* A slow variant of _mongoc_socket_try_sendv() that sends each
* iovec entry one by one. This can happen if we hit EMSGSIZE
* with sendmsg() on various POSIX systems or WSASend()+WSAEMSGSIZE
* on Windows.
*
* Returns:
* the number of bytes sent or -1 and errno is set.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static ssize_t
_mongoc_socket_try_sendv_slow (mongoc_socket_t *sock, /* IN */
                               mongoc_iovec_t *iov, /* IN */
                               size_t iovcnt) /* IN */
{
   ssize_t ret = 0;
   size_t i;
   ssize_t wrote;

   ENTRY;

   BSON_ASSERT (sock);
   BSON_ASSERT (iov);
   BSON_ASSERT (iovcnt);

   for (i = 0; i < iovcnt; i++) {
      wrote = send (sock->sd, iov[i].iov_base, iov[i].iov_len, 0);
#ifdef _WIN32
      if (wrote == SOCKET_ERROR) {
#else
      if (wrote == -1) {
#endif
         _mongoc_socket_capture_errno (sock);

         /* hard error: report -1; transient error after partial progress:
          * report the bytes sent so far (or -1 if nothing went out) */
         if (!_mongoc_socket_errno_is_again (sock)) {
            RETURN (-1);
         }

         RETURN (ret ? ret : -1);
      }

      ret += wrote;

      /* short write: stop and let the caller retry the remainder later */
      if (wrote != iov[i].iov_len) {
         RETURN (ret);
      }
   }

   RETURN (ret);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_socket_try_sendv --
*
* Helper used by mongoc_socket_sendv() to try to write as many
* bytes to the underlying socket until the socket buffer is full.
*
* This is performed in a non-blocking fashion.
*
* Returns:
* -1 on failure. the number of bytes written on success.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
static ssize_t
_mongoc_socket_try_sendv (mongoc_socket_t *sock, /* IN */
                          mongoc_iovec_t *iov, /* IN */
                          size_t iovcnt) /* IN */
{
#ifdef _WIN32
   DWORD dwNumberofBytesSent = 0;
   int ret;
#else
   struct msghdr msg;
   ssize_t ret;
#endif

   ENTRY;

   BSON_ASSERT (sock);
   BSON_ASSERT (iov);
   BSON_ASSERT (iovcnt);

   DUMP_IOVEC (sendbuf, iov, iovcnt);

#ifdef _WIN32
   ret = WSASend (
      sock->sd, (LPWSABUF) iov, iovcnt, &dwNumberofBytesSent, 0, NULL, NULL);
   TRACE ("WSASend sent: %ld (out of: %ld), ret: %d",
          dwNumberofBytesSent,
          iov->iov_len,
          ret);
#else
   memset (&msg, 0, sizeof msg);
   msg.msg_iov = iov;
   msg.msg_iovlen = (int) iovcnt;

   /* MSG_NOSIGNAL (where available) suppresses SIGPIPE on a closed peer */
   ret = sendmsg (sock->sd,
                  &msg,
#ifdef MSG_NOSIGNAL
                  MSG_NOSIGNAL);
#else
                  0);
#endif
   TRACE ("Send %ld out of %ld bytes", ret, iov->iov_len);
#endif

#ifdef _WIN32
   if (ret == SOCKET_ERROR) {
#else
   if (ret == -1) {
#endif
      _mongoc_socket_capture_errno (sock);

      /*
       * Check to see if we have sent an iovec too large for sendmsg to
       * complete. If so, we need to fallback to the slow path of multiple
       * send() commands.
       */
#ifdef _WIN32
      if (mongoc_socket_errno (sock) == WSAEMSGSIZE) {
#else
      if (mongoc_socket_errno (sock) == EMSGSIZE) {
#endif
         RETURN (_mongoc_socket_try_sendv_slow (sock, iov, iovcnt));
      }

      RETURN (-1);
   }

#ifdef _WIN32
   RETURN (dwNumberofBytesSent);
#else
   RETURN (ret);
#endif
}
/*
*--------------------------------------------------------------------------
*
* mongoc_socket_sendv --
*
* A wrapper around using sendmsg() to send an iovec.
* This also deals with the structure differences between
* WSABUF and struct iovec.
*
* @expire_at is 0 for no blocking, -1 for infinite blocking,
* or a time using the monotonic clock to expire. Calculate this
* using bson_get_monotonic_time() + N_MICROSECONDS.
*
* Returns:
* -1 on failure.
* the number of bytes written on success.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
ssize_t
mongoc_socket_sendv (mongoc_socket_t *sock, /* IN */
                     mongoc_iovec_t *in_iov, /* IN */
                     size_t iovcnt, /* IN */
                     int64_t expire_at) /* IN */
{
   ssize_t ret = 0;
   ssize_t sent;
   size_t cur = 0;
   mongoc_iovec_t *iov;

   ENTRY;

   BSON_ASSERT (sock);
   BSON_ASSERT (in_iov);
   BSON_ASSERT (iovcnt);

   /* operate on a private copy: partial sends advance iov_base/iov_len
    * below, and the caller's iovec must not be mutated */
   iov = bson_malloc (sizeof (*iov) * iovcnt);
   memcpy (iov, in_iov, sizeof (*iov) * iovcnt);

   for (;;) {
      sent = _mongoc_socket_try_sendv (sock, &iov[cur], iovcnt - cur);
      TRACE (
         "Sent %ld (of %ld) out of iovcnt=%ld", sent, iov[cur].iov_len, iovcnt);

      /*
       * If we failed with anything other than EAGAIN or EWOULDBLOCK,
       * we should fail immediately as there is another issue with the
       * underlying socket.
       */
      if (sent == -1) {
         if (!_mongoc_socket_errno_is_again (sock)) {
            ret = -1;
            GOTO (CLEANUP);
         }
      }

      /*
       * Update internal stream counters.
       */
      if (sent > 0) {
         ret += sent;
         mongoc_counter_streams_egress_add (sent);

         /*
          * Subtract the sent amount from what we still need to send.
          */
         while ((cur < iovcnt) && (sent >= (ssize_t) iov[cur].iov_len)) {
            TRACE ("still got bytes left: sent -= iov_len: %ld -= %ld",
                   sent,
                   iov[cur].iov_len);
            sent -= iov[cur++].iov_len;
         }

         /*
          * Check if that made us finish all of the iovecs. If so, we are done
          * sending data over the socket.
          */
         if (cur == iovcnt) {
            TRACE ("%s", "Finished the iovecs");
            break;
         }

         /*
          * Increment the current iovec buffer to its proper offset and adjust
          * the number of bytes to write.
          */
         TRACE ("Seeked io_base+%ld", sent);
         TRACE (
            "Subtracting iov_len -= sent; %ld -= %ld", iov[cur].iov_len, sent);
         iov[cur].iov_base = ((char *) iov[cur].iov_base) + sent;
         iov[cur].iov_len -= sent;
         TRACE ("iov_len remaining %ld", iov[cur].iov_len);

         BSON_ASSERT (iovcnt - cur);
         BSON_ASSERT (iov[cur].iov_len);
      } else if (OPERATION_EXPIRED (expire_at)) {
         GOTO (CLEANUP);
      }

      /*
       * Block on poll() until our desired condition is met.
       */
      if (!_mongoc_socket_wait (sock, POLLOUT, expire_at)) {
         GOTO (CLEANUP);
      }
   }

CLEANUP:
   bson_free (iov);

   RETURN (ret);
}
/* Report the local address of @sock via getsockname(); returns its 0/-1
 * result and captures the error state on @sock. */
int
mongoc_socket_getsockname (mongoc_socket_t *sock, /* IN */
                           struct sockaddr *addr, /* OUT */
                           mongoc_socklen_t *addrlen) /* INOUT */
{
   int ret;

   ENTRY;

   BSON_ASSERT (sock);

   ret = getsockname (sock->sd, (mongoc_sockaddr_t *) addr, addrlen);

   _mongoc_socket_capture_errno (sock);

   RETURN (ret);
}
/* Resolve the peer address of @sock to a hostname.  Returns a newly
 * allocated string the caller must bson_free(), or NULL when the socket
 * has no peer or the lookup fails. */
char *
mongoc_socket_getnameinfo (mongoc_socket_t *sock) /* IN */
{
   /* getpeername parameter types vary, we check in CheckCompiler.m4 */
   mongoc_sockaddr_t addr;
   mongoc_socklen_t len = (mongoc_socklen_t) sizeof addr;
   char *ret;
   char host[BSON_HOST_NAME_MAX + 1];

   ENTRY;

   BSON_ASSERT (sock);

   if ((0 == getpeername (sock->sd, &addr, &len)) &&
       (0 == getnameinfo (&addr, len, host, sizeof host, NULL, 0, 0))) {
      ret = bson_strdup (host);
      RETURN (ret);
   }

   RETURN (NULL);
}
/*
 * mongoc_socket_check_closed --
 *
 *    Probe whether the peer has closed the connection by doing a
 *    non-blocking MSG_PEEK read when data (or EOF) is pending.
 *
 * Returns: true if the socket appears closed (peek returned <= 0),
 *    false if no data is pending or a byte could be peeked.
 */
bool
mongoc_socket_check_closed (mongoc_socket_t *sock) /* IN */
{
   char peek_byte[1];
   ssize_t nread;

   /* A zero timeout makes this a pure readiness poll. */
   if (!_mongoc_socket_wait (sock, POLLIN, 0)) {
      return false;
   }

   sock->errno_ = 0;
   /* MSG_PEEK leaves any pending byte in the receive queue. */
   nread = recv (sock->sd, peek_byte, 1, MSG_PEEK);
   if (nread < 0) {
      _mongoc_socket_capture_errno (sock);
   }

   /* 0 == orderly shutdown by the peer; < 0 == error: both count as closed. */
   return nread < 1;
}
/*
 *
 *--------------------------------------------------------------------------
 *
 * mongoc_socket_inet_ntop --
 *
 *       Convert the ip from addrinfo into a c string.
 *
 * Returns:
 *       The value is returned into 'buf'. The memory has to be allocated
 *       by the caller; @buflen is its capacity.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
void
mongoc_socket_inet_ntop (struct addrinfo *rp, /* IN */
                         char *buf,           /* INOUT */
                         size_t buflen)       /* IN */
{
   void *ptr;
   char tmp[256];

   switch (rp->ai_family) {
   case AF_INET:
      ptr = &((struct sockaddr_in *) rp->ai_addr)->sin_addr;
      /* inet_ntop() returns NULL on failure; the old code formatted `tmp`
       * unconditionally, reading an uninitialized buffer in that case. */
      if (inet_ntop (rp->ai_family, ptr, tmp, sizeof (tmp))) {
         bson_snprintf (buf, buflen, "ipv4 %s", tmp);
      } else {
         bson_snprintf (buf, buflen, "ipv4 (invalid)");
      }
      break;
   case AF_INET6:
      ptr = &((struct sockaddr_in6 *) rp->ai_addr)->sin6_addr;
      if (inet_ntop (rp->ai_family, ptr, tmp, sizeof (tmp))) {
         bson_snprintf (buf, buflen, "ipv6 %s", tmp);
      } else {
         bson_snprintf (buf, buflen, "ipv6 (invalid)");
      }
      break;
   default:
      bson_snprintf (buf, buflen, "unknown ip %d", rp->ai_family);
      break;
   }
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-socket.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-socket.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-socket.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-socket.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-ssl-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-ssl-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-ssl-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-ssl-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-ssl.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-ssl.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-ssl.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-ssl.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-ssl.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-ssl.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-ssl.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-ssl.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sspi-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sspi-private.h
similarity index 99%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sspi-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sspi-private.h
index c8f04c1d..17b8d4f8 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sspi-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sspi-private.h
@@ -1,88 +1,88 @@
/*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_SSPI_PRIVATE_H
#define MONGOC_SSPI_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
BSON_BEGIN_DECLS
#define SECURITY_WIN32 1 /* Required for SSPI */
#include <Windows.h>
#include <limits.h>
#include <sspi.h>
#include <string.h>
#define MONGOC_SSPI_AUTH_GSS_ERROR -1
#define MONGOC_SSPI_AUTH_GSS_COMPLETE 1
#define MONGOC_SSPI_AUTH_GSS_CONTINUE 0
typedef struct {
CredHandle cred;
CtxtHandle ctx;
WCHAR *spn;
SEC_CHAR *response;
SEC_CHAR *username;
ULONG flags;
UCHAR haveCred;
UCHAR haveCtx;
- INT qop;
+ ULONG qop;
} mongoc_sspi_client_state_t;
void
_mongoc_sspi_set_gsserror (DWORD errCode, const SEC_CHAR *msg);
void
_mongoc_sspi_destroy_sspi_client_state (mongoc_sspi_client_state_t *state);
int
_mongoc_sspi_auth_sspi_client_init (WCHAR *service,
ULONG flags,
WCHAR *user,
ULONG ulen,
WCHAR *domain,
ULONG dlen,
WCHAR *password,
ULONG plen,
mongoc_sspi_client_state_t *state);
int
_mongoc_sspi_auth_sspi_client_step (mongoc_sspi_client_state_t *state,
SEC_CHAR *challenge);
int
_mongoc_sspi_auth_sspi_client_unwrap (mongoc_sspi_client_state_t *state,
SEC_CHAR *challenge);
int
_mongoc_sspi_auth_sspi_client_wrap (mongoc_sspi_client_state_t *state,
SEC_CHAR *data,
SEC_CHAR *user,
ULONG ulen,
INT protect);
BSON_END_DECLS
#endif /* MONGOC_SSPI_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sspi.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sspi.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-sspi.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-sspi.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-buffered.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-buffered.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-buffered.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-buffered.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-buffered.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-buffered.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-buffered.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-buffered.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-file.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-file.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-file.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-file.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-file.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-file.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-file.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-file.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-gridfs.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-gridfs.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-gridfs.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-gridfs.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-gridfs.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-gridfs.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-gridfs.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-gridfs.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-private.h
similarity index 94%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-private.h
index ef4d3627..084b8726 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-private.h
@@ -1,51 +1,53 @@
/*
* Copyright 2013-2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_STREAM_PRIVATE_H
#define MONGOC_STREAM_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include "mongoc-iovec.h"
#include "mongoc-stream.h"
BSON_BEGIN_DECLS
#define MONGOC_STREAM_SOCKET 1
#define MONGOC_STREAM_FILE 2
#define MONGOC_STREAM_BUFFERED 3
#define MONGOC_STREAM_GRIDFS 4
#define MONGOC_STREAM_TLS 5
bool
mongoc_stream_wait (mongoc_stream_t *stream, int64_t expire_at);
bool
_mongoc_stream_writev_full (mongoc_stream_t *stream,
mongoc_iovec_t *iov,
size_t iovcnt,
int32_t timeout_msec,
bson_error_t *error);
+mongoc_stream_t *
+mongoc_stream_get_root_stream (mongoc_stream_t *stream);
BSON_END_DECLS
#endif /* MONGOC_STREAM_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-socket.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-socket.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-socket.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-socket.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-socket.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-socket.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-socket.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-socket.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.c
similarity index 99%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.c
index a0f17598..fd1800b1 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.c
@@ -1,761 +1,762 @@
/*
* Copyright 2016 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#ifdef MONGOC_ENABLE_SSL_OPENSSL
#include <bson.h>
#include <errno.h>
#include <string.h>
#include <openssl/bio.h>
#include <openssl/ssl.h>
#include <openssl/err.h>
#include <openssl/x509v3.h>
#include "mongoc-counters-private.h"
#include "mongoc-errno-private.h"
#include "mongoc-stream-tls.h"
#include "mongoc-stream-private.h"
#include "mongoc-stream-tls-private.h"
#include "mongoc-stream-tls-openssl-bio-private.h"
#include "mongoc-stream-tls-openssl-private.h"
#include "mongoc-openssl-private.h"
#include "mongoc-trace-private.h"
#include "mongoc-log.h"
#include "mongoc-error.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "stream-tls-openssl"
#define MONGOC_STREAM_TLS_OPENSSL_BUFFER_SIZE 4096
#if OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER)
static void
BIO_meth_free (BIO_METHOD *meth)
{
/* Nothing to free pre OpenSSL 1.1.0 */
}
#endif
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_stream_tls_openssl_destroy --
 *
 *       Cleanup after usage of a mongoc_stream_tls_openssl_t. Free all
 *       allocated resources and ensure connections are closed.
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       Frees @stream, its OpenSSL context struct, and the wrapped base
 *       stream; updates the global stream counters.
 *
 *--------------------------------------------------------------------------
 */
static void
_mongoc_stream_tls_openssl_destroy (mongoc_stream_t *stream)
{
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream;
   mongoc_stream_tls_openssl_t *openssl =
      (mongoc_stream_tls_openssl_t *) tls->ctx;

   BSON_ASSERT (tls);

   /* Free the entire BIO chain (SSL BIO plus the mongoc shim) before
    * releasing the SSL_CTX it was created from. */
   BIO_free_all (openssl->bio);
   openssl->bio = NULL;

   BIO_meth_free (openssl->meth);
   openssl->meth = NULL;

   /* The TLS stream owns the base stream it wraps (see
    * mongoc_stream_tls_openssl_new). */
   mongoc_stream_destroy (tls->base_stream);
   tls->base_stream = NULL;

   SSL_CTX_free (openssl->ctx);
   openssl->ctx = NULL;

   bson_free (openssl);
   bson_free (stream);

   mongoc_counter_streams_active_dec ();
   mongoc_counter_streams_disposed_inc ();
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_stream_tls_openssl_failed --
 *
 *       Called on stream failure. Same as _mongoc_stream_tls_openssl_destroy()
 *
 * Returns:
 *       None.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
static void
_mongoc_stream_tls_openssl_failed (mongoc_stream_t *stream)
{
   /* No failure-specific teardown is needed for the OpenSSL stream; reuse
    * the ordinary destructor. */
   _mongoc_stream_tls_openssl_destroy (stream);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_stream_tls_openssl_close --
 *
 *       Close the underlying socket.
 *
 *       Linus dictates that you should not check the result of close()
 *       since there is a race condition with EAGAIN and a new file
 *       descriptor being opened.
 *
 * Returns:
 *       0 on success; otherwise -1.
 *
 * Side effects:
 *       The BIO fd is closed.
 *
 *--------------------------------------------------------------------------
 */
static int
_mongoc_stream_tls_openssl_close (mongoc_stream_t *stream)
{
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream;
   int rc;

   ENTRY;

   BSON_ASSERT (tls);

   /* Closing is delegated entirely to the wrapped transport stream. */
   rc = mongoc_stream_close (tls->base_stream);

   RETURN (rc);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_stream_tls_openssl_flush --
 *
 *       Flush any data buffered in the SSL BIO chain down to the
 *       underlying stream.
 *
 * Returns:
 *       0 if successful; otherwise -1.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
static int
_mongoc_stream_tls_openssl_flush (mongoc_stream_t *stream)
{
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream;
   mongoc_stream_tls_openssl_t *impl;

   impl = (mongoc_stream_tls_openssl_t *) tls->ctx;
   BSON_ASSERT (impl);

   return BIO_flush (impl->bio);
}
/*
 * _mongoc_stream_tls_openssl_write --
 *
 *    Write @buf_len bytes from @buf through the SSL BIO chain, charging
 *    the elapsed time against tls->timeout_msec so that subsequent writes
 *    in the same writev() see only the remaining budget.
 *
 * Returns: the BIO_write() result (<= 0 on failure/no progress, otherwise
 *    the number of bytes written).
 */
static ssize_t
_mongoc_stream_tls_openssl_write (mongoc_stream_tls_t *tls,
                                  char *buf,
                                  size_t buf_len)
{
   mongoc_stream_tls_openssl_t *openssl =
      (mongoc_stream_tls_openssl_t *) tls->ctx;
   ssize_t ret;
   int64_t now;
   int64_t expire = 0;

   ENTRY;

   BSON_ASSERT (tls);
   BSON_ASSERT (buf);
   BSON_ASSERT (buf_len);

   /* A negative timeout means "no deadline"; only arm one when the caller
    * gave a non-negative budget (in msec, converted to usec here). */
   if (tls->timeout_msec >= 0) {
      expire = bson_get_monotonic_time () + (tls->timeout_msec * 1000UL);
   }

   ret = BIO_write (openssl->bio, buf, buf_len);

   if (ret <= 0) {
      return ret;
   }

   if (expire) {
      now = bson_get_monotonic_time ();

      if ((expire - now) < 0) {
         /* Deadline passed: count a timeout only if this write was short,
          * and leave zero budget for any follow-up write. */
         if (ret < buf_len) {
            mongoc_counter_streams_timeout_inc ();
         }

         tls->timeout_msec = 0;
      } else {
         /* Hand the remaining budget back for the next write. */
         tls->timeout_msec = (expire - now) / 1000L;
      }
   }

   RETURN (ret);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_stream_tls_openssl_writev --
 *
 *       Write the iovec to the stream. This function will try to write
 *       all of the bytes or fail. If the number of bytes is not equal
 *       to the number requested, a failure or EOF has occurred.
 *
 * Returns:
 *       -1 on failure, otherwise the number of bytes written.
 *
 * Side effects:
 *       None.
 *
 *       This function is copied as _mongoc_stream_tls_secure_transport_writev
 *--------------------------------------------------------------------------
 */
static ssize_t
_mongoc_stream_tls_openssl_writev (mongoc_stream_t *stream,
                                   mongoc_iovec_t *iov,
                                   size_t iovcnt,
                                   int32_t timeout_msec)
{
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream;
   char buf[MONGOC_STREAM_TLS_OPENSSL_BUFFER_SIZE];
   ssize_t ret = 0;
   ssize_t child_ret;
   size_t i;
   size_t iov_pos = 0;

   /* There's a bit of a dance to coalesce vectorized writes into
    * MONGOC_STREAM_TLS_OPENSSL_BUFFER_SIZE'd writes to avoid lots of small tls
    * packets.
    *
    * The basic idea is that we want to combine writes in the buffer if they're
    * smaller than the buffer, flushing as it gets full.  For larger writes, or
    * the last write in the iovec array, we want to ignore the buffer and just
    * write immediately.  We take care of doing buffer writes by re-invoking
    * ourself with a single iovec_t, pointing at our stack buffer.
    */
   char *buf_head = buf;
   char *buf_tail = buf;
   char *buf_end = buf + MONGOC_STREAM_TLS_OPENSSL_BUFFER_SIZE;
   size_t bytes;

   char *to_write = NULL;
   size_t to_write_len;

   BSON_ASSERT (tls);
   BSON_ASSERT (iov);
   BSON_ASSERT (iovcnt);

   ENTRY;

   /* The per-write timeout budget is tracked on the tls stream and decremented
    * by _mongoc_stream_tls_openssl_write as each chunk goes out. */
   tls->timeout_msec = timeout_msec;

   for (i = 0; i < iovcnt; i++) {
      iov_pos = 0;

      while (iov_pos < iov[i].iov_len) {
         if (buf_head != buf_tail ||
             ((i + 1 < iovcnt) &&
              ((buf_end - buf_tail) > (iov[i].iov_len - iov_pos)))) {
            /* If we have either of:
             *   - buffered bytes already
             *   - another iovec to send after this one and we don't have more
             *     bytes to send than the size of the buffer.
             *
             * copy into the buffer */
            bytes = BSON_MIN (iov[i].iov_len - iov_pos, buf_end - buf_tail);

            memcpy (buf_tail, (char *) iov[i].iov_base + iov_pos, bytes);
            buf_tail += bytes;
            iov_pos += bytes;

            if (buf_tail == buf_end) {
               /* If we're full, request send */
               to_write = buf_head;
               to_write_len = buf_tail - buf_head;

               buf_tail = buf_head = buf;
            }
         } else {
            /* Didn't buffer, so just write it through */
            to_write = (char *) iov[i].iov_base + iov_pos;
            to_write_len = iov[i].iov_len - iov_pos;

            iov_pos += to_write_len;
         }

         if (to_write) {
            /* We get here if we buffered some bytes and filled the buffer, or
             * if we didn't buffer and have to send out of the iovec */
            child_ret =
               _mongoc_stream_tls_openssl_write (tls, to_write, to_write_len);
            if (child_ret != to_write_len) {
               TRACE ("Got child_ret: %ld while to_write_len is: %ld",
                      child_ret,
                      to_write_len);
            }

            if (child_ret < 0) {
               /* On error, report the bytes already written rather than the
                * error on this chunk. */
               TRACE ("Returning what I had (%ld) as apposed to the error "
                      "(%ld, errno:%d)",
                      ret,
                      child_ret,
                      errno);
               RETURN (ret);
            }

            ret += child_ret;

            if (child_ret < to_write_len) {
               /* we timed out, so send back what we could send */
               RETURN (ret);
            }

            to_write = NULL;
         }
      }
   }

   if (buf_head != buf_tail) {
      /* If we have any bytes buffered, send */
      child_ret =
         _mongoc_stream_tls_openssl_write (tls, buf_head, buf_tail - buf_head);

      if (child_ret < 0) {
         RETURN (child_ret);
      }

      ret += child_ret;
   }

   if (ret >= 0) {
      mongoc_counter_streams_egress_add (ret);
   }

   RETURN (ret);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_stream_tls_openssl_readv --
 *
 *       Read from the stream into iov. This function will try to read
 *       all of the bytes or fail. If the number of bytes is not equal
 *       to the number requested, a failure or EOF has occurred.
 *
 * Returns:
 *       -1 on failure, 0 on EOF, otherwise the number of bytes read.
 *
 * Side effects:
 *       iov buffers will be written to.
 *
 *       This function is copied as _mongoc_stream_tls_secure_transport_readv
 *
 *--------------------------------------------------------------------------
 */
static ssize_t
_mongoc_stream_tls_openssl_readv (mongoc_stream_t *stream,
                                  mongoc_iovec_t *iov,
                                  size_t iovcnt,
                                  size_t min_bytes,
                                  int32_t timeout_msec)
{
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream;
   mongoc_stream_tls_openssl_t *openssl =
      (mongoc_stream_tls_openssl_t *) tls->ctx;
   ssize_t ret = 0;
   size_t i;
   int read_ret;
   size_t iov_pos = 0;
   int64_t now;
   int64_t expire = 0;

   ENTRY;

   BSON_ASSERT (tls);
   BSON_ASSERT (iov);
   BSON_ASSERT (iovcnt);

   tls->timeout_msec = timeout_msec;

   /* A negative timeout means "no deadline"; only arm one for a
    * non-negative budget (msec converted to usec). */
   if (timeout_msec >= 0) {
      expire = bson_get_monotonic_time () + (timeout_msec * 1000UL);
   }

   /* Fill each iovec in order, returning early once min_bytes have
    * accumulated. */
   for (i = 0; i < iovcnt; i++) {
      iov_pos = 0;

      while (iov_pos < iov[i].iov_len) {
         read_ret = BIO_read (openssl->bio,
                              (char *) iov[i].iov_base + iov_pos,
                              (int) (iov[i].iov_len - iov_pos));

         /* https://www.openssl.org/docs/crypto/BIO_should_retry.html:
          *
          * If BIO_should_retry() returns false then the precise "error
          * condition" depends on the BIO type that caused it and the return
          * code of the BIO operation. For example if a call to BIO_read() on a
          * socket BIO returns 0 and BIO_should_retry() is false then the cause
          * will be that the connection closed.
          */
         if (read_ret < 0 ||
             (read_ret == 0 && !BIO_should_retry (openssl->bio))) {
            return -1;
         }

         if (expire) {
            now = bson_get_monotonic_time ();

            if ((expire - now) < 0) {
               /* Deadline passed: report a timeout only when this read made
                * no progress; otherwise keep going with zero budget. */
               if (read_ret == 0) {
                  mongoc_counter_streams_timeout_inc ();
#ifdef _WIN32
                  errno = WSAETIMEDOUT;
#else
                  errno = ETIMEDOUT;
#endif
                  RETURN (-1);
               }

               tls->timeout_msec = 0;
            } else {
               /* Pass the remaining budget down to subsequent reads. */
               tls->timeout_msec = (expire - now) / 1000L;
            }
         }

         ret += read_ret;

         /* The caller's minimum is satisfied; no need to fill the rest. */
         if ((size_t) ret >= min_bytes) {
            mongoc_counter_streams_ingress_add (ret);
            RETURN (ret);
         }

         iov_pos += read_ret;
      }
   }

   if (ret >= 0) {
      mongoc_counter_streams_ingress_add (ret);
   }

   RETURN (ret);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_stream_tls_openssl_setsockopt --
 *
 *       Perform a setsockopt on the underlying stream.
 *
 * Returns:
 *       -1 on failure, otherwise opt specific value.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
static int
_mongoc_stream_tls_openssl_setsockopt (mongoc_stream_t *stream,
                                       int level,
                                       int optname,
                                       void *optval,
                                       mongoc_socklen_t optlen)
{
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream;

   BSON_ASSERT (tls);

   /* Socket options apply to the wrapped transport stream, not the TLS
    * layer itself. */
   return mongoc_stream_setsockopt (
      tls->base_stream, level, optname, optval, optlen);
}
/* Return the transport stream wrapped by this TLS stream. */
static mongoc_stream_t *
_mongoc_stream_tls_openssl_get_base_stream (mongoc_stream_t *stream)
{
   return ((mongoc_stream_tls_t *) stream)->base_stream;
}
/* Delegate the closed-connection probe to the wrapped transport stream. */
static bool
_mongoc_stream_tls_openssl_check_closed (mongoc_stream_t *stream) /* IN */
{
   mongoc_stream_tls_t *tls;

   BSON_ASSERT (stream);

   tls = (mongoc_stream_tls_t *) stream;
   return mongoc_stream_check_closed (tls->base_stream);
}
/**
 * mongoc_stream_tls_openssl_handshake:
 *
 * Drive one step of the TLS handshake over the stream's BIO chain and, on
 * completion, verify the peer certificate against @host.
 *
 * Returns true when the handshake has completed and the certificate check
 * passed.  Returns false otherwise; in that case *events is set to the
 * poll events (POLLIN/POLLOUT) to wait for before retrying, or to 0 on a
 * fatal error with @error filled in.
 */
bool
mongoc_stream_tls_openssl_handshake (mongoc_stream_t *stream,
                                     const char *host,
                                     int *events,
                                     bson_error_t *error)
{
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream;
   mongoc_stream_tls_openssl_t *openssl =
      (mongoc_stream_tls_openssl_t *) tls->ctx;
   SSL *ssl;

   BSON_ASSERT (tls);
   BSON_ASSERT (host);

   ENTRY;

   BIO_get_ssl (openssl->bio, &ssl);

   if (BIO_do_handshake (openssl->bio) == 1) {
      /* Handshake done; verify the certificate (hostname verification is
       * skipped when allow_invalid_hostname is set). */
      if (_mongoc_openssl_check_cert (
             ssl, host, tls->ssl_opts.allow_invalid_hostname)) {
         RETURN (true);
      }

      *events = 0;
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "TLS handshake failed: Failed certificate verification");
      RETURN (false);
   }

   if (BIO_should_retry (openssl->bio)) {
      /* Not a failure: the handshake simply needs more I/O.  Tell the
       * caller which direction to poll for. */
      *events = BIO_should_read (openssl->bio) ? POLLIN : POLLOUT;
      RETURN (false);
   }

   /* Hard failure.  If the transport did not set errno, surface the
    * condition as a timeout so callers see a meaningful errno. */
   if (!errno) {
#ifdef _WIN32
      errno = WSAETIMEDOUT;
#else
      errno = ETIMEDOUT;
#endif
   }

   *events = 0;
   bson_set_error (error,
                   MONGOC_ERROR_STREAM,
                   MONGOC_ERROR_STREAM_SOCKET,
                   "TLS handshake failed: %s",
                   ERR_error_string (ERR_get_error (), NULL));
   RETURN (false);
}
/* Callback to get the client provided SNI, if any
* It is only called in SSL "server mode" (e.g. when using the Mock Server),
* and we don't actually use the hostname for anything, just debug print it
*/
static int
_mongoc_stream_tls_openssl_sni (SSL *ssl, int *ad, void *arg)
{
const char *hostname;
if (ssl == NULL) {
- MONGOC_DEBUG ("No SNI hostname provided");
+ TRACE ("%s", "No SNI hostname provided");
return SSL_TLSEXT_ERR_NOACK;
}
hostname = SSL_get_servername (ssl, TLSEXT_NAMETYPE_host_name);
+ /* This is intentionally debug since its only used by the mock test server */
MONGOC_DEBUG ("Got SNI: '%s'", hostname);
return SSL_TLSEXT_ERR_OK;
}
/* Report whether the wrapped transport stream has hit its timeout. */
static bool
_mongoc_stream_tls_openssl_timed_out (mongoc_stream_t *stream)
{
   mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream;
   bool expired;

   ENTRY;

   expired = mongoc_stream_timed_out (tls->base_stream);

   RETURN (expired);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_stream_tls_openssl_new --
 *
 *       Creates a new mongoc_stream_tls_openssl_t to communicate with a
 *       remote server using a TLS stream.
 *
 *       @base_stream should be a stream that will become owned by the
 *       resulting tls stream. It will be used for raw I/O.
 *
 *       @host is the hostname (or IP literal) the server certificate is
 *       expected to match.
 *
 *       @opt carries the TLS options (certificates, validation flags).
 *
 *       @client is non-zero for a client-side stream; server mode is only
 *       used by the mock test server.
 *
 * Returns:
 *       NULL on failure, otherwise a mongoc_stream_t.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
mongoc_stream_t *
mongoc_stream_tls_openssl_new (mongoc_stream_t *base_stream,
                               const char *host,
                               mongoc_ssl_opt_t *opt,
                               int client)
{
   mongoc_stream_tls_t *tls;
   mongoc_stream_tls_openssl_t *openssl;
   SSL_CTX *ssl_ctx = NULL;
   BIO *bio_ssl = NULL;
   BIO *bio_mongoc_shim = NULL;
   BIO_METHOD *meth;

   BSON_ASSERT (base_stream);
   BSON_ASSERT (opt);

   ENTRY;

   ssl_ctx = _mongoc_openssl_ctx_new (opt);

   if (!ssl_ctx) {
      RETURN (NULL);
   }

#if OPENSSL_VERSION_NUMBER >= 0x10002000L && !defined(LIBRESSL_VERSION_NUMBER)
   if (!opt->allow_invalid_hostname) {
      struct in_addr addr;
      X509_VERIFY_PARAM *param = X509_VERIFY_PARAM_new ();

      X509_VERIFY_PARAM_set_hostflags (param,
                                       X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
      /* An IP literal must be matched against the certificate's IP
       * entries rather than its DNS names. */
      if (inet_pton (AF_INET, host, &addr) ||
          inet_pton (AF_INET6, host, &addr)) {
         X509_VERIFY_PARAM_set1_ip_asc (param, host);
      } else {
         X509_VERIFY_PARAM_set1_host (param, host, 0);
      }
      SSL_CTX_set1_param (ssl_ctx, param);
      X509_VERIFY_PARAM_free (param);
   }
#endif

   if (!client) {
      /* Only used by the Mock Server.
       * Set a callback to get the SNI, if provided */
      SSL_CTX_set_tlsext_servername_callback (ssl_ctx,
                                              _mongoc_stream_tls_openssl_sni);
   }

   if (opt->weak_cert_validation) {
      SSL_CTX_set_verify (ssl_ctx, SSL_VERIFY_NONE, NULL);
   } else {
      SSL_CTX_set_verify (ssl_ctx, SSL_VERIFY_PEER, NULL);
   }

   bio_ssl = BIO_new_ssl (ssl_ctx, client);
   if (!bio_ssl) {
      SSL_CTX_free (ssl_ctx);
      RETURN (NULL);
   }

   meth = mongoc_stream_tls_openssl_bio_meth_new ();
   bio_mongoc_shim = BIO_new (meth);
   if (!bio_mongoc_shim) {
      BIO_free_all (bio_ssl);
      BIO_meth_free (meth);
      /* BUGFIX: our reference on the SSL_CTX was previously leaked on this
       * path (the matching failure branch above does free it). */
      SSL_CTX_free (ssl_ctx);
      RETURN (NULL);
   }

/* Added in OpenSSL 0.9.8f, as a build time option */
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
   if (client) {
      SSL *ssl;
      /* Set the SNI hostname we are expecting certificate for */
      BIO_get_ssl (bio_ssl, &ssl);
      SSL_set_tlsext_host_name (ssl, host);
   }
   /* BUGFIX: the closing brace of `if (client)` was placed after the
    * #endif, leaving an unbalanced brace whenever
    * SSL_CTRL_SET_TLSEXT_HOSTNAME is not defined. */
#endif

   BIO_push (bio_ssl, bio_mongoc_shim);

   openssl = (mongoc_stream_tls_openssl_t *) bson_malloc0 (sizeof *openssl);
   openssl->bio = bio_ssl;
   openssl->meth = meth;
   openssl->ctx = ssl_ctx;

   tls = (mongoc_stream_tls_t *) bson_malloc0 (sizeof *tls);
   tls->parent.type = MONGOC_STREAM_TLS;
   tls->parent.destroy = _mongoc_stream_tls_openssl_destroy;
   tls->parent.failed = _mongoc_stream_tls_openssl_failed;
   tls->parent.close = _mongoc_stream_tls_openssl_close;
   tls->parent.flush = _mongoc_stream_tls_openssl_flush;
   tls->parent.writev = _mongoc_stream_tls_openssl_writev;
   tls->parent.readv = _mongoc_stream_tls_openssl_readv;
   tls->parent.setsockopt = _mongoc_stream_tls_openssl_setsockopt;
   tls->parent.get_base_stream = _mongoc_stream_tls_openssl_get_base_stream;
   tls->parent.check_closed = _mongoc_stream_tls_openssl_check_closed;
   tls->parent.timed_out = _mongoc_stream_tls_openssl_timed_out;
   memcpy (&tls->ssl_opts, opt, sizeof tls->ssl_opts);
   tls->handshake = mongoc_stream_tls_openssl_handshake;
   tls->ctx = (void *) openssl;
   tls->timeout_msec = -1;
   /* The TLS stream takes ownership of base_stream; it is freed by
    * _mongoc_stream_tls_openssl_destroy. */
   tls->base_stream = base_stream;

   mongoc_stream_tls_openssl_bio_set_data (bio_mongoc_shim, tls);
   mongoc_counter_streams_active_inc ();

   RETURN ((mongoc_stream_t *) tls);
}
#endif /* MONGOC_ENABLE_SSL_OPENSSL */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream-tls.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream-tls.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream.c
similarity index 99%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream.c
index 3068cffe..d91f3b74 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream.c
@@ -1,544 +1,543 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bson.h>
#include "mongoc-array-private.h"
#include "mongoc-buffer-private.h"
#include "mongoc-error.h"
#include "mongoc-errno-private.h"
#include "mongoc-flags.h"
#include "mongoc-log.h"
#include "mongoc-opcode.h"
#include "mongoc-rpc-private.h"
#include "mongoc-stream.h"
#include "mongoc-stream-private.h"
#include "mongoc-trace-private.h"
#include "mongoc-util-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "stream"
#ifndef MONGOC_DEFAULT_TIMEOUT_MSEC
#define MONGOC_DEFAULT_TIMEOUT_MSEC (60L * 60L * 1000L)
#endif
/**
* mongoc_stream_close:
* @stream: A mongoc_stream_t.
*
* Closes the underlying file-descriptor used by @stream.
*
* Returns: 0 on success, -1 on failure.
*/
int
mongoc_stream_close (mongoc_stream_t *stream)
{
int ret;
ENTRY;
BSON_ASSERT (stream);
BSON_ASSERT (stream->close);
ret = stream->close (stream);
RETURN (ret);
}
/**
* mongoc_stream_failed:
* @stream: A mongoc_stream_t.
*
* Frees any resources referenced by @stream, including the memory allocation
* for @stream.
* This handler is called upon stream failure, such as network errors, invalid
* replies
* or replicaset reconfigures deleteing the stream
*/
void
mongoc_stream_failed (mongoc_stream_t *stream)
{
ENTRY;
BSON_ASSERT (stream);
if (stream->failed) {
stream->failed (stream);
} else {
stream->destroy (stream);
}
EXIT;
}
/**
* mongoc_stream_destroy:
* @stream: A mongoc_stream_t.
*
* Frees any resources referenced by @stream, including the memory allocation
* for @stream.
*/
void
mongoc_stream_destroy (mongoc_stream_t *stream)
{
ENTRY;
BSON_ASSERT (stream);
BSON_ASSERT (stream->destroy);
stream->destroy (stream);
EXIT;
}
/**
* mongoc_stream_flush:
* @stream: A mongoc_stream_t.
*
* Flushes the data in the underlying stream to the transport.
*
* Returns: 0 on success, -1 on failure.
*/
int
mongoc_stream_flush (mongoc_stream_t *stream)
{
BSON_ASSERT (stream);
return stream->flush (stream);
}
/**
* mongoc_stream_writev:
* @stream: A mongoc_stream_t.
* @iov: An array of iovec to write to the stream.
* @iovcnt: The number of elements in @iov.
*
* Writes an array of iovec buffers to the underlying stream.
*
* Returns: the number of bytes written, or -1 upon failure.
*/
ssize_t
mongoc_stream_writev (mongoc_stream_t *stream,
mongoc_iovec_t *iov,
size_t iovcnt,
int32_t timeout_msec)
{
ssize_t ret;
ENTRY;
BSON_ASSERT (stream);
BSON_ASSERT (iov);
BSON_ASSERT (iovcnt);
BSON_ASSERT (stream->writev);
if (timeout_msec < 0) {
timeout_msec = MONGOC_DEFAULT_TIMEOUT_MSEC;
}
DUMP_IOVEC (writev, iov, iovcnt);
ret = stream->writev (stream, iov, iovcnt, timeout_msec);
RETURN (ret);
}
/**
* mongoc_stream_write:
* @stream: A mongoc_stream_t.
* @buf: A buffer to write.
* @count: The number of bytes to write into @buf.
*
* Simplified access to mongoc_stream_writev(). Creates a single iovec
* with the buffer provided.
*
* Returns: -1 on failure, otherwise the number of bytes write.
*/
ssize_t
mongoc_stream_write (mongoc_stream_t *stream,
void *buf,
size_t count,
int32_t timeout_msec)
{
mongoc_iovec_t iov;
ssize_t ret;
ENTRY;
BSON_ASSERT (stream);
BSON_ASSERT (buf);
iov.iov_base = buf;
iov.iov_len = count;
BSON_ASSERT (stream->writev);
ret = mongoc_stream_writev (stream, &iov, 1, timeout_msec);
RETURN (ret);
}
/**
* mongoc_stream_readv:
* @stream: A mongoc_stream_t.
* @iov: An array of iovec containing the location and sizes to read.
* @iovcnt: the number of elements in @iov.
* @min_bytes: the minumum number of bytes to return, or -1.
*
* Reads into the various buffers pointed to by @iov and associated
* buffer lengths.
*
* If @min_bytes is specified, then at least min_bytes will be returned unless
* eof is encountered. This may result in ETIMEDOUT
*
* Returns: the number of bytes read or -1 on failure.
*/
ssize_t
mongoc_stream_readv (mongoc_stream_t *stream,
mongoc_iovec_t *iov,
size_t iovcnt,
size_t min_bytes,
int32_t timeout_msec)
{
ssize_t ret;
ENTRY;
BSON_ASSERT (stream);
BSON_ASSERT (iov);
BSON_ASSERT (iovcnt);
BSON_ASSERT (stream->readv);
ret = stream->readv (stream, iov, iovcnt, min_bytes, timeout_msec);
if (ret >= 0) {
DUMP_IOVEC (readv, iov, iovcnt);
}
RETURN (ret);
}
/**
* mongoc_stream_read:
* @stream: A mongoc_stream_t.
* @buf: A buffer to write into.
* @count: The number of bytes to write into @buf.
* @min_bytes: The minimum number of bytes to receive
*
* Simplified access to mongoc_stream_readv(). Creates a single iovec
* with the buffer provided.
*
* If @min_bytes is specified, then at least min_bytes will be returned unless
* eof is encountered. This may result in ETIMEDOUT
*
* Returns: -1 on failure, otherwise the number of bytes read.
*/
ssize_t
mongoc_stream_read (mongoc_stream_t *stream,
void *buf,
size_t count,
size_t min_bytes,
int32_t timeout_msec)
{
mongoc_iovec_t iov;
ssize_t ret;
ENTRY;
BSON_ASSERT (stream);
BSON_ASSERT (buf);
iov.iov_base = buf;
iov.iov_len = count;
BSON_ASSERT (stream->readv);
ret = mongoc_stream_readv (stream, &iov, 1, min_bytes, timeout_msec);
RETURN (ret);
}
int
mongoc_stream_setsockopt (mongoc_stream_t *stream,
int level,
int optname,
void *optval,
mongoc_socklen_t optlen)
{
BSON_ASSERT (stream);
if (stream->setsockopt) {
return stream->setsockopt (stream, level, optname, optval, optlen);
}
return 0;
}
mongoc_stream_t *
mongoc_stream_get_base_stream (mongoc_stream_t *stream) /* IN */
{
BSON_ASSERT (stream);
if (stream->get_base_stream) {
return stream->get_base_stream (stream);
}
return stream;
}
-static mongoc_stream_t *
+mongoc_stream_t *
mongoc_stream_get_root_stream (mongoc_stream_t *stream)
-
{
BSON_ASSERT (stream);
while (stream->get_base_stream) {
stream = stream->get_base_stream (stream);
}
return stream;
}
mongoc_stream_t *
mongoc_stream_get_tls_stream (mongoc_stream_t *stream) /* IN */
{
BSON_ASSERT (stream);
for (; stream && stream->type != MONGOC_STREAM_TLS;
stream = stream->get_base_stream (stream))
;
return stream;
}
ssize_t
mongoc_stream_poll (mongoc_stream_poll_t *streams,
size_t nstreams,
int32_t timeout)
{
mongoc_stream_poll_t *poller =
(mongoc_stream_poll_t *) bson_malloc (sizeof (*poller) * nstreams);
int i;
int last_type = 0;
ssize_t rval = -1;
errno = 0;
for (i = 0; i < nstreams; i++) {
poller[i].stream = mongoc_stream_get_root_stream (streams[i].stream);
poller[i].events = streams[i].events;
poller[i].revents = 0;
if (i == 0) {
last_type = poller[i].stream->type;
} else if (last_type != poller[i].stream->type) {
errno = EINVAL;
goto CLEANUP;
}
}
if (!poller[0].stream->poll) {
errno = EINVAL;
goto CLEANUP;
}
rval = poller[0].stream->poll (poller, nstreams, timeout);
if (rval > 0) {
for (i = 0; i < nstreams; i++) {
streams[i].revents = poller[i].revents;
}
}
CLEANUP:
bson_free (poller);
return rval;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_stream_wait --
*
* Internal helper, poll a single stream until it connects.
*
* For now, only the POLLOUT (connected) event is supported.
*
* @expire_at should be an absolute time at which to expire using
* the monotonic clock (bson_get_monotonic_time(), which is in
* microseconds). expire_at of 0 or -1 is prohibited.
*
* Returns:
* true if an event matched. otherwise false.
* a timeout will return false.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_stream_wait (mongoc_stream_t *stream, int64_t expire_at)
{
mongoc_stream_poll_t poller;
int64_t now;
int32_t timeout_msec;
ssize_t ret;
ENTRY;
BSON_ASSERT (stream);
BSON_ASSERT (expire_at > 0);
poller.stream = stream;
poller.events = POLLOUT;
poller.revents = 0;
now = bson_get_monotonic_time ();
for (;;) {
/* TODO CDRIVER-804 use int64_t for timeouts consistently */
timeout_msec = (int32_t) BSON_MIN ((expire_at - now) / 1000L, INT32_MAX);
if (timeout_msec < 0) {
timeout_msec = 0;
}
ret = mongoc_stream_poll (&poller, 1, timeout_msec);
if (ret > 0) {
/* an event happened, return true if POLLOUT else false */
RETURN (0 != (poller.revents & POLLOUT));
} else if (ret < 0) {
/* poll itself failed */
TRACE ("errno is: %d", errno);
if (MONGOC_ERRNO_IS_AGAIN (errno)) {
now = bson_get_monotonic_time ();
if (expire_at < now) {
RETURN (false);
} else {
continue;
}
} else {
/* poll failed for some non-transient reason */
RETURN (false);
}
} else {
/* poll timed out */
RETURN (false);
}
}
return true;
}
bool
mongoc_stream_check_closed (mongoc_stream_t *stream)
{
int ret;
ENTRY;
if (!stream) {
return true;
}
ret = stream->check_closed (stream);
RETURN (ret);
}
bool
mongoc_stream_timed_out (mongoc_stream_t *stream)
{
ENTRY;
BSON_ASSERT (stream);
/* for e.g. a file stream there is no timed_out function */
RETURN (stream->timed_out && stream->timed_out (stream));
}
bool
_mongoc_stream_writev_full (mongoc_stream_t *stream,
mongoc_iovec_t *iov,
size_t iovcnt,
int32_t timeout_msec,
bson_error_t *error)
{
size_t total_bytes = 0;
int i;
ssize_t r;
ENTRY;
for (i = 0; i < iovcnt; i++) {
total_bytes += iov[i].iov_len;
}
r = mongoc_stream_writev (stream, iov, iovcnt, timeout_msec);
TRACE ("writev returned: %ld", r);
if (r < 0) {
if (error) {
char buf[128];
char *errstr;
errstr = bson_strerror_r (errno, buf, sizeof (buf));
bson_set_error (error,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_SOCKET,
"Failure during socket delivery: %s (%d)",
errstr,
errno);
}
RETURN (false);
}
if (r != total_bytes) {
bson_set_error (error,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_SOCKET,
"Failure to send all requested bytes (only sent: %" PRIu64
"/%" PRId64 " in %dms) during socket delivery",
(uint64_t) r,
(int64_t) total_bytes,
timeout_msec);
RETURN (false);
}
RETURN (true);
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-stream.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-stream.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-thread-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-thread-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-thread-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-thread-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description-apm-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description-apm-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description-apm-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description-apm-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description-apm.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description-apm.c
similarity index 97%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description-apm.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description-apm.c
index 7c8c01a7..f7f0225f 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description-apm.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description-apm.c
@@ -1,160 +1,159 @@
/*
* Copyright 2016 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-topology-description-apm-private.h"
#include "mongoc-server-description-private.h"
/* Application Performance Monitoring for topology events, complies with the
* SDAM Monitoring Spec:
https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring-monitoring.rst
*/
/* ServerOpeningEvent */
void
_mongoc_topology_description_monitor_server_opening (
const mongoc_topology_description_t *td,
mongoc_server_description_t *sd)
{
if (td->apm_callbacks.server_opening && !sd->opened) {
mongoc_apm_server_opening_t event;
bson_oid_copy (&td->topology_id, &event.topology_id);
event.host = &sd->host;
event.context = td->apm_context;
sd->opened = true;
td->apm_callbacks.server_opening (&event);
}
}
/* ServerDescriptionChangedEvent */
void
_mongoc_topology_description_monitor_server_changed (
const mongoc_topology_description_t *td,
const mongoc_server_description_t *prev_sd,
const mongoc_server_description_t *new_sd)
{
if (td->apm_callbacks.server_changed) {
mongoc_apm_server_changed_t event;
/* address is same in previous and new sd */
bson_oid_copy (&td->topology_id, &event.topology_id);
event.host = &new_sd->host;
event.previous_description = prev_sd;
event.new_description = new_sd;
event.context = td->apm_context;
td->apm_callbacks.server_changed (&event);
}
}
/* ServerClosedEvent */
void
_mongoc_topology_description_monitor_server_closed (
const mongoc_topology_description_t *td,
const mongoc_server_description_t *sd)
{
if (td->apm_callbacks.server_closed) {
mongoc_apm_server_closed_t event;
bson_oid_copy (&td->topology_id, &event.topology_id);
event.host = &sd->host;
event.context = td->apm_context;
td->apm_callbacks.server_closed (&event);
}
}
/* Send TopologyOpeningEvent when first called on this topology description.
* td is not const: we set its "opened" field here */
void
_mongoc_topology_description_monitor_opening (mongoc_topology_description_t *td)
{
mongoc_topology_description_t *prev_td = NULL;
size_t i;
mongoc_server_description_t *sd;
if (td->opened) {
return;
}
if (td->apm_callbacks.topology_changed) {
/* prepare to call monitor_changed */
prev_td = bson_malloc0 (sizeof (mongoc_topology_description_t));
- mongoc_topology_description_init (
- prev_td, MONGOC_TOPOLOGY_UNKNOWN, td->heartbeat_msec);
+ mongoc_topology_description_init (prev_td, td->heartbeat_msec);
}
td->opened = true;
if (td->apm_callbacks.topology_opening) {
mongoc_apm_topology_opening_t event;
bson_oid_copy (&td->topology_id, &event.topology_id);
event.context = td->apm_context;
td->apm_callbacks.topology_opening (&event);
}
if (td->apm_callbacks.topology_changed) {
/* send initial description-changed event */
_mongoc_topology_description_monitor_changed (prev_td, td);
}
for (i = 0; i < td->servers->items_len; i++) {
sd = (mongoc_server_description_t *) mongoc_set_get_item (td->servers,
(int) i);
_mongoc_topology_description_monitor_server_opening (td, sd);
}
if (prev_td) {
mongoc_topology_description_destroy (prev_td);
bson_free (prev_td);
}
}
/* TopologyDescriptionChangedEvent */
void
_mongoc_topology_description_monitor_changed (
const mongoc_topology_description_t *prev_td,
const mongoc_topology_description_t *new_td)
{
if (new_td->apm_callbacks.topology_changed) {
mongoc_apm_topology_changed_t event;
/* callbacks, context, and id are the same in previous and new td */
bson_oid_copy (&new_td->topology_id, &event.topology_id);
event.context = new_td->apm_context;
event.previous_description = prev_td;
event.new_description = new_td;
new_td->apm_callbacks.topology_changed (&event);
}
}
/* TopologyClosedEvent */
void
_mongoc_topology_description_monitor_closed (
const mongoc_topology_description_t *td)
{
if (td->apm_callbacks.topology_closed) {
mongoc_apm_topology_closed_t event;
bson_oid_copy (&td->topology_id, &event.topology_id);
event.context = td->apm_context;
td->apm_callbacks.topology_closed (&event);
}
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description-private.h
similarity index 87%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description-private.h
index 419e0be6..7c8f27e4 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description-private.h
@@ -1,122 +1,136 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_TOPOLOGY_DESCRIPTION_PRIVATE_H
#define MONGOC_TOPOLOGY_DESCRIPTION_PRIVATE_H
#include "mongoc-set-private.h"
#include "mongoc-server-description.h"
#include "mongoc-array-private.h"
#include "mongoc-topology-description.h"
#include "mongoc-apm-private.h"
typedef enum {
MONGOC_TOPOLOGY_UNKNOWN,
MONGOC_TOPOLOGY_SHARDED,
MONGOC_TOPOLOGY_RS_NO_PRIMARY,
MONGOC_TOPOLOGY_RS_WITH_PRIMARY,
MONGOC_TOPOLOGY_SINGLE,
MONGOC_TOPOLOGY_DESCRIPTION_TYPES
} mongoc_topology_description_type_t;
struct _mongoc_topology_description_t {
bson_oid_t topology_id;
bool opened;
mongoc_topology_description_type_t type;
int64_t heartbeat_msec;
mongoc_set_t *servers;
char *set_name;
int64_t max_set_version;
bson_oid_t max_election_id;
bson_error_t compatibility_error;
uint32_t max_server_id;
bool stale;
unsigned int rand_seed;
+ /* the greatest seen cluster time, for a MongoDB 3.6+ sharded cluster.
+ * see Driver Sessions Spec. */
+ bson_t cluster_time;
+
+ /* smallest seen logicalSessionTimeoutMinutes, or -1 if any server has no
+ * logicalSessionTimeoutMinutes. see Server Discovery and Monitoring Spec */
+ int64_t session_timeout_minutes;
+
mongoc_apm_callbacks_t apm_callbacks;
void *apm_context;
};
typedef enum { MONGOC_SS_READ, MONGOC_SS_WRITE } mongoc_ss_optype_t;
void
mongoc_topology_description_init (mongoc_topology_description_t *description,
- mongoc_topology_description_type_t type,
int64_t heartbeat_msec);
void
_mongoc_topology_description_copy_to (const mongoc_topology_description_t *src,
mongoc_topology_description_t *dst);
void
mongoc_topology_description_destroy (
mongoc_topology_description_t *description);
void
mongoc_topology_description_handle_ismaster (
mongoc_topology_description_t *topology,
uint32_t server_id,
const bson_t *reply,
int64_t rtt_msec,
const bson_error_t *error /* IN */);
mongoc_server_description_t *
mongoc_topology_description_select (mongoc_topology_description_t *description,
mongoc_ss_optype_t optype,
const mongoc_read_prefs_t *read_pref,
int64_t local_threshold_ms);
mongoc_server_description_t *
mongoc_topology_description_server_by_id (
mongoc_topology_description_t *description,
uint32_t id,
bson_error_t *error);
int32_t
mongoc_topology_description_lowest_max_wire_version (
const mongoc_topology_description_t *td);
bool
mongoc_topology_description_all_sds_have_write_date (
const mongoc_topology_description_t *td);
bool
_mongoc_topology_description_validate_max_staleness (
const mongoc_topology_description_t *td,
int64_t max_staleness_seconds,
bson_error_t *error);
void
mongoc_topology_description_suitable_servers (
mongoc_array_t *set, /* OUT */
mongoc_ss_optype_t optype,
mongoc_topology_description_t *topology,
const mongoc_read_prefs_t *read_pref,
size_t local_threshold_ms);
+bool
+mongoc_topology_description_has_data_node (mongoc_topology_description_t *td);
+
void
mongoc_topology_description_invalidate_server (
mongoc_topology_description_t *topology,
uint32_t id,
const bson_error_t *error /* IN */);
bool
mongoc_topology_description_add_server (mongoc_topology_description_t *topology,
const char *server,
uint32_t *id /* OUT */);
+void
+mongoc_topology_description_update_cluster_time (
+ mongoc_topology_description_t *td, const bson_t *reply);
+
#endif /* MONGOC_TOPOLOGY_DESCRIPTION_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description.c
similarity index 90%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description.c
index 7fab3bc3..fe7eedb8 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description.c
@@ -1,1954 +1,2116 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-array-private.h"
#include "mongoc-error.h"
#include "mongoc-server-description-private.h"
#include "mongoc-topology-description-apm-private.h"
#include "mongoc-trace-private.h"
#include "mongoc-util-private.h"
#include "mongoc-read-prefs-private.h"
#include "mongoc-set-private.h"
#include "mongoc-client-private.h"
+#include "mongoc-thread-private.h"
+
+
+static bool
+_is_data_node (mongoc_server_description_t *sd)
+{
+ switch (sd->type) {
+ case MONGOC_SERVER_MONGOS:
+ case MONGOC_SERVER_STANDALONE:
+ case MONGOC_SERVER_RS_SECONDARY:
+ case MONGOC_SERVER_RS_PRIMARY:
+ return true;
+ case MONGOC_SERVER_RS_OTHER:
+ case MONGOC_SERVER_RS_ARBITER:
+ case MONGOC_SERVER_UNKNOWN:
+ case MONGOC_SERVER_POSSIBLE_PRIMARY:
+ case MONGOC_SERVER_RS_GHOST:
+ case MONGOC_SERVER_DESCRIPTION_TYPES:
+ default:
+ return false;
+ }
+}
static void
_mongoc_topology_server_dtor (void *server_, void *ctx_)
{
mongoc_server_description_destroy ((mongoc_server_description_t *) server_);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_topology_description_init --
*
* Initialize the given topology description
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_topology_description_init (mongoc_topology_description_t *description,
- mongoc_topology_description_type_t type,
int64_t heartbeat_msec)
{
ENTRY;
BSON_ASSERT (description);
- BSON_ASSERT (type == MONGOC_TOPOLOGY_UNKNOWN ||
- type == MONGOC_TOPOLOGY_SINGLE ||
- type == MONGOC_TOPOLOGY_RS_NO_PRIMARY);
memset (description, 0, sizeof (*description));
bson_oid_init (&description->topology_id, NULL);
description->opened = false;
- description->type = type;
+ description->type = MONGOC_TOPOLOGY_UNKNOWN;
description->heartbeat_msec = heartbeat_msec;
description->servers =
mongoc_set_new (8, _mongoc_topology_server_dtor, NULL);
description->set_name = NULL;
description->max_set_version = MONGOC_NO_SET_VERSION;
description->stale = true;
description->rand_seed = (unsigned int) bson_get_monotonic_time ();
+ bson_init (&description->cluster_time);
+ description->session_timeout_minutes = MONGOC_NO_SESSIONS;
EXIT;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_description_copy_to --
*
* Deep-copy @src to an uninitialized topology description @dst.
* @dst must not already point to any allocated resources. Clean
* up with mongoc_topology_description_destroy.
*
* WARNING: @dst's rand_seed is not initialized.
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
_mongoc_topology_description_copy_to (const mongoc_topology_description_t *src,
mongoc_topology_description_t *dst)
{
size_t nitems;
size_t i;
mongoc_server_description_t *sd;
uint32_t id;
ENTRY;
BSON_ASSERT (src);
BSON_ASSERT (dst);
bson_oid_copy (&src->topology_id, &dst->topology_id);
dst->opened = src->opened;
dst->type = src->type;
dst->heartbeat_msec = src->heartbeat_msec;
nitems = bson_next_power_of_two (src->servers->items_len);
dst->servers = mongoc_set_new (nitems, _mongoc_topology_server_dtor, NULL);
for (i = 0; i < src->servers->items_len; i++) {
sd = mongoc_set_get_item_and_id (src->servers, (int) i, &id);
mongoc_set_add (
dst->servers, id, mongoc_server_description_new_copy (sd));
}
dst->set_name = bson_strdup (src->set_name);
dst->max_set_version = src->max_set_version;
memcpy (&dst->compatibility_error,
&src->compatibility_error,
sizeof (bson_error_t));
dst->max_server_id = src->max_server_id;
dst->stale = src->stale;
memcpy (&dst->apm_callbacks,
&src->apm_callbacks,
sizeof (mongoc_apm_callbacks_t));
dst->apm_context = src->apm_context;
+ bson_copy_to (&src->cluster_time, &dst->cluster_time);
+
+ dst->session_timeout_minutes = src->session_timeout_minutes;
+
EXIT;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_topology_description_destroy --
*
* Destroy allocated resources within @description
*
* Returns:
* None.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
void
mongoc_topology_description_destroy (mongoc_topology_description_t *description)
{
ENTRY;
BSON_ASSERT (description);
- mongoc_set_destroy (description->servers);
+ if (description->servers) {
+ mongoc_set_destroy (description->servers);
+ }
if (description->set_name) {
bson_free (description->set_name);
}
+ bson_destroy (&description->cluster_time);
+
EXIT;
}
/* find the primary, then stop iterating */
static bool
_mongoc_topology_description_has_primary_cb (void *item, void *ctx /* OUT */)
{
mongoc_server_description_t *server = (mongoc_server_description_t *) item;
mongoc_server_description_t **primary = (mongoc_server_description_t **) ctx;
/* TODO should this include MONGOS? */
if (server->type == MONGOC_SERVER_RS_PRIMARY ||
server->type == MONGOC_SERVER_STANDALONE) {
*primary = (mongoc_server_description_t *) item;
return false;
}
return true;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_description_has_primary --
*
* If topology has a primary, return it.
*
* Returns:
* A pointer to the primary, or NULL.
*
* Side effects:
* None
*
*--------------------------------------------------------------------------
*/
static mongoc_server_description_t *
_mongoc_topology_description_has_primary (
mongoc_topology_description_t *description)
{
mongoc_server_description_t *primary = NULL;
mongoc_set_for_each (description->servers,
_mongoc_topology_description_has_primary_cb,
&primary);
return primary;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_description_later_election --
*
* Check if we've seen a more recent election in the replica set
* than this server has.
*
* Returns:
* True if the topology description's max replica set version plus
* election id is later than the server description's.
*
* Side effects:
* None
*
*--------------------------------------------------------------------------
*/
static bool
_mongoc_topology_description_later_election (mongoc_topology_description_t *td,
mongoc_server_description_t *sd)
{
/* initially max_set_version is -1 and max_election_id is zeroed */
return td->max_set_version > sd->set_version ||
(td->max_set_version == sd->set_version &&
bson_oid_compare (&td->max_election_id, &sd->election_id) > 0);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_description_set_max_set_version --
*
* Remember that we've seen a new replica set version. Unconditionally
* sets td->set_version to sd->set_version.
*
*--------------------------------------------------------------------------
*/
static void
_mongoc_topology_description_set_max_set_version (
mongoc_topology_description_t *td, mongoc_server_description_t *sd)
{
td->max_set_version = sd->set_version;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_description_set_max_election_id --
*
* Remember that we've seen a new election id. Unconditionally sets
* td->max_election_id to sd->election_id.
*
*--------------------------------------------------------------------------
*/
static void
_mongoc_topology_description_set_max_election_id (
mongoc_topology_description_t *td, mongoc_server_description_t *sd)
{
bson_oid_copy (&sd->election_id, &td->max_election_id);
}
static bool
_mongoc_topology_description_server_is_candidate (
mongoc_server_description_type_t desc_type,
mongoc_read_mode_t read_mode,
mongoc_topology_description_type_t topology_type)
{
switch ((int) topology_type) {
case MONGOC_TOPOLOGY_SINGLE:
switch ((int) desc_type) {
case MONGOC_SERVER_STANDALONE:
return true;
default:
return false;
}
case MONGOC_TOPOLOGY_RS_NO_PRIMARY:
case MONGOC_TOPOLOGY_RS_WITH_PRIMARY:
switch ((int) read_mode) {
case MONGOC_READ_PRIMARY:
switch ((int) desc_type) {
case MONGOC_SERVER_RS_PRIMARY:
return true;
default:
return false;
}
case MONGOC_READ_SECONDARY:
switch ((int) desc_type) {
case MONGOC_SERVER_RS_SECONDARY:
return true;
default:
return false;
}
default:
switch ((int) desc_type) {
case MONGOC_SERVER_RS_PRIMARY:
case MONGOC_SERVER_RS_SECONDARY:
return true;
default:
return false;
}
}
case MONGOC_TOPOLOGY_SHARDED:
switch ((int) desc_type) {
case MONGOC_SERVER_MONGOS:
return true;
default:
return false;
}
default:
return false;
}
}
typedef struct _mongoc_suitable_data_t {
mongoc_read_mode_t read_mode;
mongoc_topology_description_type_t topology_type;
mongoc_server_description_t *primary; /* OUT */
mongoc_server_description_t **candidates; /* OUT */
size_t candidates_len; /* OUT */
bool has_secondary; /* OUT */
} mongoc_suitable_data_t;
static bool
_mongoc_replica_set_read_suitable_cb (void *item, void *ctx)
{
mongoc_server_description_t *server = (mongoc_server_description_t *) item;
mongoc_suitable_data_t *data = (mongoc_suitable_data_t *) ctx;
/* primary's used in staleness calculation, even with mode SECONDARY */
if (server->type == MONGOC_SERVER_RS_PRIMARY) {
data->primary = server;
}
if (_mongoc_topology_description_server_is_candidate (
server->type, data->read_mode, data->topology_type)) {
if (server->type == MONGOC_SERVER_RS_PRIMARY) {
if (data->read_mode == MONGOC_READ_PRIMARY ||
data->read_mode == MONGOC_READ_PRIMARY_PREFERRED) {
/* we want a primary and we have one, done! */
return false;
}
}
if (server->type == MONGOC_SERVER_RS_SECONDARY) {
data->has_secondary = true;
}
/* add to our candidates */
data->candidates[data->candidates_len++] = server;
} else {
TRACE ("Rejected [%s] [%s] for mode [%s]",
mongoc_server_description_type (server),
server->host.host_and_port,
_mongoc_read_mode_as_str (data->read_mode));
}
return true;
}
/* if any mongos are candidates, add them to the candidates array */
static void
_mongoc_try_mode_secondary (mongoc_array_t *set, /* OUT */
mongoc_topology_description_t *topology,
const mongoc_read_prefs_t *read_pref,
size_t local_threshold_ms)
{
mongoc_read_prefs_t *secondary;
secondary = mongoc_read_prefs_copy (read_pref);
mongoc_read_prefs_set_mode (secondary, MONGOC_READ_SECONDARY);
mongoc_topology_description_suitable_servers (
set, MONGOC_SS_READ, topology, secondary, local_threshold_ms);
mongoc_read_prefs_destroy (secondary);
}
/* if any mongos are candidates, add them to the candidates array */
static bool
_mongoc_find_suitable_mongos_cb (void *item, void *ctx)
{
   mongoc_server_description_t *sd = (mongoc_server_description_t *) item;
   mongoc_suitable_data_t *data = (mongoc_suitable_data_t *) ctx;
   bool suitable;

   suitable = _mongoc_topology_description_server_is_candidate (
      sd->type, data->read_mode, data->topology_type);

   if (suitable) {
      data->candidates[data->candidates_len++] = sd;
   }

   /* always visit every server in the set */
   return true;
}
/*
 *-------------------------------------------------------------------------
 *
 * mongoc_topology_description_lowest_max_wire_version --
 *
 *       The topology's max wire version.
 *
 *       NOTE: this method should only be called while holding the mutex on
 *       the owning topology object.
 *
 * Returns:
 *       The minimum of all known servers' max wire versions, or INT32_MAX
 *       if there are no known servers.
 *
 * Side effects:
 *       None.
 *
 *-------------------------------------------------------------------------
 */
int32_t
mongoc_topology_description_lowest_max_wire_version (
   const mongoc_topology_description_t *td)
{
   int32_t lowest = INT32_MAX;
   int idx;

   for (idx = 0; (size_t) idx < td->servers->items_len; idx++) {
      const mongoc_server_description_t *sd =
         (const mongoc_server_description_t *) mongoc_set_get_item (
            td->servers, idx);

      /* unknown servers haven't reported a wire version; skip them */
      if (sd->type == MONGOC_SERVER_UNKNOWN) {
         continue;
      }

      if (sd->max_wire_version < lowest) {
         lowest = sd->max_wire_version;
      }
   }

   return lowest;
}
/*
 *-------------------------------------------------------------------------
 *
 * mongoc_topology_description_all_sds_have_write_date --
 *
 *       Whether the primary and all secondaries' server descriptions have
 *       last_write_date_ms.
 *
 * Side effects:
 *       None.
 *
 *-------------------------------------------------------------------------
 */
bool
mongoc_topology_description_all_sds_have_write_date (
   const mongoc_topology_description_t *td)
{
   int idx;

   for (idx = 0; (size_t) idx < td->servers->items_len; idx++) {
      const mongoc_server_description_t *sd =
         (const mongoc_server_description_t *) mongoc_set_get_item (
            td->servers, idx);
      bool data_bearing = (sd->type == MONGOC_SERVER_RS_PRIMARY ||
                           sd->type == MONGOC_SERVER_RS_SECONDARY);

      /* a data-bearing member without a positive lastWriteDate fails the test */
      if (data_bearing && sd->last_write_date_ms <= 0) {
         return false;
      }
   }

   return true;
}
/*
 *-------------------------------------------------------------------------
 *
 * _mongoc_topology_description_validate_max_staleness --
 *
 *       If the provided "maxStalenessSeconds" component of the read
 *       preference is not valid for this topology, fill out @error and
 *       return false.
 *
 * Side effects:
 *       None.
 *
 *-------------------------------------------------------------------------
 */
bool
_mongoc_topology_description_validate_max_staleness (
   const mongoc_topology_description_t *td,
   int64_t max_staleness_seconds,
   bson_error_t *error)
{
   /* Server Selection Spec: A driver MUST raise an error if the TopologyType
    * is ReplicaSetWithPrimary or ReplicaSetNoPrimary and either of these
    * conditions is false:
    *
    *   maxStalenessSeconds * 1000 >= heartbeatFrequencyMS + idleWritePeriodMS
    *   maxStalenessSeconds >= smallestMaxStalenessSeconds
    */
   if (td->type != MONGOC_TOPOLOGY_RS_WITH_PRIMARY &&
       td->type != MONGOC_TOPOLOGY_RS_NO_PRIMARY) {
      /* the constraints apply only to replica-set topologies */
      return true;
   }

   if (max_staleness_seconds * 1000 <
       td->heartbeat_msec + MONGOC_IDLE_WRITE_PERIOD_MS) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "maxStalenessSeconds is set to %" PRId64
                      ", it must be at least heartbeatFrequencyMS (%" PRId64
                      ") + server's idle write period (%d seconds)",
                      max_staleness_seconds,
                      td->heartbeat_msec,
                      MONGOC_IDLE_WRITE_PERIOD_MS / 1000);
      return false;
   }

   if (max_staleness_seconds < MONGOC_SMALLEST_MAX_STALENESS_SECONDS) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
                      "maxStalenessSeconds is set to %" PRId64
                      ", it must be at least %d seconds",
                      max_staleness_seconds,
                      MONGOC_SMALLEST_MAX_STALENESS_SECONDS);
      return false;
   }

   return true;
}
/*
 *-------------------------------------------------------------------------
 *
 * mongoc_topology_description_suitable_servers --
 *
 *       Fill out an array of servers matching the read preference and
 *       localThresholdMS.
 *
 *       NOTE: this method should only be called while holding the mutex on
 *       the owning topology object.
 *
 * Side effects:
 *       None.
 *
 *-------------------------------------------------------------------------
 */
void
mongoc_topology_description_suitable_servers (
   mongoc_array_t *set, /* OUT */
   mongoc_ss_optype_t optype,
   mongoc_topology_description_t *topology,
   const mongoc_read_prefs_t *read_pref,
   size_t local_threshold_ms)
{
   mongoc_suitable_data_t data;
   mongoc_server_description_t **candidates;
   mongoc_server_description_t *server;
   int64_t nearest = -1;
   /* size_t (was int): compared against data.candidates_len, a size_t */
   size_t i;
   mongoc_read_mode_t read_mode = mongoc_read_prefs_get_mode (read_pref);

   /* zeroed, so unused slots are NULL and can be skipped below */
   candidates = (mongoc_server_description_t **) bson_malloc0 (
      sizeof (*candidates) * topology->servers->items_len);

   data.read_mode = read_mode;
   data.topology_type = topology->type;
   data.primary = NULL;
   data.candidates = candidates;
   data.candidates_len = 0;
   data.has_secondary = false;

   /* Single server --
    * Either it is suitable or it isn't */
   if (topology->type == MONGOC_TOPOLOGY_SINGLE) {
      server = (mongoc_server_description_t *) mongoc_set_get_item (
         topology->servers, 0);
      if (_mongoc_topology_description_server_is_candidate (
             server->type, read_mode, topology->type)) {
         _mongoc_array_append_val (set, server);
      } else {
         TRACE (
            "Rejected [%s] [%s] for read mode [%s] with topology type Single",
            mongoc_server_description_type (server),
            server->host.host_and_port,
            _mongoc_read_mode_as_str (read_mode));
      }
      goto DONE;
   }

   /* Replica sets --
    * Find suitable servers based on read mode */
   if (topology->type == MONGOC_TOPOLOGY_RS_NO_PRIMARY ||
       topology->type == MONGOC_TOPOLOGY_RS_WITH_PRIMARY) {
      if (optype == MONGOC_SS_READ) {
         mongoc_set_for_each (
            topology->servers, _mongoc_replica_set_read_suitable_cb, &data);

         if (read_mode == MONGOC_READ_PRIMARY) {
            if (data.primary) {
               _mongoc_array_append_val (set, data.primary);
            }
            goto DONE;
         }

         if (read_mode == MONGOC_READ_PRIMARY_PREFERRED && data.primary) {
            _mongoc_array_append_val (set, data.primary);
            goto DONE;
         }

         if (read_mode == MONGOC_READ_SECONDARY_PREFERRED) {
            /* try read_mode SECONDARY */
            _mongoc_try_mode_secondary (
               set, topology, read_pref, local_threshold_ms);

            /* otherwise fall back to primary */
            if (!set->len && data.primary) {
               _mongoc_array_append_val (set, data.primary);
            }
            goto DONE;
         }

         if (read_mode == MONGOC_READ_SECONDARY) {
            /* the candidate list may still hold non-secondaries; drop them */
            for (i = 0; i < data.candidates_len; i++) {
               if (candidates[i] &&
                   candidates[i]->type != MONGOC_SERVER_RS_SECONDARY) {
                  TRACE ("Rejected [%s] [%s] for mode [%s] with RS topology",
                         mongoc_server_description_type (candidates[i]),
                         candidates[i]->host.host_and_port,
                         _mongoc_read_mode_as_str (read_mode));
                  candidates[i] = NULL;
               }
            }
         }

         /* mode is SECONDARY or NEAREST, filter by staleness and tags */
         mongoc_server_description_filter_stale (data.candidates,
                                                 data.candidates_len,
                                                 data.primary,
                                                 topology->heartbeat_msec,
                                                 read_pref);
         mongoc_server_description_filter_tags (
            data.candidates, data.candidates_len, read_pref);
      } else if (topology->type == MONGOC_TOPOLOGY_RS_WITH_PRIMARY) {
         /* includes optype == MONGOC_SS_WRITE as the exclusion of the above if
          */
         mongoc_set_for_each (topology->servers,
                              _mongoc_topology_description_has_primary_cb,
                              &data.primary);
         if (data.primary) {
            _mongoc_array_append_val (set, data.primary);
            goto DONE;
         }
      }
   }

   /* Sharded clusters --
    * All candidates in the latency window are suitable */
   if (topology->type == MONGOC_TOPOLOGY_SHARDED) {
      mongoc_set_for_each (
         topology->servers, _mongoc_find_suitable_mongos_cb, &data);
   }

   /* Ways to get here:
    * - secondary read
    * - secondary preferred read
    * - primary_preferred and no primary read
    * - sharded anything
    * Find the nearest, then select within the window */
   for (i = 0; i < data.candidates_len; i++) {
      if (candidates[i] &&
          (nearest == -1 || nearest > candidates[i]->round_trip_time_msec)) {
         nearest = candidates[i]->round_trip_time_msec;
      }
   }

   for (i = 0; i < data.candidates_len; i++) {
      /* cast keeps the comparison in signed 64-bit arithmetic instead of
       * mixing int64_t with size_t */
      if (candidates[i] && (candidates[i]->round_trip_time_msec <=
                            nearest + (int64_t) local_threshold_ms)) {
         _mongoc_array_append_val (set, candidates[i]);
      }
   }

DONE:
   bson_free (candidates);

   return;
}
+/*
+ *--------------------------------------------------------------------------
+ *
+ * mongoc_topology_description_has_data_node --
+ *
+ * Internal method: are any servers not Arbiter, Ghost, or Unknown?
+ *
+ *--------------------------------------------------------------------------
+ */
+bool
+mongoc_topology_description_has_data_node (mongoc_topology_description_t *td)
+{
+ int i;
+ mongoc_server_description_t *sd;
+
+ for (i = 0; i < (int) td->servers->items_len; i++) {
+ sd = (mongoc_server_description_t *) mongoc_set_get_item (td->servers, i);
+ if (_is_data_node (sd)) {
+ return true;
+ }
+ }
+
+ return false;
+}
/*
 *-------------------------------------------------------------------------
 *
 * mongoc_topology_description_select --
 *
 * Return a server description of a node that is appropriate for
 * the given read preference and operation type.
 *
 * NOTE: this method simply attempts to select a server from the
 * current topology, it does not retry or trigger topology checks.
 *
 * NOTE: this method should only be called while holding the mutex on
 * the owning topology object.
 *
 * Returns:
 * Selected server description, or NULL upon failure.
 *
 * Side effects:
 * None.
 *
 *-------------------------------------------------------------------------
 */
mongoc_server_description_t *
mongoc_topology_description_select (mongoc_topology_description_t *topology,
mongoc_ss_optype_t optype,
const mongoc_read_prefs_t *read_pref,
int64_t local_threshold_ms)
{
mongoc_array_t suitable_servers;
mongoc_server_description_t *sd = NULL;
int rand_n;
ENTRY;
/* single topology: the lone server is selectable only if it has responded
 * to ismaster (i.e. it is not known to be down) */
if (topology->type == MONGOC_TOPOLOGY_SINGLE) {
sd = (mongoc_server_description_t *) mongoc_set_get_item (
topology->servers, 0);
if (sd->has_is_master) {
RETURN (sd);
} else {
TRACE ("Topology type single, [%s] is down", sd->host.host_and_port);
RETURN (NULL);
}
}
/* otherwise, gather every server that satisfies the read preference and
 * localThresholdMS, then pick one of them pseudo-randomly */
_mongoc_array_init (&suitable_servers,
sizeof (mongoc_server_description_t *));
mongoc_topology_description_suitable_servers (
&suitable_servers, optype, topology, read_pref, local_threshold_ms);
if (suitable_servers.len != 0) {
rand_n = _mongoc_rand_simple (&topology->rand_seed);
sd = _mongoc_array_index (&suitable_servers,
mongoc_server_description_t *,
rand_n % suitable_servers.len);
}
_mongoc_array_destroy (&suitable_servers);
if (sd) {
TRACE ("Topology type [%s], selected [%s] [%s]",
mongoc_topology_description_type (topology),
mongoc_server_description_type (sd),
sd->host.host_and_port);
}
RETURN (sd);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_description_server_by_id --
 *
 *      Get the server description for @id, if that server is present
 *      in @description. Otherwise, return NULL and fill out optional
 *      @error.
 *
 *      NOTE: In most cases, caller should create a duplicate of the
 *      returned server description. Caller should hold the mutex on the
 *      owning topology object while calling this method and while using
 *      the returned reference.
 *
 * Returns:
 *      A mongoc_server_description_t *, or NULL.
 *
 * Side effects:
 *      Fills out optional @error if server not found.
 *
 *--------------------------------------------------------------------------
 */
mongoc_server_description_t *
mongoc_topology_description_server_by_id (
   mongoc_topology_description_t *description, uint32_t id, bson_error_t *error)
{
   mongoc_server_description_t *sd;

   BSON_ASSERT (description);

   sd = (mongoc_server_description_t *) mongoc_set_get (description->servers,
                                                        id);
   if (sd) {
      return sd;
   }

   bson_set_error (error,
                   MONGOC_ERROR_STREAM,
                   MONGOC_ERROR_STREAM_NOT_ESTABLISHED,
                   "Could not find description for node %u",
                   id);
   return NULL;
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_description_remove_server --
 *
 *      If present, remove this server from this topology description.
 *
 * Returns:
 *      None.
 *
 * Side effects:
 *      Removes the server description from topology and destroys it.
 *
 *--------------------------------------------------------------------------
 */
static void
_mongoc_topology_description_remove_server (
   mongoc_topology_description_t *td,
   mongoc_server_description_t *sd)
{
   BSON_ASSERT (td);
   BSON_ASSERT (sd);

   /* emit the SDAM "server closed" event before the description goes away */
   _mongoc_topology_description_monitor_server_closed (td, sd);
   mongoc_set_rm (td->servers, sd->id);
}
/* Context for _mongoc_topology_description_has_server_cb: look up a server
 * by connection address; on success, report that it was found and its id. */
typedef struct _mongoc_address_and_id_t {
const char *address; /* IN */
bool found; /* OUT */
uint32_t id; /* OUT */
} mongoc_address_and_id_t;
/* find the given server and stop iterating */
static bool
_mongoc_topology_description_has_server_cb (void *item,
                                            void *ctx /* IN - OUT */)
{
   mongoc_server_description_t *sd = (mongoc_server_description_t *) item;
   mongoc_address_and_id_t *search = (mongoc_address_and_id_t *) ctx;

   /* addresses are compared case-insensitively */
   if (strcasecmp (search->address, sd->connection_address) != 0) {
      return true; /* no match; keep iterating */
   }

   search->found = true;
   search->id = sd->id;
   return false; /* found it; halt iteration */
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_description_has_set_version --
 *
 *      Whether @topology's max replica set version has been set.
 *
 * Returns:
 *      True if the max setVersion was ever set.
 *
 * Side effects:
 *      None.
 *
 *--------------------------------------------------------------------------
 */
static bool
_mongoc_topology_description_has_set_version (mongoc_topology_description_t *td)
{
   /* MONGOC_NO_SET_VERSION is the sentinel meaning "never set" */
   return !(td->max_set_version == MONGOC_NO_SET_VERSION);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_description_topology_has_server --
 *
 *      Return true if @server is in @topology. If so, place its id in
 *      @id if given.
 *
 * Returns:
 *      True if server is in topology, false otherwise.
 *
 * Side effects:
 *      None.
 *
 *--------------------------------------------------------------------------
 */
static bool
_mongoc_topology_description_has_server (
   mongoc_topology_description_t *description,
   const char *address,
   uint32_t *id /* OUT */)
{
   mongoc_address_and_id_t search;

   BSON_ASSERT (description);
   BSON_ASSERT (address);

   search.address = address;
   search.found = false;

   mongoc_set_for_each (description->servers,
                        _mongoc_topology_description_has_server_cb,
                        &search);

   if (id && search.found) {
      *id = search.id;
   }

   return search.found;
}
/* Context for _mongoc_label_unknown_member_cb: the connection address of the
 * server to relabel, and the type to assign if it is currently UNKNOWN. */
typedef struct _mongoc_address_and_type_t {
const char *address;
mongoc_server_description_type_t type;
} mongoc_address_and_type_t;
/* mongoc_set_for_each callback: relabel the server at ctx->address (if its
 * type is still UNKNOWN) and stop iterating once it has been found. */
static bool
_mongoc_label_unknown_member_cb (void *item, void *ctx)
{
   mongoc_server_description_t *sd = (mongoc_server_description_t *) item;
   mongoc_address_and_type_t *data = (mongoc_address_and_type_t *) ctx;
   bool address_matches =
      (strcasecmp (sd->connection_address, data->address) == 0);

   if (address_matches && sd->type == MONGOC_SERVER_UNKNOWN) {
      mongoc_server_description_set_state (sd, data->type);
      return false; /* relabeled; halt iteration */
   }

   return true;
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_description_label_unknown_member --
 *
 *      Find the server description with the given @address and if its
 *      type is UNKNOWN, set its type to @type.
 *
 * Returns:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *--------------------------------------------------------------------------
 */
static void
_mongoc_topology_description_label_unknown_member (
   mongoc_topology_description_t *description,
   const char *address,
   mongoc_server_description_type_t type)
{
   mongoc_address_and_type_t relabel;

   BSON_ASSERT (description);
   BSON_ASSERT (address);

   relabel.address = address;
   relabel.type = type;

   mongoc_set_for_each (
      description->servers, _mongoc_label_unknown_member_cb, &relabel);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_description_set_state --
 *
 *      Change the state of this cluster and unblock things waiting
 *      on a change of topology type.
 *
 * Returns:
 *      None.
 *
 * Side effects:
 *      Unblocks anything waiting on this description to change states.
 *
 *--------------------------------------------------------------------------
 */
static void
_mongoc_topology_description_set_state (
   mongoc_topology_description_t *description,
   mongoc_topology_description_type_t type)
{
   /* a plain assignment today; kept as a helper so callers have one place
    * that records topology-type transitions */
   description->type = type;
}
/* Recompute the replica-set topology type: RS_WITH_PRIMARY when a primary is
 * currently known, RS_NO_PRIMARY otherwise. */
static void
_update_rs_type (mongoc_topology_description_t *topology)
{
   mongoc_topology_description_type_t new_type =
      _mongoc_topology_description_has_primary (topology)
         ? MONGOC_TOPOLOGY_RS_WITH_PRIMARY
         : MONGOC_TOPOLOGY_RS_NO_PRIMARY;

   _mongoc_topology_description_set_state (topology, new_type);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_description_check_if_has_primary --
 *
 *      If there is a primary in topology, set topology
 *      type to RS_WITH_PRIMARY, otherwise set it to
 *      RS_NO_PRIMARY.
 *
 * Returns:
 *      None.
 *
 * Side effects:
 *      Changes the topology type.
 *
 *--------------------------------------------------------------------------
 */
static void
_mongoc_topology_description_check_if_has_primary (
   mongoc_topology_description_t *topology, mongoc_server_description_t *server)
{
   /* @server is unused; the signature matches the transition-table shape
    * used by the other handlers in this file */
   _update_rs_type (topology);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_description_invalidate_server --
 *
 *      Invalidate a server if a network error occurred while using it in
 *      another part of the client. Server description is set to type
 *      UNKNOWN, the error is recorded, and other parameters are reset to
 *      defaults. Pass in the reason for invalidation in @error.
 *
 *      NOTE: this method should only be called while holding the mutex on
 *      the owning topology object.
 *
 *--------------------------------------------------------------------------
 */
void
mongoc_topology_description_invalidate_server (
   mongoc_topology_description_t *topology,
   uint32_t id,
   const bson_error_t *error /* IN */)
{
   BSON_ASSERT (error);

   /* a NULL ismaster reply marks the server UNKNOWN and records @error */
   mongoc_topology_description_handle_ismaster (topology, id, NULL, 0, error);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_description_add_server --
 *
 *      Add the specified server to the cluster topology if it is not
 *      already a member. If @id, place its id in @id.
 *
 *      NOTE: this method should only be called while holding the mutex on
 *      the owning topology object.
 *
 * Return:
 *      True if the server was added or already existed in the topology,
 *      false if an error occurred.
 *
 * Side effects:
 *      None.
 *
 *--------------------------------------------------------------------------
 */
bool
mongoc_topology_description_add_server (mongoc_topology_description_t *topology,
                                        const char *server,
                                        uint32_t *id /* OUT */)
{
   uint32_t sd_id;
   mongoc_server_description_t *sd;

   BSON_ASSERT (topology);
   BSON_ASSERT (server);

   if (!_mongoc_topology_description_has_server (topology, server, &sd_id)) {
      /* TODO this might not be an accurate count in all cases */
      sd_id = ++topology->max_server_id;

      sd = (mongoc_server_description_t *) bson_malloc0 (sizeof *sd);
      mongoc_server_description_init (sd, server, sd_id);
      mongoc_set_add (topology->servers, sd_id, sd);

      /* if we're in topology_new then no callbacks are registered and this is
       * a no-op. later, if we discover a new RS member this sends an event. */
      _mongoc_topology_description_monitor_server_opening (topology, sd);
   }

   if (id) {
      *id = sd_id;
   }

   return true;
}
+/*
+ *--------------------------------------------------------------------------
+ *
+ * mongoc_topology_description_update_cluster_time --
+ *
+ * Drivers Session Spec: Drivers MUST examine responses to server commands to
+ * see if they contain a top level field named $clusterTime formatted as
+ * follows:
+ *
+ * {
+ * ...
+ * $clusterTime : {
+ * clusterTime : <BsonTimestamp>,
+ * signature : {
+ * hash : <BsonBinaryData>,
+ * keyId : <BsonInt64>
+ * }
+ * },
+ * ...
+ * }
+ *
+ * Whenever a driver receives a clusterTime from a server it MUST compare it
+ * to the current highest seen clusterTime for the cluster. If the new
+ * clusterTime is higher than the highest seen clusterTime it MUST become
+ * the new highest seen clusterTime. Two clusterTimes are compared using
+ * only the BsonTimestamp value of the clusterTime embedded field (be sure to
+ * include both the timestamp and the increment of the BsonTimestamp in the
+ * comparison). The signature field does not participate in the comparison.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+void
+mongoc_topology_description_update_cluster_time (
+ mongoc_topology_description_t *td, const bson_t *reply)
+{
+ bson_iter_t iter;
+ bson_iter_t child;
+ const uint8_t *data;
+ uint32_t size;
+ bson_t cluster_time;
+
+ if (!reply || !bson_iter_init_find (&iter, reply, "$clusterTime")) {
+ return;
+ }
+
+ if (!BSON_ITER_HOLDS_DOCUMENT (&iter) ||
+ !bson_iter_recurse (&iter, &child)) {
+ MONGOC_ERROR ("Can't parse $clusterTime");
+ return;
+ }
+
+ bson_iter_document (&iter, &size, &data);
+ bson_init_static (&cluster_time, data, (size_t) size);
+
+ if (bson_empty (&td->cluster_time) ||
+ _mongoc_cluster_time_greater (&cluster_time, &td->cluster_time)) {
+ bson_destroy (&td->cluster_time);
+ bson_copy_to (&cluster_time, &td->cluster_time);
+ }
+}
+
+
/* Add every host named in @server's hosts, arbiters, and passives lists to
 * the topology description (no-op for addresses already present). */
static void
_mongoc_topology_description_add_new_servers (
   mongoc_topology_description_t *topology, mongoc_server_description_t *server)
{
   bson_iter_t iter;
   const bson_t *member_lists[] = {
      &server->hosts, &server->arbiters, &server->passives};
   size_t idx;

   for (idx = 0; idx < sizeof member_lists / sizeof member_lists[0]; idx++) {
      bson_iter_init (&iter, member_lists[idx]);
      while (bson_iter_next (&iter)) {
         mongoc_topology_description_add_server (
            topology, bson_iter_utf8 (&iter, NULL), NULL);
      }
   }
}
/* Context for _mongoc_topology_description_invalidate_primaries_cb: the
 * newly-confirmed primary and the topology description it belongs to. */
typedef struct _mongoc_primary_and_topology_t {
mongoc_topology_description_t *topology;
mongoc_server_description_t *primary;
} mongoc_primary_and_topology_t;
/* invalidate old primaries */
static bool
_mongoc_topology_description_invalidate_primaries_cb (void *item, void *ctx)
{
   mongoc_server_description_t *sd = (mongoc_server_description_t *) item;
   mongoc_primary_and_topology_t *data = (mongoc_primary_and_topology_t *) ctx;
   bool stale_primary =
      (sd->type == MONGOC_SERVER_RS_PRIMARY && sd->id != data->primary->id);

   if (stale_primary) {
      /* demote: mark unknown and forget its setVersion and electionId */
      mongoc_server_description_set_state (sd, MONGOC_SERVER_UNKNOWN);
      mongoc_server_description_set_set_version (sd, MONGOC_NO_SET_VERSION);
      mongoc_server_description_set_election_id (sd, NULL);
   }

   return true;
}
/* Remove and destroy all replica set members not in primary's hosts lists */
static void
_mongoc_topology_description_remove_unreported_servers (
   mongoc_topology_description_t *topology,
   mongoc_server_description_t *primary)
{
   mongoc_array_t to_remove;
   /* size_t (was int): compared with the set's items_len and the array's
    * len, both unsigned, avoiding sign-compare conversions */
   size_t i;
   mongoc_server_description_t *member;
   const char *address;

   _mongoc_array_init (&to_remove, sizeof (mongoc_server_description_t *));

   /* Accumulate servers to be removed - do this before calling
    * _mongoc_topology_description_remove_server, which could call
    * mongoc_server_description_cleanup on the primary itself if it
    * doesn't report its own connection_address in its hosts list.
    * See hosts_differ_from_seeds.json */
   for (i = 0; i < topology->servers->items_len; i++) {
      member = (mongoc_server_description_t *) mongoc_set_get_item (
         topology->servers, (int) i);
      address = member->connection_address;
      if (!mongoc_server_description_has_rs_member (primary, address)) {
         _mongoc_array_append_val (&to_remove, member);
      }
   }

   /* now it's safe to call _mongoc_topology_description_remove_server,
    * even on the primary */
   for (i = 0; i < to_remove.len; i++) {
      member =
         _mongoc_array_index (&to_remove, mongoc_server_description_t *, i);

      _mongoc_topology_description_remove_server (topology, member);
   }

   _mongoc_array_destroy (&to_remove);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_description_matches_me --
 *
 *      Server Discovery And Monitoring Spec: "Removal from the topology of
 *      seed list members where the "me" property does not match the address
 *      used to connect prevents clients from being able to select a server,
 *      only to fail to re-select that server once the primary has responded.
 *
 * Returns:
 *      True if "me" matches "connection_address".
 *
 * Side Effects:
 *      None.
 *
 *--------------------------------------------------------------------------
 */
static bool
_mongoc_topology_description_matches_me (mongoc_server_description_t *server)
{
   BSON_ASSERT (server->connection_address);

   /* "me" is unknown: consider it a match */
   return server->me == NULL ||
          strcasecmp (server->connection_address, server->me) == 0;
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_update_rs_from_primary --
 *
 * First, determine that this is really the primary:
 * -If this node isn't in the cluster, do nothing.
 * -If the cluster's set name is null, set it to node's set name.
 * Otherwise if the cluster's set name is different from node's,
 * we found a rogue primary, so remove it from the cluster and
 * check the cluster for a primary, then return.
 * -If any of the members of cluster reports an address different
 * from node's, node cannot be the primary.
 * Now that we know this is the primary:
 * -If any hosts, passives, or arbiters in node's description aren't
 * in the cluster, add them as UNKNOWN servers.
 * -If the cluster has any servers that aren't in node's description,
 * remove and destroy them.
 * Finally, check the cluster for the new primary.
 *
 * Returns:
 * None.
 *
 * Side effects:
 * Changes to the cluster, possible removal of cluster nodes.
 *
 *--------------------------------------------------------------------------
 */
static void
_mongoc_topology_description_update_rs_from_primary (
mongoc_topology_description_t *topology, mongoc_server_description_t *server)
{
mongoc_primary_and_topology_t data;
bson_error_t error;
BSON_ASSERT (topology);
BSON_ASSERT (server);
/* ignore descriptions of servers we no longer track */
if (!_mongoc_topology_description_has_server (
topology, server->connection_address, NULL))
return;
/* If server->set_name was null this function wouldn't be called from
 * mongoc_server_description_handle_ismaster(). static code analyzers however
 * don't know that so we check for it explicitly. */
if (server->set_name) {
/* 'Server' can only be the primary if it has the right rs name */
if (!topology->set_name) {
topology->set_name = bson_strdup (server->set_name);
} else if (strcmp (topology->set_name, server->set_name) != 0) {
_mongoc_topology_description_remove_server (topology, server);
_update_rs_type (topology);
return;
}
}
if (mongoc_server_description_has_set_version (server) &&
mongoc_server_description_has_election_id (server)) {
/* Server Discovery And Monitoring Spec: "The client remembers the
 * greatest electionId reported by a primary, and distrusts primaries
 * with lesser electionIds. This prevents the client from oscillating
 * between the old and new primary during a split-brain period."
 */
if (_mongoc_topology_description_later_election (topology, server)) {
bson_set_error (&error,
MONGOC_ERROR_STREAM,
MONGOC_ERROR_STREAM_CONNECT,
"member's setVersion or electionId is stale");
mongoc_topology_description_invalidate_server (
topology, server->id, &error);
_update_rs_type (topology);
return;
}
/* server's electionId >= topology's max electionId */
_mongoc_topology_description_set_max_election_id (topology, server);
}
/* track the greatest setVersion ever reported by a primary */
if (mongoc_server_description_has_set_version (server) &&
(!_mongoc_topology_description_has_set_version (topology) ||
server->set_version > topology->max_set_version)) {
_mongoc_topology_description_set_max_set_version (topology, server);
}
/* 'Server' is the primary! Invalidate other primaries if found */
data.primary = server;
data.topology = topology;
mongoc_set_for_each (topology->servers,
_mongoc_topology_description_invalidate_primaries_cb,
&data);
/* Add to topology description any new servers primary knows about */
_mongoc_topology_description_add_new_servers (topology, server);
/* Remove from topology description any servers primary doesn't know about */
_mongoc_topology_description_remove_unreported_servers (topology, server);
/* Finally, set topology type */
_update_rs_type (topology);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_description_update_rs_without_primary --
 *
 *      Update cluster's information when there is no primary.
 *
 * Returns:
 *      None.
 *
 * Side Effects:
 *      Alters cluster state, may remove node from cluster.
 *
 *--------------------------------------------------------------------------
 */
/* NOTE(review): this function contained unresolved diff residue (both the
 * "-" and "+" hunks of a patch that moves the matches_me check after the
 * POSSIBLE_PRIMARY labeling). Resolved here to the post-patch ordering:
 * label the reported primary first, then drop this server if "me" does not
 * match the address we connected to. */
static void
_mongoc_topology_description_update_rs_without_primary (
   mongoc_topology_description_t *topology, mongoc_server_description_t *server)
{
   BSON_ASSERT (topology);
   BSON_ASSERT (server);

   if (!_mongoc_topology_description_has_server (
          topology, server->connection_address, NULL)) {
      return;
   }

   /* make sure we're talking about the same replica set */
   if (server->set_name) {
      if (!topology->set_name) {
         topology->set_name = bson_strdup (server->set_name);
      } else if (strcmp (topology->set_name, server->set_name) != 0) {
         _mongoc_topology_description_remove_server (topology, server);
         return;
      }
   }

   /* Add new servers that this replica set member knows about */
   _mongoc_topology_description_add_new_servers (topology, server);

   /* If this server thinks there is a primary, label it POSSIBLE_PRIMARY */
   if (server->current_primary) {
      _mongoc_topology_description_label_unknown_member (
         topology, server->current_primary, MONGOC_SERVER_POSSIBLE_PRIMARY);
   }

   if (!_mongoc_topology_description_matches_me (server)) {
      _mongoc_topology_description_remove_server (topology, server);
      return;
   }
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_description_update_rs_with_primary_from_member --
 *
 *      Update cluster's information when there is a primary, but the
 *      update is coming from another replica set member.
 *
 * Returns:
 *      None.
 *
 * Side Effects:
 *      Alters cluster state.
 *
 *--------------------------------------------------------------------------
 */
static void
_mongoc_topology_description_update_rs_with_primary_from_member (
   mongoc_topology_description_t *topology, mongoc_server_description_t *server)
{
   BSON_ASSERT (topology);
   BSON_ASSERT (server);

   /* unknown server: nothing to do */
   if (!_mongoc_topology_description_has_server (
          topology, server->connection_address, NULL)) {
      return;
   }

   /* set_name should never be null here */
   if (strcmp (topology->set_name, server->set_name) != 0) {
      /* wrong replica set: evict and recompute the topology type */
      _mongoc_topology_description_remove_server (topology, server);
      _update_rs_type (topology);
      return;
   }

   if (!_mongoc_topology_description_matches_me (server)) {
      _mongoc_topology_description_remove_server (topology, server);
      return;
   }

   /* If there is no primary, label server's current_primary as the
    * POSSIBLE_PRIMARY */
   if (!_mongoc_topology_description_has_primary (topology) &&
       server->current_primary) {
      _mongoc_topology_description_set_state (topology,
                                              MONGOC_TOPOLOGY_RS_NO_PRIMARY);
      _mongoc_topology_description_label_unknown_member (
         topology, server->current_primary, MONGOC_SERVER_POSSIBLE_PRIMARY);
   }
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_description_set_topology_type_to_sharded --
 *
 *      Sets topology's type to SHARDED.
 *
 * Returns:
 *      None
 *
 * Side effects:
 *      Alter's topology's type
 *
 *--------------------------------------------------------------------------
 */
static void
_mongoc_topology_description_set_topology_type_to_sharded (
   mongoc_topology_description_t *topology, mongoc_server_description_t *server)
{
   /* @server is unused; the signature matches the other transition handlers */
   _mongoc_topology_description_set_state (topology, MONGOC_TOPOLOGY_SHARDED);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_description_transition_unknown_to_rs_no_primary --
*
* Encapsulates transition from cluster state UNKNOWN to
* RS_NO_PRIMARY. Sets the type to RS_NO_PRIMARY,
* then updates the replica set accordingly.
*
* Returns:
* None.
*
* Side effects:
* Changes topology state.
*
*--------------------------------------------------------------------------
*/
static void
_mongoc_topology_description_transition_unknown_to_rs_no_primary (
   mongoc_topology_description_t *topology, mongoc_server_description_t *server)
{
   /* Adopt the RS_NO_PRIMARY type first, then let the regular
    * "update replica set without primary" logic process @server. */
   _mongoc_topology_description_set_state (topology,
                                           MONGOC_TOPOLOGY_RS_NO_PRIMARY);
   _mongoc_topology_description_update_rs_without_primary (topology, server);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_description_remove_and_check_primary --
*
* Remove the server and check if the topology still has a primary.
*
* Returns:
* None.
*
* Side effects:
* Removes server from topology and destroys it.
*
*--------------------------------------------------------------------------
*/
static void
_mongoc_topology_description_remove_and_check_primary (
   mongoc_topology_description_t *topology, mongoc_server_description_t *server)
{
   /* Removing @server may have removed the primary; re-derive whether the
    * topology is still RS_WITH_PRIMARY. */
   _mongoc_topology_description_remove_server (topology, server);
   _update_rs_type (topology);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_description_update_unknown_with_standalone --
*
* If the cluster doesn't contain this server, do nothing.
* Otherwise, if the topology only has one seed, change its
* type to SINGLE. If the topology has multiple seeds, it does not
* include us, so remove this server and destroy it.
*
* Returns:
* None.
*
* Side effects:
* Changes the topology type, might remove server from topology.
*
*--------------------------------------------------------------------------
*/
/* Handle a standalone server observed while the topology type is UNKNOWN.
 * If the server is no longer part of the topology, do nothing.  If it is
 * the only seed, the deployment is a single server; otherwise a standalone
 * cannot belong to this multi-server cluster and is removed. */
static void
_mongoc_topology_description_update_unknown_with_standalone (
   mongoc_topology_description_t *topology, mongoc_server_description_t *server)
{
   BSON_ASSERT (topology);
   BSON_ASSERT (server);

   /* Nothing to do for a server we no longer monitor. */
   if (!_mongoc_topology_description_has_server (
          topology, server->connection_address, NULL)) {
      return;
   }

   if (topology->servers->items_len == 1) {
      /* The lone seed: treat the deployment as a single server. */
      _mongoc_topology_description_set_state (topology, MONGOC_TOPOLOGY_SINGLE);
   } else {
      /* This cluster contains other servers, it cannot be a standalone. */
      _mongoc_topology_description_remove_server (topology, server);
   }
}
/*
*--------------------------------------------------------------------------
*
* This table implements the 'TopologyType' table outlined in the Server
* Discovery and Monitoring spec. Each row represents a server type,
* and each column represents the topology type. Given a current topology
* type T and a newly-observed server type S, use the function at
* state_transitions[S][T] to transition to a new state.
*
* Rows should be read like so:
* { server type for this row
* UNKNOWN,
* SHARDED,
* RS_NO_PRIMARY,
* RS_WITH_PRIMARY
* }
*
*--------------------------------------------------------------------------
*/
/* Signature shared by every entry of the SDAM transition table below. */
typedef void (*transition_t) (mongoc_topology_description_t *topology,
                              mongoc_server_description_t *server);

/* gSDAMTransitionTable[S][T] is the action to apply when a server of type S
 * is observed while the topology currently has type T.  A NULL entry means
 * no transition is required.  The four columns in each row are, in order:
 * MONGOC_TOPOLOGY_UNKNOWN, MONGOC_TOPOLOGY_SHARDED,
 * MONGOC_TOPOLOGY_RS_NO_PRIMARY, MONGOC_TOPOLOGY_RS_WITH_PRIMARY. */
transition_t gSDAMTransitionTable
   [MONGOC_SERVER_DESCRIPTION_TYPES][MONGOC_TOPOLOGY_DESCRIPTION_TYPES] = {
      {
         /* UNKNOWN */
         NULL, /* MONGOC_TOPOLOGY_UNKNOWN */
         NULL, /* MONGOC_TOPOLOGY_SHARDED */
         NULL, /* MONGOC_TOPOLOGY_RS_NO_PRIMARY */
         _mongoc_topology_description_check_if_has_primary /* MONGOC_TOPOLOGY_RS_WITH_PRIMARY */
      },
      {/* STANDALONE */
       _mongoc_topology_description_update_unknown_with_standalone,
       _mongoc_topology_description_remove_server,
       _mongoc_topology_description_remove_server,
       _mongoc_topology_description_remove_and_check_primary},
      {/* MONGOS */
       _mongoc_topology_description_set_topology_type_to_sharded,
       NULL,
       _mongoc_topology_description_remove_server,
       _mongoc_topology_description_remove_and_check_primary},
      {/* POSSIBLE_PRIMARY */
       NULL,
       NULL,
       NULL,
       NULL},
      {/* PRIMARY */
       _mongoc_topology_description_update_rs_from_primary,
       _mongoc_topology_description_remove_server,
       _mongoc_topology_description_update_rs_from_primary,
       _mongoc_topology_description_update_rs_from_primary},
      {/* SECONDARY */
       _mongoc_topology_description_transition_unknown_to_rs_no_primary,
       _mongoc_topology_description_remove_server,
       _mongoc_topology_description_update_rs_without_primary,
       _mongoc_topology_description_update_rs_with_primary_from_member},
      {/* ARBITER */
       _mongoc_topology_description_transition_unknown_to_rs_no_primary,
       _mongoc_topology_description_remove_server,
       _mongoc_topology_description_update_rs_without_primary,
       _mongoc_topology_description_update_rs_with_primary_from_member},
      {/* RS_OTHER */
       _mongoc_topology_description_transition_unknown_to_rs_no_primary,
       _mongoc_topology_description_remove_server,
       _mongoc_topology_description_update_rs_without_primary,
       _mongoc_topology_description_update_rs_with_primary_from_member},
      {/* RS_GHOST */
       NULL,
       _mongoc_topology_description_remove_server,
       NULL,
       _mongoc_topology_description_check_if_has_primary}};
#ifdef MONGOC_TRACE
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_description_type --
*
* Get this topology's type, one of the types defined in the Server
* Discovery And Monitoring Spec.
*
* NOTE: this method should only be called while holding the mutex on
* the owning topology object.
*
* Returns:
* A string.
*
* Side effects:
* None.
*
*--------------------------------------------------------------------------
*/
/* Return the trace-output name for this topology's current type, one of the
 * types defined in the Server Discovery And Monitoring spec.
 *
 * NOTE: call only while holding the mutex on the owning topology object. */
static const char *
_mongoc_topology_description_type (mongoc_topology_description_t *topology)
{
   if (topology->type == MONGOC_TOPOLOGY_UNKNOWN) {
      return "Unknown";
   }
   if (topology->type == MONGOC_TOPOLOGY_SHARDED) {
      return "Sharded";
   }
   if (topology->type == MONGOC_TOPOLOGY_RS_NO_PRIMARY) {
      return "RSNoPrimary";
   }
   if (topology->type == MONGOC_TOPOLOGY_RS_WITH_PRIMARY) {
      return "RSWithPrimary";
   }
   if (topology->type == MONGOC_TOPOLOGY_SINGLE) {
      return "Single";
   }

   /* Includes MONGOC_TOPOLOGY_DESCRIPTION_TYPES and any corrupt value. */
   MONGOC_ERROR ("Invalid mongoc_topology_description_type_t type");
   return "Invalid";
}
#endif
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_topology_description_update_session_timeout --
+ *
+ * Fill out td.session_timeout_minutes.
+ *
+ * Server Discovery and Monitoring Spec: "set logicalSessionTimeoutMinutes
+ * to the smallest logicalSessionTimeoutMinutes value among all
+ * ServerDescriptions of known ServerType. If any ServerDescription of
+ * known ServerType has a null logicalSessionTimeoutMinutes, then
+ * logicalSessionTimeoutMinutes MUST be set to null."
+ *
+ * --------------------------------------------------------------------------
+ */
+
+static void
+_mongoc_topology_description_update_session_timeout (
+   mongoc_topology_description_t *td)
+{
+   mongoc_set_t *set;
+   size_t i;
+   mongoc_server_description_t *sd;
+
+   set = td->servers;
+
+   /* Start from "no sessions"; only data-bearing nodes can change this. */
+   td->session_timeout_minutes = MONGOC_NO_SESSIONS;
+
+   for (i = 0; i < set->items_len; i++) {
+      sd = (mongoc_server_description_t *) mongoc_set_get_item (set, (int) i);
+      if (!_is_data_node (sd)) {
+         continue;
+      }
+
+      if (sd->session_timeout_minutes == MONGOC_NO_SESSIONS) {
+         /* Per the spec: one data node without a timeout disables sessions
+          * for the whole topology. */
+         td->session_timeout_minutes = MONGOC_NO_SESSIONS;
+         return;
+      } else if (td->session_timeout_minutes == MONGOC_NO_SESSIONS) {
+         td->session_timeout_minutes = sd->session_timeout_minutes;
+      } else if (td->session_timeout_minutes > sd->session_timeout_minutes) {
+         /* Keep the smallest timeout seen among data nodes. */
+         td->session_timeout_minutes = sd->session_timeout_minutes;
+      }
+   }
+}
+
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_description_check_compatible --
*
* Fill out td.compatibility_error if any server's wire versions do
* not overlap with ours. Otherwise clear td.compatibility_error.
*
* If any server is incompatible, the topology as a whole is considered
* incompatible.
*
*--------------------------------------------------------------------------
*/
static void
_mongoc_topology_description_check_compatible (
mongoc_topology_description_t *td)
{
size_t i;
mongoc_server_description_t *sd;
- bool server_too_new;
- bool server_too_old;
memset (&td->compatibility_error, 0, sizeof (bson_error_t));
for (i = 0; i < td->servers->items_len; i++) {
sd = (mongoc_server_description_t *) mongoc_set_get_item (td->servers,
(int) i);
if (sd->type == MONGOC_SERVER_UNKNOWN ||
sd->type == MONGOC_SERVER_POSSIBLE_PRIMARY) {
continue;
}
- /* A server is considered to be incompatible with a driver if its min and
- * max wire version does not overlap the driver’s. Specifically, a driver
- * with a min and max range of [a, b] must be considered incompatible
- * with any server with min and max range of [c, d] where c > b or d < a.
- * All other servers are considered to be compatible. */
- server_too_new = sd->min_wire_version > WIRE_VERSION_MAX;
- server_too_old = sd->max_wire_version < WIRE_VERSION_MIN;
-
- if (server_too_new || server_too_old) {
- bson_set_error (&td->compatibility_error,
- MONGOC_ERROR_PROTOCOL,
- MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
- "Server at \"%s\" uses wire protocol versions %d"
- " through %d, but libmongoc %s only supports %d"
- " through %d",
- sd->host.host_and_port,
- sd->min_wire_version,
- sd->max_wire_version,
- MONGOC_VERSION_S,
- WIRE_VERSION_MIN,
- WIRE_VERSION_MAX);
-
- break;
+ if (sd->min_wire_version > WIRE_VERSION_MAX) {
+ bson_set_error (
+ &td->compatibility_error,
+ MONGOC_ERROR_PROTOCOL,
+ MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
+ "Server at %s requires wire version %d,"
+ " but this version of libmongoc only supports up to %d",
+ sd->host.host_and_port,
+ sd->min_wire_version,
+ WIRE_VERSION_MAX);
+ } else if (sd->max_wire_version < WIRE_VERSION_MIN) {
+ bson_set_error (
+ &td->compatibility_error,
+ MONGOC_ERROR_PROTOCOL,
+ MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
+ "Server at %s reports wire version %d, but this"
+ " version of libmongoc requires at least 2 (MongoDB 2.6)",
+ sd->host.host_and_port,
+ sd->max_wire_version);
}
}
}
/*
*--------------------------------------------------------------------------
*
* mongoc_topology_description_handle_ismaster --
*
* Handle an ismaster. This is called by the background SDAM process,
* and by client when invalidating servers. If there was an error
* calling ismaster, pass it in as @error.
*
* NOTE: this method should only be called while holding the mutex on
* the owning topology object.
*
*--------------------------------------------------------------------------
*/
void
mongoc_topology_description_handle_ismaster (
mongoc_topology_description_t *topology,
uint32_t server_id,
const bson_t *ismaster_response,
int64_t rtt_msec,
const bson_error_t *error /* IN */)
{
mongoc_topology_description_t *prev_td = NULL;
mongoc_server_description_t *prev_sd = NULL;
mongoc_server_description_t *sd;
BSON_ASSERT (topology);
BSON_ASSERT (server_id != 0);
sd = mongoc_topology_description_server_by_id (topology, server_id, NULL);
if (!sd) {
return; /* server already removed from topology */
}
if (topology->apm_callbacks.topology_changed) {
prev_td = bson_malloc0 (sizeof (mongoc_topology_description_t));
_mongoc_topology_description_copy_to (topology, prev_td);
}
if (topology->apm_callbacks.server_changed) {
prev_sd = mongoc_server_description_new_copy (sd);
}
/* pass the current error in */
mongoc_server_description_handle_ismaster (
sd, ismaster_response, rtt_msec, error);
+ mongoc_topology_description_update_cluster_time (topology,
+ ismaster_response);
_mongoc_topology_description_monitor_server_changed (topology, prev_sd, sd);
if (gSDAMTransitionTable[sd->type][topology->type]) {
TRACE ("Transitioning to %s for %s",
_mongoc_topology_description_type (topology),
mongoc_server_description_type (sd));
gSDAMTransitionTable[sd->type][topology->type](topology, sd);
} else {
TRACE ("No transition entry to %s for %s",
_mongoc_topology_description_type (topology),
mongoc_server_description_type (sd));
}
- _mongoc_topology_description_check_compatible (topology);
+ _mongoc_topology_description_update_session_timeout (topology);
+
+ /* Don't bother checking wire version compatibility if we already errored */
+ if (ismaster_response && (!error || !error->code)) {
+ _mongoc_topology_description_check_compatible (topology);
+ }
_mongoc_topology_description_monitor_changed (prev_td, topology);
if (prev_td) {
mongoc_topology_description_destroy (prev_td);
bson_free (prev_td);
}
if (prev_sd) {
mongoc_server_description_destroy (prev_sd);
}
}
/*
*--------------------------------------------------------------------------
*
* mongoc_topology_description_has_readable_server --
*
* SDAM Monitoring Spec:
* "Determines if the topology has a readable server available."
*
* NOTE: this method should only be called by user code in an SDAM
* Monitoring callback, while the monitoring framework holds the mutex
* on the owning topology object.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_topology_description_has_readable_server (
   mongoc_topology_description_t *td, const mongoc_read_prefs_t *prefs)
{
   bson_error_t error;

   /* An incompatible topology can never satisfy a read preference. */
   if (!mongoc_topology_compatible (td, NULL, &error)) {
      return false;
   }

   /* local threshold argument doesn't matter */
   return mongoc_topology_description_select (td, MONGOC_SS_READ, prefs, 0) !=
          NULL;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_topology_description_has_writable_server --
*
* SDAM Monitoring Spec:
* "Determines if the topology has a writable server available."
*
* NOTE: this method should only be called by user code in an SDAM
* Monitoring callback, while the monitoring framework holds the mutex
* on the owning topology object.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_topology_description_has_writable_server (
   mongoc_topology_description_t *td)
{
   bson_error_t error;

   /* An incompatible topology can never accept writes. */
   if (!mongoc_topology_compatible (td, NULL, &error)) {
      return false;
   }

   /* Writes need no read preference: select any writable server. */
   return mongoc_topology_description_select (td, MONGOC_SS_WRITE, NULL, 0) !=
          NULL;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_topology_description_type --
*
* Get this topology's type, one of the types defined in the Server
* Discovery And Monitoring Spec.
*
* NOTE: this method should only be called by user code in an SDAM
* Monitoring callback, while the monitoring framework holds the mutex
* on the owning topology object.
*
* Returns:
* A string.
*
*--------------------------------------------------------------------------
*/
/* Return this topology's type name as defined by the Server Discovery And
 * Monitoring spec ("Unknown", "Sharded", "ReplicaSetNoPrimary",
 * "ReplicaSetWithPrimary", or "Single").
 *
 * NOTE: call only from an SDAM Monitoring callback, while the monitoring
 * framework holds the mutex on the owning topology object. */
const char *
mongoc_topology_description_type (const mongoc_topology_description_t *td)
{
   if (td->type == MONGOC_TOPOLOGY_UNKNOWN) {
      return "Unknown";
   }
   if (td->type == MONGOC_TOPOLOGY_SHARDED) {
      return "Sharded";
   }
   if (td->type == MONGOC_TOPOLOGY_RS_NO_PRIMARY) {
      return "ReplicaSetNoPrimary";
   }
   if (td->type == MONGOC_TOPOLOGY_RS_WITH_PRIMARY) {
      return "ReplicaSetWithPrimary";
   }
   if (td->type == MONGOC_TOPOLOGY_SINGLE) {
      return "Single";
   }

   /* MONGOC_TOPOLOGY_DESCRIPTION_TYPES or a corrupt value: abort. */
   fprintf (stderr, "ERROR: Unknown topology type %d\n", td->type);
   BSON_ASSERT (0);

   return NULL;
}
+
/*
*--------------------------------------------------------------------------
*
* mongoc_topology_description_get_servers --
*
* Fetch an array of server descriptions for all known servers in the
* topology.
*
* Returns:
* An array you must free with mongoc_server_descriptions_destroy_all.
*
*--------------------------------------------------------------------------
*/
/* Return a newly-allocated array of copies of every known (non-UNKNOWN)
 * server description in @td, with the count stored in @n.  The caller must
 * free the array with mongoc_server_descriptions_destroy_all. */
mongoc_server_description_t **
mongoc_topology_description_get_servers (
   const mongoc_topology_description_t *td, size_t *n /* OUT */)
{
   size_t idx;
   mongoc_set_t *servers;
   mongoc_server_description_t **descriptions;
   mongoc_server_description_t *description;

   BSON_ASSERT (td);
   BSON_ASSERT (n);

   servers = td->servers;

   /* Allocate room for every description, even if some turn out unknown. */
   descriptions = (mongoc_server_description_t **) bson_malloc0 (
      sizeof (mongoc_server_description_t *) * servers->items_len);

   *n = 0;

   for (idx = 0; idx < servers->items_len; idx++) {
      description = (mongoc_server_description_t *) mongoc_set_get_item (
         servers, (int) idx);

      /* Unknown servers are monitored but not reported to the caller. */
      if (description->type != MONGOC_SERVER_UNKNOWN) {
         descriptions[(*n)++] =
            mongoc_server_description_new_copy (description);
      }
   }

   return descriptions;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-description.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-description.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-private.h
similarity index 84%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-private.h
index 0526a181..5f100196 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-private.h
@@ -1,123 +1,145 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_TOPOLOGY_PRIVATE_H
#define MONGOC_TOPOLOGY_PRIVATE_H
-#include "mongoc-read-prefs-private.h"
#include "mongoc-topology-scanner-private.h"
#include "mongoc-server-description-private.h"
#include "mongoc-topology-description-private.h"
#include "mongoc-thread-private.h"
#include "mongoc-uri.h"
+#include "mongoc-client-session-private.h"
#define MONGOC_TOPOLOGY_MIN_HEARTBEAT_FREQUENCY_MS 500
#define MONGOC_TOPOLOGY_SOCKET_CHECK_INTERVAL_MS 5000
#define MONGOC_TOPOLOGY_COOLDOWN_MS 5000
#define MONGOC_TOPOLOGY_LOCAL_THRESHOLD_MS 15
#define MONGOC_TOPOLOGY_SERVER_SELECTION_TIMEOUT_MS 30000
#define MONGOC_TOPOLOGY_HEARTBEAT_FREQUENCY_MS_MULTI_THREADED 10000
#define MONGOC_TOPOLOGY_HEARTBEAT_FREQUENCY_MS_SINGLE_THREADED 60000
typedef enum {
MONGOC_TOPOLOGY_SCANNER_OFF,
MONGOC_TOPOLOGY_SCANNER_BG_RUNNING,
MONGOC_TOPOLOGY_SCANNER_SHUTTING_DOWN,
MONGOC_TOPOLOGY_SCANNER_SINGLE_THREADED,
} mongoc_topology_scanner_state_t;
typedef struct _mongoc_topology_t {
mongoc_topology_description_t description;
mongoc_uri_t *uri;
mongoc_topology_scanner_t *scanner;
bool server_selection_try_once;
int64_t last_scan;
int64_t local_threshold_msec;
int64_t connect_timeout_msec;
int64_t server_selection_timeout_msec;
mongoc_mutex_t mutex;
mongoc_cond_t cond_client;
mongoc_cond_t cond_server;
mongoc_thread_t thread;
mongoc_topology_scanner_state_t scanner_state;
bool scan_requested;
bool shutdown_requested;
bool single_threaded;
bool stale;
+
+ mongoc_server_session_t *session_pool;
} mongoc_topology_t;
mongoc_topology_t *
mongoc_topology_new (const mongoc_uri_t *uri, bool single_threaded);
void
mongoc_topology_set_apm_callbacks (mongoc_topology_t *topology,
mongoc_apm_callbacks_t *callbacks,
void *context);
void
mongoc_topology_destroy (mongoc_topology_t *topology);
bool
mongoc_topology_compatible (const mongoc_topology_description_t *td,
const mongoc_read_prefs_t *read_prefs,
bson_error_t *error);
mongoc_server_description_t *
mongoc_topology_select (mongoc_topology_t *topology,
mongoc_ss_optype_t optype,
const mongoc_read_prefs_t *read_prefs,
bson_error_t *error);
uint32_t
mongoc_topology_select_server_id (mongoc_topology_t *topology,
mongoc_ss_optype_t optype,
const mongoc_read_prefs_t *read_prefs,
bson_error_t *error);
mongoc_server_description_t *
mongoc_topology_server_by_id (mongoc_topology_t *topology,
uint32_t id,
bson_error_t *error);
mongoc_host_list_t *
_mongoc_topology_host_by_id (mongoc_topology_t *topology,
uint32_t id,
bson_error_t *error);
void
mongoc_topology_invalidate_server (mongoc_topology_t *topology,
uint32_t id,
const bson_error_t *error);
bool
_mongoc_topology_update_from_handshake (mongoc_topology_t *topology,
const mongoc_server_description_t *sd);
+void
+_mongoc_topology_update_last_used (mongoc_topology_t *topology,
+ uint32_t server_id);
+
int64_t
mongoc_topology_server_timestamp (mongoc_topology_t *topology, uint32_t id);
mongoc_topology_description_type_t
_mongoc_topology_get_type (mongoc_topology_t *topology);
bool
_mongoc_topology_start_background_scanner (mongoc_topology_t *topology);
bool
_mongoc_topology_set_appname (mongoc_topology_t *topology, const char *appname);
+
+void
+_mongoc_topology_update_cluster_time (mongoc_topology_t *topology,
+ const bson_t *reply);
+
+mongoc_server_session_t *
+_mongoc_topology_pop_server_session (mongoc_topology_t *topology,
+ bson_error_t *error);
+
+void
+_mongoc_topology_push_server_session (mongoc_topology_t *topology,
+ mongoc_server_session_t *server_session);
+
+void
+_mongoc_topology_end_sessions_cmd (mongoc_topology_t *topology, bson_t *cmd);
+
#endif
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-scanner-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-scanner-private.h
similarity index 95%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-scanner-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-scanner-private.h
index f5c214b1..c41d1d0d 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-scanner-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-scanner-private.h
@@ -1,177 +1,183 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_TOPOLOGY_SCANNER_PRIVATE_H
#define MONGOC_TOPOLOGY_SCANNER_PRIVATE_H
/* TODO: rename to TOPOLOGY scanner */
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-async-private.h"
#include "mongoc-async-cmd-private.h"
#include "mongoc-host-list.h"
#include "mongoc-apm-private.h"
#ifdef MONGOC_ENABLE_SSL
#include "mongoc-ssl.h"
#endif
BSON_BEGIN_DECLS
typedef void (*mongoc_topology_scanner_setup_err_cb_t) (
uint32_t id, void *data, const bson_error_t *error /* IN */);
typedef void (*mongoc_topology_scanner_cb_t) (
uint32_t id,
const bson_t *bson,
int64_t rtt,
void *data,
const bson_error_t *error /* IN */);
struct mongoc_topology_scanner;
typedef struct mongoc_topology_scanner_node {
uint32_t id;
mongoc_async_cmd_t *cmd;
mongoc_stream_t *stream;
int64_t timestamp;
int64_t last_used;
int64_t last_failed;
bool has_auth;
mongoc_host_list_t host;
struct addrinfo *dns_results;
struct addrinfo *current_dns_result;
struct mongoc_topology_scanner *ts;
struct mongoc_topology_scanner_node *next;
struct mongoc_topology_scanner_node *prev;
bool retired;
bson_error_t last_error;
} mongoc_topology_scanner_node_t;
typedef struct mongoc_topology_scanner {
mongoc_async_t *async;
mongoc_topology_scanner_node_t *nodes;
bson_t ismaster_cmd;
-
bson_t ismaster_cmd_with_handshake;
+ bson_t cluster_time;
bool handshake_ok_to_send;
const char *appname;
mongoc_topology_scanner_setup_err_cb_t setup_err_cb;
mongoc_topology_scanner_cb_t cb;
void *cb_data;
bool in_progress;
const mongoc_uri_t *uri;
mongoc_async_cmd_setup_t setup;
mongoc_stream_initiator_t initiator;
void *initiator_context;
bson_error_t error;
#ifdef MONGOC_ENABLE_SSL
mongoc_ssl_opt_t *ssl_opts;
#endif
mongoc_apm_callbacks_t apm_callbacks;
void *apm_context;
} mongoc_topology_scanner_t;
mongoc_topology_scanner_t *
mongoc_topology_scanner_new (
const mongoc_uri_t *uri,
mongoc_topology_scanner_setup_err_cb_t setup_err_cb,
mongoc_topology_scanner_cb_t cb,
void *data);
void
mongoc_topology_scanner_destroy (mongoc_topology_scanner_t *ts);
+bool
+mongoc_topology_scanner_valid (mongoc_topology_scanner_t *ts);
+
void
mongoc_topology_scanner_add (mongoc_topology_scanner_t *ts,
const mongoc_host_list_t *host,
uint32_t id);
void
mongoc_topology_scanner_scan (mongoc_topology_scanner_t *ts,
uint32_t id,
int64_t timeout_msec);
void
mongoc_topology_scanner_node_retire (mongoc_topology_scanner_node_t *node);
void
mongoc_topology_scanner_node_disconnect (mongoc_topology_scanner_node_t *node,
bool failed);
void
mongoc_topology_scanner_node_destroy (mongoc_topology_scanner_node_t *node,
bool failed);
void
mongoc_topology_scanner_start (mongoc_topology_scanner_t *ts,
int64_t timeout_msec,
bool obey_cooldown);
void
mongoc_topology_scanner_work (mongoc_topology_scanner_t *ts);
void
_mongoc_topology_scanner_finish (mongoc_topology_scanner_t *ts);
void
mongoc_topology_scanner_get_error (mongoc_topology_scanner_t *ts,
bson_error_t *error);
void
mongoc_topology_scanner_reset (mongoc_topology_scanner_t *ts);
bool
mongoc_topology_scanner_node_setup (mongoc_topology_scanner_node_t *node,
bson_error_t *error);
mongoc_topology_scanner_node_t *
mongoc_topology_scanner_get_node (mongoc_topology_scanner_t *ts, uint32_t id);
bson_t *
_mongoc_topology_scanner_get_ismaster (mongoc_topology_scanner_t *ts);
bool
mongoc_topology_scanner_has_node_for_host (mongoc_topology_scanner_t *ts,
mongoc_host_list_t *host);
void
mongoc_topology_scanner_set_stream_initiator (mongoc_topology_scanner_t *ts,
mongoc_stream_initiator_t si,
void *ctx);
bool
_mongoc_topology_scanner_set_appname (mongoc_topology_scanner_t *ts,
const char *name);
+void
+_mongoc_topology_scanner_set_cluster_time (mongoc_topology_scanner_t *ts,
+ const bson_t *cluster_time);
#ifdef MONGOC_ENABLE_SSL
void
mongoc_topology_scanner_set_ssl_opts (mongoc_topology_scanner_t *ts,
mongoc_ssl_opt_t *opts);
#endif
BSON_END_DECLS
#endif /* MONGOC_TOPOLOGY_SCANNER_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-scanner.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-scanner.c
similarity index 95%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-scanner.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-scanner.c
index ccc49e40..122110de 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology-scanner.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology-scanner.c
@@ -1,861 +1,888 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bson.h>
#include <bson-string.h>
#include "mongoc-config.h"
#include "mongoc-error.h"
#include "mongoc-trace-private.h"
#include "mongoc-topology-scanner-private.h"
#include "mongoc-stream-socket.h"
#include "mongoc-handshake.h"
#include "mongoc-handshake-private.h"
#ifdef MONGOC_ENABLE_SSL
#include "mongoc-stream-tls.h"
#endif
#include "mongoc-counters-private.h"
#include "utlist.h"
#include "mongoc-topology-private.h"
#include "mongoc-host-list-private.h"
#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "topology_scanner"
/* forward declarations */
static void
mongoc_topology_scanner_ismaster_handler (
mongoc_async_cmd_result_t async_status,
const bson_t *ismaster_response,
int64_t rtt_msec,
void *data,
bson_error_t *error);
static void
_mongoc_topology_scanner_monitor_heartbeat_started (
const mongoc_topology_scanner_t *ts, const mongoc_host_list_t *host);
static void
_mongoc_topology_scanner_monitor_heartbeat_succeeded (
const mongoc_topology_scanner_t *ts,
const mongoc_host_list_t *host,
const bson_t *reply);
static void
_mongoc_topology_scanner_monitor_heartbeat_failed (
const mongoc_topology_scanner_t *ts,
const mongoc_host_list_t *host,
const bson_error_t *error);
static void
_add_ismaster (bson_t *cmd)
{
   /* Append the {"isMaster": 1} command field to @cmd. */
   BSON_APPEND_INT32 (cmd, "isMaster", 1);
}
/* Build ts->ismaster_cmd_with_handshake: an isMaster command carrying the
 * handshake metadata document plus the compressors requested in the URI.
 * Returns false when the handshake document exceeded its size limit and
 * should therefore not be sent. */
static bool
_build_ismaster_with_handshake (mongoc_topology_scanner_t *ts)
{
   bson_t *doc = &ts->ismaster_cmd_with_handshake;
   bson_t subdoc;
   bson_iter_t iter;
   const char *key;
   int keylen;
   bool res;
   const bson_t *compressors;
   int count = 0;
   char buf[16];

   _add_ismaster (doc);

   /* Handshake metadata subdocument (includes the application name). */
   BSON_APPEND_DOCUMENT_BEGIN (doc, HANDSHAKE_FIELD, &subdoc);
   res = _mongoc_handshake_build_doc_with_application (&subdoc, ts->appname);
   bson_append_document_end (doc, &subdoc);

   /* "compression": array of compressor names keyed "0", "1", ... */
   BSON_APPEND_ARRAY_BEGIN (doc, "compression", &subdoc);
   if (ts->uri) {
      compressors = mongoc_uri_get_compressors (ts->uri);

      if (bson_iter_init (&iter, compressors)) {
         while (bson_iter_next (&iter)) {
            keylen = bson_uint32_to_string (count++, &key, buf, sizeof buf);
            bson_append_utf8 (
               &subdoc, key, (int) keylen, bson_iter_key (&iter), -1);
         }
      }
   }
   bson_append_array_end (doc, &subdoc);

   /* Return whether the handshake doc fit the size limit */
   return res;
}
/* Return the isMaster command to send: the handshake-bearing variant when
 * it fits the size limit, otherwise the plain isMaster command. */
bson_t *
_mongoc_topology_scanner_get_ismaster (mongoc_topology_scanner_t *ts)
{
   /* Build the handshake variant lazily: on first use of the node, or on
    * first use after a failure left it empty. */
   if (bson_empty (&ts->ismaster_cmd_with_handshake)) {
      ts->handshake_ok_to_send = _build_ismaster_with_handshake (ts);
      if (!ts->handshake_ok_to_send) {
         MONGOC_WARNING ("Handshake doc too big, not including in isMaster");
      }
   }

   /* Fall back to the plain command when the handshake doc was oversized. */
   return ts->handshake_ok_to_send ? &ts->ismaster_cmd_with_handshake
                                   : &ts->ismaster_cmd;
}
static void
_begin_ismaster_cmd (mongoc_topology_scanner_t *ts,
mongoc_topology_scanner_node_t *node,
int64_t timeout_msec)
{
- const bson_t *command;
+ bson_t cmd;
if (node->last_used != -1 && node->last_failed == -1) {
/* The node's been used before and not failed recently */
- command = &ts->ismaster_cmd;
+ bson_copy_to (&ts->ismaster_cmd, &cmd);
} else {
- command = _mongoc_topology_scanner_get_ismaster (ts);
+ bson_copy_to (_mongoc_topology_scanner_get_ismaster (ts), &cmd);
}
+ if (!bson_empty (&ts->cluster_time)) {
+ bson_append_document (&cmd, "$clusterTime", 12, &ts->cluster_time);
+ }
node->cmd = mongoc_async_cmd_new (ts->async,
node->stream,
ts->setup,
node->host.host,
"admin",
- command,
+ &cmd,
&mongoc_topology_scanner_ismaster_handler,
node,
timeout_msec);
+
+ bson_destroy (&cmd);
}
/* Allocate and initialize a topology scanner.  @setup_err_cb is invoked when
 * node setup fails; @cb is invoked with each isMaster result; @data is passed
 * through to @cb.  Free with mongoc_topology_scanner_destroy. */
mongoc_topology_scanner_t *
mongoc_topology_scanner_new (
   const mongoc_uri_t *uri,
   mongoc_topology_scanner_setup_err_cb_t setup_err_cb,
   mongoc_topology_scanner_cb_t cb,
   void *data)
{
   mongoc_topology_scanner_t *ts =
      (mongoc_topology_scanner_t *) bson_malloc0 (sizeof (*ts));

   ts->async = mongoc_async_new ();

   /* Plain isMaster command; the handshake variant is built lazily. */
   bson_init (&ts->ismaster_cmd);
   _add_ismaster (&ts->ismaster_cmd);
   bson_init (&ts->ismaster_cmd_with_handshake);
+   bson_init (&ts->cluster_time);

   ts->setup_err_cb = setup_err_cb;
   ts->cb = cb;
   ts->cb_data = data;
   ts->uri = uri;
   ts->appname = NULL;
   ts->handshake_ok_to_send = false;

   return ts;
}
#ifdef MONGOC_ENABLE_SSL
void
mongoc_topology_scanner_set_ssl_opts (mongoc_topology_scanner_t *ts,
                                      mongoc_ssl_opt_t *opts)
{
   /* Store TLS options and switch connection setup to the TLS path. */
   ts->ssl_opts = opts;
   ts->setup = mongoc_async_cmd_tls_setup;
}
#endif
void
mongoc_topology_scanner_set_stream_initiator (mongoc_topology_scanner_t *ts,
                                              mongoc_stream_initiator_t si,
                                              void *ctx)
{
   /* A custom stream initiator replaces the default setup path. */
   ts->initiator = si;
   ts->initiator_context = ctx;
   ts->setup = NULL;
}
/* Destroy every scanner node, then free all scanner-owned resources. */
void
mongoc_topology_scanner_destroy (mongoc_topology_scanner_t *ts)
{
   mongoc_topology_scanner_node_t *ele, *tmp;

   /* Tear down each node without marking its stream as failed. */
   DL_FOREACH_SAFE (ts->nodes, ele, tmp)
   {
      mongoc_topology_scanner_node_destroy (ele, false);
   }

   mongoc_async_destroy (ts->async);
   bson_destroy (&ts->ismaster_cmd);
   bson_destroy (&ts->ismaster_cmd_with_handshake);
+   bson_destroy (&ts->cluster_time);

   /* This field can be set by a mongoc_client */
   bson_free ((char *) ts->appname);

   bson_free (ts);
}
+/* whether the scanner was successfully initialized - false if a mongodb+srv
+ * URI failed to resolve to any hosts */
+bool
+mongoc_topology_scanner_valid (mongoc_topology_scanner_t *ts)
+{
+ return ts->nodes != NULL;
+}
+
/* Create a scanner node for @host with the given server @id and append it
 * to the scanner's node list. */
void
mongoc_topology_scanner_add (mongoc_topology_scanner_t *ts,
                             const mongoc_host_list_t *host,
                             uint32_t id)
{
   mongoc_topology_scanner_node_t *node;

   node = (mongoc_topology_scanner_node_t *) bson_malloc0 (sizeof (*node));

   memcpy (&node->host, host, sizeof (*host));

   node->id = id;
   node->ts = ts;
   /* -1 means "never": the node has not yet been used and has not failed. */
   node->last_failed = -1;
   node->last_used = -1;

   DL_APPEND (ts->nodes, node);
}
/* Kick off a non-blocking ismaster check for the node with @id, if such
 * a node exists. If stream setup fails the node stays in the scanner and
 * is destroyed only after the scan. */
void
mongoc_topology_scanner_scan (mongoc_topology_scanner_t *ts,
                              uint32_t id,
                              int64_t timeout_msec)
{
   mongoc_topology_scanner_node_t *target;

   BSON_ASSERT (timeout_msec < INT32_MAX);

   target = mongoc_topology_scanner_get_node (ts, id);
   if (!target) {
      return;
   }

   /* begin non-blocking connection, don't wait for success */
   if (mongoc_topology_scanner_node_setup (target, &target->last_error)) {
      _begin_ismaster_cmd (ts, target, timeout_msec);
   }
}
/* Flag @node for removal after the current scan completes; any in-flight
 * async command is marked canceled so its result is ignored. */
void
mongoc_topology_scanner_node_retire (mongoc_topology_scanner_node_t *node)
{
   node->retired = true;

   if (node->cmd) {
      node->cmd->state = MONGOC_ASYNC_CMD_CANCELED_STATE;
   }
}
/* Tear down @node's connection state: cached DNS results, any pending
 * async command, and the stream itself. When @failed is true the stream
 * is closed via mongoc_stream_failed rather than a normal destroy. */
void
mongoc_topology_scanner_node_disconnect (mongoc_topology_scanner_node_t *node,
                                         bool failed)
{
   if (node->dns_results != NULL) {
      freeaddrinfo (node->dns_results);
      node->dns_results = NULL;
      node->current_dns_result = NULL;
   }

   if (node->cmd != NULL) {
      mongoc_async_cmd_destroy (node->cmd);
      node->cmd = NULL;
   }

   if (node->stream != NULL) {
      if (failed) {
         mongoc_stream_failed (node->stream);
      } else {
         mongoc_stream_destroy (node->stream);
      }

      node->stream = NULL;
   }
}
/* Unlink @node from its scanner's node list, drop its connection state
 * (failing the stream if @failed), and free the node itself. */
void
mongoc_topology_scanner_node_destroy (mongoc_topology_scanner_node_t *node,
                                      bool failed)
{
   DL_DELETE (node->ts->nodes, node);
   mongoc_topology_scanner_node_disconnect (node, failed);
   bson_free (node);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_scanner_get_node --
 *
 *      Return the scanner node with the given id, or NULL if there is
 *      none. The walk stops early once an id greater than @id is seen
 *      (node ids are assumed to appear in ascending order).
 *
 *--------------------------------------------------------------------------
 */
mongoc_topology_scanner_node_t *
mongoc_topology_scanner_get_node (mongoc_topology_scanner_t *ts, uint32_t id)
{
   mongoc_topology_scanner_node_t *ele, *tmp;

   DL_FOREACH_SAFE (ts->nodes, ele, tmp)
   {
      if (ele->id > id) {
         /* no match is possible past this point */
         return NULL;
      }

      if (ele->id == id) {
         return ele;
      }
   }

   return NULL;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_scanner_has_node_for_host --
 *
 *      Whether the scanner already tracks a node for the given host
 *      and port.
 *
 *--------------------------------------------------------------------------
 */
bool
mongoc_topology_scanner_has_node_for_host (mongoc_topology_scanner_t *ts,
                                           mongoc_host_list_t *host)
{
   mongoc_topology_scanner_node_t *ele, *tmp;
   bool found = false;

   DL_FOREACH_SAFE (ts->nodes, ele, tmp)
   {
      if (_mongoc_host_list_equal (&ele->host, host)) {
         found = true;
         break;
      }
   }

   return found;
}
/*
 *-----------------------------------------------------------------------
 *
 * This is the callback passed to async_cmd when we're running
 * ismasters from within the topology monitor.
 *
 * On success, reports a heartbeat-succeeded event; on error or timeout,
 * fails the node's stream, records a descriptive error on the node, and
 * reports a heartbeat-failed event. In both cases the scanner's callback
 * is invoked with whatever response (possibly NULL) was received.
 *
 *-----------------------------------------------------------------------
 */
static void
mongoc_topology_scanner_ismaster_handler (
   mongoc_async_cmd_result_t async_status,
   const bson_t *ismaster_response,
   int64_t rtt_msec,
   void *data,
   bson_error_t *error)
{
   mongoc_topology_scanner_node_t *node;
   mongoc_topology_scanner_t *ts;
   int64_t now;
   const char *message;

   BSON_ASSERT (data);

   node = (mongoc_topology_scanner_node_t *) data;
   ts = node->ts;
   /* the in-flight command is finished; clear our reference to it */
   node->cmd = NULL;

   /* retired nodes are reaped after the scan; don't report their results */
   if (node->retired) {
      return;
   }

   now = bson_get_monotonic_time ();

   /* if no ismaster response, async cmd had an error or timed out */
   if (!ismaster_response || async_status == MONGOC_ASYNC_CMD_ERROR ||
       async_status == MONGOC_ASYNC_CMD_TIMEOUT) {
      mongoc_stream_failed (node->stream);
      node->stream = NULL;
      node->last_failed = now;

      /* prefer the command's own error text; else synthesize one */
      if (error->code) {
         message = error->message;
      } else {
         if (async_status == MONGOC_ASYNC_CMD_TIMEOUT) {
            message = "connection timeout";
         } else {
            message = "connection error";
         }
      }

      bson_set_error (&node->last_error,
                      MONGOC_ERROR_CLIENT,
                      MONGOC_ERROR_STREAM_CONNECT,
                      "%s calling ismaster on \'%s\'",
                      message,
                      node->host.host_and_port);

      _mongoc_topology_scanner_monitor_heartbeat_failed (
         ts, &node->host, &node->last_error);
   } else {
      node->last_failed = -1;
      _mongoc_topology_scanner_monitor_heartbeat_succeeded (
         ts, &node->host, ismaster_response);
   }

   node->last_used = now;

   ts->cb (node->id, ismaster_response, rtt_msec, ts->cb_data, error);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_scanner_node_connect_tcp --
 *
 *       Create a socket stream for this node, begin a non-blocking
 *       connect and return.
 *
 * Returns:
 *       A stream. On failure, return NULL and fill out the error.
 *
 *--------------------------------------------------------------------------
 */
static mongoc_stream_t *
mongoc_topology_scanner_node_connect_tcp (mongoc_topology_scanner_node_t *node,
                                          bson_error_t *error)
{
   mongoc_socket_t *sock = NULL;
   struct addrinfo hints;
   struct addrinfo *rp;
   char portstr[8];
   mongoc_host_list_t *host;
   int s;

   ENTRY;

   host = &node->host;

   /* resolve once and cache the results on the node, so a later retry can
    * resume from the next addrinfo entry */
   if (!node->dns_results) {
      bson_snprintf (portstr, sizeof portstr, "%hu", host->port);

      memset (&hints, 0, sizeof hints);
      hints.ai_family = host->family;
      hints.ai_socktype = SOCK_STREAM;
      hints.ai_flags = 0;
      hints.ai_protocol = 0;

      s = getaddrinfo (host->host, portstr, &hints, &node->dns_results);

      if (s != 0) {
         mongoc_counter_dns_failure_inc ();
         bson_set_error (error,
                         MONGOC_ERROR_STREAM,
                         MONGOC_ERROR_STREAM_NAME_RESOLUTION,
                         "Failed to resolve '%s'",
                         host->host);
         RETURN (NULL);
      }

      node->current_dns_result = node->dns_results;
      mongoc_counter_dns_success_inc ();
   }

   for (; node->current_dns_result;
        node->current_dns_result = node->current_dns_result->ai_next) {
      rp = node->current_dns_result;

      /*
       * Create a new non-blocking socket.
       */
      if (!(sock = mongoc_socket_new (
               rp->ai_family, rp->ai_socktype, rp->ai_protocol))) {
         continue;
      }

      /* connect with timeout 0; the result is intentionally unchecked here
       * — presumably completion is detected later by the async machinery
       * (TODO confirm) */
      mongoc_socket_connect (
         sock, rp->ai_addr, (mongoc_socklen_t) rp->ai_addrlen, 0);

      break;
   }

   if (!sock) {
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_CONNECT,
                      "Failed to connect to target host: '%s'",
                      host->host_and_port);
      freeaddrinfo (node->dns_results);
      node->dns_results = NULL;
      node->current_dns_result = NULL;
      RETURN (NULL);
   }

   /* use RETURN (not a bare return) so the exit pairs with ENTRY for
    * trace logging, consistent with every other exit path above */
   RETURN (mongoc_stream_socket_new (sock));
}
/* Create a connected stream over a UNIX domain socket whose path is the
 * node's host string. Returns NULL and sets @error on any failure
 * (always, on Windows). */
static mongoc_stream_t *
mongoc_topology_scanner_node_connect_unix (mongoc_topology_scanner_node_t *node,
                                           bson_error_t *error)
{
#ifdef _WIN32
   ENTRY;
   bson_set_error (error,
                   MONGOC_ERROR_STREAM,
                   MONGOC_ERROR_STREAM_CONNECT,
                   "UNIX domain sockets not supported on win32.");
   RETURN (NULL);
#else
   struct sockaddr_un saddr;
   mongoc_socket_t *sock;
   mongoc_stream_t *ret = NULL;
   mongoc_host_list_t *host;

   ENTRY;

   host = &node->host;

   memset (&saddr, 0, sizeof saddr);
   saddr.sun_family = AF_UNIX;
   /* sizeof - 1 leaves the final byte zero, guaranteeing NUL termination */
   bson_snprintf (saddr.sun_path, sizeof saddr.sun_path - 1, "%s", host->host);

   sock = mongoc_socket_new (AF_UNIX, SOCK_STREAM, 0);

   if (sock == NULL) {
      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_SOCKET,
                      "Failed to create socket.");
      RETURN (NULL);
   }

   /* note: timeout argument is -1 here, vs. 0 in the TCP path */
   if (-1 == mongoc_socket_connect (
                sock, (struct sockaddr *) &saddr, sizeof saddr, -1)) {
      char buf[128];
      char *errstr;

      errstr = bson_strerror_r (mongoc_socket_errno (sock), buf, sizeof (buf));

      bson_set_error (error,
                      MONGOC_ERROR_STREAM,
                      MONGOC_ERROR_STREAM_CONNECT,
                      "Failed to connect to UNIX domain socket: %s",
                      errstr);
      mongoc_socket_destroy (sock);
      RETURN (NULL);
   }

   ret = mongoc_stream_socket_new (sock);

   RETURN (ret);
#endif
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_scanner_node_setup --
 *
 *      Create a stream and begin a non-blocking connect.
 *
 *      Fires the heartbeat-started monitoring event, then connects via
 *      the custom initiator if one is installed, otherwise over a UNIX
 *      or TCP socket (wrapped in TLS when SSL options are set). A node
 *      that already has a stream is left untouched.
 *
 * Returns:
 *      true on success, or false and error is set.
 *
 *--------------------------------------------------------------------------
 */
bool
mongoc_topology_scanner_node_setup (mongoc_topology_scanner_node_t *node,
                                    bson_error_t *error)
{
   mongoc_stream_t *sock_stream;

   _mongoc_topology_scanner_monitor_heartbeat_started (node->ts, &node->host);

   /* reuse an existing stream */
   if (node->stream) {
      return true;
   }

   BSON_ASSERT (!node->retired);

   if (node->ts->initiator) {
      sock_stream = node->ts->initiator (
         node->ts->uri, &node->host, node->ts->initiator_context, error);
   } else {
      if (node->host.family == AF_UNIX) {
         sock_stream = mongoc_topology_scanner_node_connect_unix (node, error);
      } else {
         sock_stream = mongoc_topology_scanner_node_connect_tcp (node, error);
      }

#ifdef MONGOC_ENABLE_SSL
      if (sock_stream && node->ts->ssl_opts) {
         mongoc_stream_t *original = sock_stream;

         sock_stream = mongoc_stream_tls_new_with_hostname (
            sock_stream, node->host.host, node->ts->ssl_opts, 1);

         /* TLS wrapping failed: free the underlying stream ourselves */
         if (!sock_stream) {
            mongoc_stream_destroy (original);
         }
      }
#endif
   }

   if (!sock_stream) {
      /* setup failed: emit the monitoring event and notify the scanner's
       * error callback */
      _mongoc_topology_scanner_monitor_heartbeat_failed (
         node->ts, &node->host, error);
      node->ts->setup_err_cb (node->id, node->ts->cb_data, error);
      return false;
   }

   node->stream = sock_stream;
   node->has_auth = false;
   node->timestamp = bson_get_monotonic_time ();

   return true;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_scanner_start --
 *
 *      Initializes the scanner and begins a full topology check. This
 *      should be called once before calling mongoc_topology_scanner_work()
 *      repeatedly to complete the scan.
 *
 *      If "obey_cooldown" is true, this is a single-threaded blocking scan
 *      that must obey the Server Discovery And Monitoring Spec's cooldownMS:
 *
 *      "After a single-threaded client gets a network error trying to check
 *      a server, the client skips re-checking the server until cooldownMS has
 *      passed.
 *
 *      "This avoids spending connectTimeoutMS on each unavailable server
 *      during each scan.
 *
 *      "This value MUST be 5000 ms, and it MUST NOT be configurable."
 *
 *--------------------------------------------------------------------------
 */
void
mongoc_topology_scanner_start (mongoc_topology_scanner_t *ts,
                               int64_t timeout_msec,
                               bool obey_cooldown)
{
   mongoc_topology_scanner_node_t *node, *tmp;
   /* INT64_MAX == no cooldown: every node passes the last_failed check */
   int64_t cooldown = INT64_MAX;

   BSON_ASSERT (ts);

   /* a scan is already running; do not start another */
   if (ts->in_progress) {
      return;
   }

   if (obey_cooldown) {
      /* when current cooldown period began */
      cooldown =
         bson_get_monotonic_time () - 1000 * MONGOC_TOPOLOGY_COOLDOWN_MS;
   }

   DL_FOREACH_SAFE (ts->nodes, node, tmp)
   {
      /* check node if it last failed before current cooldown period began */
      if (node->last_failed < cooldown) {
         if (mongoc_topology_scanner_node_setup (node, &node->last_error)) {
            BSON_ASSERT (!node->cmd);
            _begin_ismaster_cmd (ts, node, timeout_msec);
         }
      }
   }
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_scanner_finish_scan --
 *
 *      Summarizes all scanner node errors into one error message.
 *
 *--------------------------------------------------------------------------
 */
void
_mongoc_topology_scanner_finish (mongoc_topology_scanner_t *ts)
{
   mongoc_topology_scanner_node_t *node, *tmp;
   bson_error_t *error = &ts->error;
   bson_string_t *msg;

   memset (&ts->error, 0, sizeof (bson_error_t));

   msg = bson_string_new (NULL);

   DL_FOREACH_SAFE (ts->nodes, node, tmp)
   {
      if (!node->last_error.code) {
         continue;
      }

      /* separate the bracketed per-node messages with a space */
      if (msg->len) {
         bson_string_append_c (msg, ' ');
      }

      bson_string_append_printf (msg, "[%s]", node->last_error.message);

      /* last error domain and code win */
      error->domain = node->last_error.domain;
      error->code = node->last_error.code;
   }

   /* the combined message is truncated to the fixed-size error buffer */
   bson_strncpy ((char *) &error->message, msg->str, sizeof (error->message));
   bson_string_free (msg, true);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_scanner_work --
 *
 *      Crank the knob on the topology scanner state machine. This should
 *      be called only after mongoc_topology_scanner_start() has been used
 *      to begin the scan. Delegates to the async command runner.
 *
 *--------------------------------------------------------------------------
 */
void
mongoc_topology_scanner_work (mongoc_topology_scanner_t *ts)
{
   mongoc_async_run (ts->async);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_scanner_get_error --
 *
 *      Copy the scanner's current error into @error; the copy may be
 *      "no error" (code 0).
 *
 *--------------------------------------------------------------------------
 */
void
mongoc_topology_scanner_get_error (mongoc_topology_scanner_t *ts,
                                   bson_error_t *error)
{
   BSON_ASSERT (ts);
   BSON_ASSERT (error);

   *error = ts->error;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_topology_scanner_reset --
 *
 *      Destroy "retired" nodes that failed or were removed in the
 *      previous scan, so their hosts can be checked again next time.
 *
 *--------------------------------------------------------------------------
 */
void
mongoc_topology_scanner_reset (mongoc_topology_scanner_t *ts)
{
   mongoc_topology_scanner_node_t *node, *tmp;

   DL_FOREACH_SAFE (ts->nodes, node, tmp)
   {
      if (!node->retired) {
         continue;
      }

      mongoc_topology_scanner_node_destroy (node, true);
   }
}
/*
 * Set the scanner's application name. Returns false (and logs an error)
 * if the name is invalid or the appname was already set.
 */
bool
_mongoc_topology_scanner_set_appname (mongoc_topology_scanner_t *ts,
                                      const char *appname)
{
   if (!_mongoc_handshake_appname_is_valid (appname)) {
      MONGOC_ERROR ("Cannot set appname: %s is invalid", appname);
      return false;
   }

   if (ts->appname) {
      MONGOC_ERROR ("Cannot set appname more than once");
      return false;
   }

   ts->appname = bson_strdup (appname);

   return true;
}
+/*
+ * Set the scanner's clusterTime unconditionally: don't compare with prior
+ * @cluster_time is like {clusterTime: <timestamp>}
+ */
+void
+_mongoc_topology_scanner_set_cluster_time (mongoc_topology_scanner_t *ts,
+ const bson_t *cluster_time)
+{
+ bson_destroy (&ts->cluster_time);
+ bson_copy_to (cluster_time, &ts->cluster_time);
+}
+
/* SDAM Monitoring Spec: send HeartbeatStartedEvent */
static void
_mongoc_topology_scanner_monitor_heartbeat_started (
   const mongoc_topology_scanner_t *ts, const mongoc_host_list_t *host)
{
   mongoc_apm_server_heartbeat_started_t event;

   if (!ts->apm_callbacks.server_heartbeat_started) {
      return;
   }

   event.host = host;
   event.context = ts->apm_context;
   ts->apm_callbacks.server_heartbeat_started (&event);
}
/* SDAM Monitoring Spec: send HeartbeatSucceededEvent */
static void
_mongoc_topology_scanner_monitor_heartbeat_succeeded (
   const mongoc_topology_scanner_t *ts,
   const mongoc_host_list_t *host,
   const bson_t *reply)
{
   mongoc_apm_server_heartbeat_succeeded_t event;

   if (!ts->apm_callbacks.server_heartbeat_succeeded) {
      return;
   }

   event.host = host;
   event.context = ts->apm_context;
   event.reply = reply;
   ts->apm_callbacks.server_heartbeat_succeeded (&event);
}
/* SDAM Monitoring Spec: send HeartbeatFailedEvent */
static void
_mongoc_topology_scanner_monitor_heartbeat_failed (
   const mongoc_topology_scanner_t *ts,
   const mongoc_host_list_t *host,
   const bson_error_t *error)
{
   mongoc_apm_server_heartbeat_failed_t event;

   if (!ts->apm_callbacks.server_heartbeat_failed) {
      return;
   }

   event.host = host;
   event.context = ts->apm_context;
   event.error = error;
   ts->apm_callbacks.server_heartbeat_failed (&event);
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology.c
similarity index 81%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology.c
index 0b076b36..3b245caf 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-topology.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-topology.c
@@ -1,1157 +1,1421 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-config.h"
#include "mongoc-handshake.h"
#include "mongoc-handshake-private.h"
#include "mongoc-error.h"
#include "mongoc-log.h"
#include "mongoc-topology-private.h"
#include "mongoc-topology-description-apm-private.h"
#include "mongoc-client-private.h"
#include "mongoc-uri-private.h"
#include "mongoc-util-private.h"
+#include "mongoc-host-list-private.h"
+#include "mongoc-trace-private.h"
#include "utlist.h"
static void
_mongoc_topology_background_thread_stop (mongoc_topology_t *topology);
static void
_mongoc_topology_request_scan (mongoc_topology_t *topology);
static bool
_mongoc_topology_reconcile_add_nodes (void *item, void *ctx)
{
mongoc_server_description_t *sd = item;
mongoc_topology_t *topology = (mongoc_topology_t *) ctx;
mongoc_topology_scanner_t *scanner = topology->scanner;
/* quickly search by id, then check if a node for this host was retired in
* this scan. */
if (!mongoc_topology_scanner_get_node (scanner, sd->id) &&
!mongoc_topology_scanner_has_node_for_host (scanner, &sd->host)) {
mongoc_topology_scanner_add (scanner, &sd->host, sd->id);
mongoc_topology_scanner_scan (
scanner, sd->id, topology->connect_timeout_msec);
}
return true;
}
/* Bring the scanner's node list in sync with the topology description:
 * add monitors for newly discovered servers, and retire monitors whose
 * server is no longer in the description. */
void
mongoc_topology_reconcile (mongoc_topology_t *topology)
{
   mongoc_topology_description_t *description = &topology->description;
   mongoc_topology_scanner_t *scanner = topology->scanner;
   mongoc_topology_scanner_node_t *ele, *tmp;

   /* add newly discovered nodes */
   mongoc_set_for_each (
      description->servers, _mongoc_topology_reconcile_add_nodes, topology);

   /* retire nodes whose server has been removed */
   DL_FOREACH_SAFE (scanner->nodes, ele, tmp)
   {
      if (!mongoc_topology_description_server_by_id (
             description, ele->id, NULL)) {
         mongoc_topology_scanner_node_retire (ele);
      }
   }
}
/* call this while already holding the lock */
/* Feed one server's ismaster result (or error) into the topology
 * description, reconcile the scanner's nodes with any resulting
 * membership changes, and report whether the server is still present. */
static bool
_mongoc_topology_update_no_lock (uint32_t id,
                                 const bson_t *ismaster_response,
                                 int64_t rtt_msec,
                                 mongoc_topology_t *topology,
                                 const bson_error_t *error /* IN */)
{
   mongoc_topology_description_handle_ismaster (
      &topology->description, id, ismaster_response, rtt_msec, error);

   /* The processing of the ismaster results above may have added/removed
    * server descriptions. We need to reconcile that with our monitoring agents
    */
   mongoc_topology_reconcile (topology);

   /* return false if server removed from topology */
   return mongoc_topology_description_server_by_id (
             &topology->description, id, NULL) != NULL;
}
/*
 *-------------------------------------------------------------------------
 *
 * _mongoc_topology_scanner_setup_err_cb --
 *
 *      Callback method to handle errors during topology scanner node
 *      setup, typically DNS or SSL errors.
 *
 *-------------------------------------------------------------------------
 */
void
_mongoc_topology_scanner_setup_err_cb (uint32_t id,
                                       void *data,
                                       const bson_error_t *error /* IN */)
{
   mongoc_topology_t *topology = (mongoc_topology_t *) data;

   BSON_ASSERT (topology);

   /* record the error against this server with no ismaster reply */
   mongoc_topology_description_handle_ismaster (&topology->description,
                                                id,
                                                NULL /* ismaster reply */,
                                                -1 /* rtt_msec */,
                                                error);
}
/*
 *-------------------------------------------------------------------------
 *
 * _mongoc_topology_scanner_cb --
 *
 *      Callback method to handle ismaster responses received by async
 *      command objects.
 *
 *      NOTE: This method locks the given topology's mutex.
 *
 *-------------------------------------------------------------------------
 */
void
_mongoc_topology_scanner_cb (uint32_t id,
                             const bson_t *ismaster_response,
                             int64_t rtt_msec,
                             void *data,
                             const bson_error_t *error /* IN */)
{
   mongoc_topology_t *topology;
   mongoc_server_description_t *sd;

   BSON_ASSERT (data);

   topology = (mongoc_topology_t *) data;

   mongoc_mutex_lock (&topology->mutex);
   sd = mongoc_topology_description_server_by_id (
      &topology->description, id, NULL);

   /* Server Discovery and Monitoring Spec: "Once a server is connected, the
    * client MUST change its type to Unknown only after it has retried the
    * server once." */
   if (!ismaster_response && sd && sd->type != MONGOC_SERVER_UNKNOWN) {
      _mongoc_topology_update_no_lock (
         id, ismaster_response, rtt_msec, topology, error);

      /* add another ismaster call to the current scan - the scan continues
       * until all commands are done */
      mongoc_topology_scanner_scan (
         topology->scanner, sd->id, topology->connect_timeout_msec);
   } else {
      _mongoc_topology_update_no_lock (
         id, ismaster_response, rtt_msec, topology, error);

      /* wake threads waiting on a topology update */
      mongoc_cond_broadcast (&topology->cond_client);
   }

   mongoc_mutex_unlock (&topology->mutex);
}
/*
*-------------------------------------------------------------------------
*
* mongoc_topology_new --
*
* Creates and returns a new topology object.
*
* Returns:
* A new topology object.
*
* Side effects:
* None.
*
*-------------------------------------------------------------------------
*/
mongoc_topology_t *
mongoc_topology_new (const mongoc_uri_t *uri, bool single_threaded)
{
int64_t heartbeat_default;
int64_t heartbeat;
mongoc_topology_t *topology;
+ bool topology_valid;
mongoc_topology_description_type_t init_type;
+ const char *service;
+ char *prefixed_service;
uint32_t id;
const mongoc_host_list_t *hl;
BSON_ASSERT (uri);
topology = (mongoc_topology_t *) bson_malloc0 (sizeof *topology);
-
- /*
- * Not ideal, but there's no great way to do this.
- * Base on the URI, we assume:
- * - if we've got a replicaSet name, initialize to RS_NO_PRIMARY
- * - otherwise, if the seed list has a single host, initialize to SINGLE
- * - everything else gets initialized to UNKNOWN
- */
- if (mongoc_uri_get_replica_set (uri)) {
- init_type = MONGOC_TOPOLOGY_RS_NO_PRIMARY;
- } else {
- hl = mongoc_uri_get_hosts (uri);
- if (hl->next) {
- init_type = MONGOC_TOPOLOGY_UNKNOWN;
- } else {
- init_type = MONGOC_TOPOLOGY_SINGLE;
- }
- }
-
+ topology->session_pool = NULL;
heartbeat_default =
single_threaded ? MONGOC_TOPOLOGY_HEARTBEAT_FREQUENCY_MS_SINGLE_THREADED
: MONGOC_TOPOLOGY_HEARTBEAT_FREQUENCY_MS_MULTI_THREADED;
heartbeat = mongoc_uri_get_option_as_int32 (
uri, MONGOC_URI_HEARTBEATFREQUENCYMS, heartbeat_default);
- mongoc_topology_description_init (
- &topology->description, init_type, heartbeat);
+ mongoc_topology_description_init (&topology->description, heartbeat);
topology->description.set_name =
bson_strdup (mongoc_uri_get_replica_set (uri));
topology->uri = mongoc_uri_copy (uri);
topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_OFF;
topology->scanner =
mongoc_topology_scanner_new (topology->uri,
_mongoc_topology_scanner_setup_err_cb,
_mongoc_topology_scanner_cb,
topology);
topology->single_threaded = single_threaded;
if (single_threaded) {
/* Server Selection Spec:
*
* "Single-threaded drivers MUST provide a "serverSelectionTryOnce"
* mode, in which the driver scans the topology exactly once after
* server selection fails, then either selects a server or raises an
* error.
*
* "The serverSelectionTryOnce option MUST be true by default."
*/
topology->server_selection_try_once = mongoc_uri_get_option_as_bool (
uri, MONGOC_URI_SERVERSELECTIONTRYONCE, true);
} else {
topology->server_selection_try_once = false;
}
topology->server_selection_timeout_msec = mongoc_uri_get_option_as_int32 (
topology->uri,
MONGOC_URI_SERVERSELECTIONTIMEOUTMS,
MONGOC_TOPOLOGY_SERVER_SELECTION_TIMEOUT_MS);
topology->local_threshold_msec =
mongoc_uri_get_local_threshold_option (topology->uri);
/* Total time allowed to check a server is connectTimeoutMS.
* Server Discovery And Monitoring Spec:
*
* "The socket used to check a server MUST use the same connectTimeoutMS as
* regular sockets. Multi-threaded clients SHOULD set monitoring sockets'
* socketTimeoutMS to the connectTimeoutMS."
*/
topology->connect_timeout_msec =
mongoc_uri_get_option_as_int32 (topology->uri,
MONGOC_URI_CONNECTTIMEOUTMS,
MONGOC_DEFAULT_CONNECTTIMEOUTMS);
mongoc_mutex_init (&topology->mutex);
mongoc_cond_init (&topology->cond_client);
mongoc_cond_init (&topology->cond_server);
- for (hl = mongoc_uri_get_hosts (uri); hl; hl = hl->next) {
+ topology_valid = true;
+ service = mongoc_uri_get_service (uri);
+ if (service) {
+ /* a mongodb+srv URI. try SRV lookup, if no error then also try TXT */
+ prefixed_service = bson_strdup_printf ("_mongodb._tcp.%s", service);
+ if (!_mongoc_client_get_rr (prefixed_service,
+ MONGOC_RR_SRV,
+ topology->uri,
+ &topology->scanner->error) ||
+ !_mongoc_client_get_rr (service,
+ MONGOC_RR_TXT,
+ topology->uri,
+ &topology->scanner->error)) {
+ topology_valid = false;
+ }
+
+ bson_free (prefixed_service);
+ }
+
+ /*
+ * Set topology type from URI:
+ * - if we've got a replicaSet name, initialize to RS_NO_PRIMARY
+ * - otherwise, if the seed list has a single host, initialize to SINGLE
+ * - everything else gets initialized to UNKNOWN
+ */
+ hl = mongoc_uri_get_hosts (topology->uri);
+ if (mongoc_uri_get_replica_set (topology->uri)) {
+ init_type = MONGOC_TOPOLOGY_RS_NO_PRIMARY;
+ } else {
+ if (hl && hl->next) {
+ init_type = MONGOC_TOPOLOGY_UNKNOWN;
+ } else {
+ init_type = MONGOC_TOPOLOGY_SINGLE;
+ }
+ }
+
+ topology->description.type = init_type;
+
+ if (!topology_valid) {
+ /* add no nodes */
+ return topology;
+ }
+
+ while (hl) {
mongoc_topology_description_add_server (
&topology->description, hl->host_and_port, &id);
mongoc_topology_scanner_add (topology->scanner, hl, id);
+
+ hl = hl->next;
}
return topology;
}
/*
*-------------------------------------------------------------------------
*
* mongoc_topology_set_apm_callbacks --
*
* Set Application Performance Monitoring callbacks.
*
*-------------------------------------------------------------------------
*/
void
mongoc_topology_set_apm_callbacks (mongoc_topology_t *topology,
mongoc_apm_callbacks_t *callbacks,
void *context)
{
if (callbacks) {
memcpy (&topology->description.apm_callbacks,
callbacks,
sizeof (mongoc_apm_callbacks_t));
memcpy (&topology->scanner->apm_callbacks,
callbacks,
sizeof (mongoc_apm_callbacks_t));
}
topology->description.apm_context = context;
topology->scanner->apm_context = context;
}
/*
*-------------------------------------------------------------------------
*
* mongoc_topology_destroy --
*
* Free the memory associated with this topology object.
*
* Returns:
* None.
*
* Side effects:
* @topology will be cleaned up.
*
*-------------------------------------------------------------------------
*/
void
mongoc_topology_destroy (mongoc_topology_t *topology)
{
+ mongoc_server_session_t *ss, *tmp1, *tmp2;
+
if (!topology) {
return;
}
_mongoc_topology_background_thread_stop (topology);
_mongoc_topology_description_monitor_closed (&topology->description);
mongoc_uri_destroy (topology->uri);
mongoc_topology_description_destroy (&topology->description);
mongoc_topology_scanner_destroy (topology->scanner);
+
+ CDL_FOREACH_SAFE (topology->session_pool, ss, tmp1, tmp2)
+ {
+ _mongoc_server_session_destroy (ss);
+ }
+
mongoc_cond_destroy (&topology->cond_client);
mongoc_cond_destroy (&topology->cond_server);
mongoc_mutex_destroy (&topology->mutex);
bson_free (topology);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_topology_do_blocking_scan --
 *
 *      Monitoring entry for single-threaded use case. Assumes the caller
 *      has checked that it's the right time to scan.
 *
 *      Starts a cooldown-respecting scan, runs it to completion, then
 *      records the aggregate scanner error and the scan time under the
 *      topology mutex.
 *
 *--------------------------------------------------------------------------
 */
static void
_mongoc_topology_do_blocking_scan (mongoc_topology_t *topology,
                                   bson_error_t *error)
{
   mongoc_topology_scanner_t *scanner;

   topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_SINGLE_THREADED;
   /* NOTE(review): presumably locks in the handshake metadata before any
    * ismaster is sent — see _mongoc_handshake_freeze */
   _mongoc_handshake_freeze ();

   scanner = topology->scanner;
   /* obey_cooldown=true: single-threaded scans must honor cooldownMS */
   mongoc_topology_scanner_start (
      scanner, (int32_t) topology->connect_timeout_msec, true);

   mongoc_topology_scanner_work (topology->scanner);

   mongoc_mutex_lock (&topology->mutex);

   _mongoc_topology_scanner_finish (scanner);
   mongoc_topology_scanner_get_error (scanner, error);

   /* "retired" nodes can be checked again in the next scan */
   mongoc_topology_scanner_reset (scanner);

   topology->last_scan = bson_get_monotonic_time ();
   topology->stale = false;
   mongoc_mutex_unlock (&topology->mutex);
}
bool
mongoc_topology_compatible (const mongoc_topology_description_t *td,
const mongoc_read_prefs_t *read_prefs,
bson_error_t *error)
{
int64_t max_staleness_seconds;
int32_t max_wire_version;
if (td->compatibility_error.code) {
- memcpy (error, &td->compatibility_error, sizeof (bson_error_t));
+ if (error) {
+ memcpy (error, &td->compatibility_error, sizeof (bson_error_t));
+ }
return false;
}
if (!read_prefs) {
/* NULL means read preference Primary */
return true;
}
max_staleness_seconds =
mongoc_read_prefs_get_max_staleness_seconds (read_prefs);
if (max_staleness_seconds != MONGOC_NO_MAX_STALENESS) {
max_wire_version =
mongoc_topology_description_lowest_max_wire_version (td);
if (max_wire_version < WIRE_VERSION_MAX_STALENESS) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
"Not all servers support maxStalenessSeconds");
return false;
}
/* shouldn't happen if we've properly enforced wire version */
if (!mongoc_topology_description_all_sds_have_write_date (td)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
"Not all servers have lastWriteDate");
return false;
}
if (!_mongoc_topology_description_validate_max_staleness (
td, max_staleness_seconds, error)) {
return false;
}
}
return true;
}
/* Build a server-selection error from @msg, appending the scanner's
 * error detail when one is available. */
static void
_mongoc_server_selection_error (const char *msg,
                                const bson_error_t *scanner_error,
                                bson_error_t *error)
{
   bool have_detail = scanner_error && scanner_error->code;

   if (have_detail) {
      bson_set_error (error,
                      MONGOC_ERROR_SERVER_SELECTION,
                      MONGOC_ERROR_SERVER_SELECTION_FAILURE,
                      "%s: %s",
                      msg,
                      scanner_error->message);
   } else {
      bson_set_error (error,
                      MONGOC_ERROR_SERVER_SELECTION,
                      MONGOC_ERROR_SERVER_SELECTION_FAILURE,
                      "%s",
                      msg);
   }
}
/*
 *-------------------------------------------------------------------------
 *
 * mongoc_topology_select --
 *
 *       Selects a server description for an operation based on @optype
 *       and @read_prefs.
 *
 *       NOTE: this method returns a copy of the original server
 *       description. Callers must own and clean up this copy.
 *
 *       NOTE: this method locks and unlocks @topology's mutex.
 *
 * Parameters:
 *       @topology: The topology.
 *       @optype: Whether we are selecting for a read or write operation.
 *       @read_prefs: Required, the read preferences for the command.
 *       @error: Required, out pointer for error info.
 *
 * Returns:
 *       A mongoc_server_description_t, or NULL on failure, in which case
 *       @error will be set.
 *
 * Side effects:
 *       @error may be set.
 *
 *-------------------------------------------------------------------------
 */
mongoc_server_description_t *
mongoc_topology_select (mongoc_topology_t *topology,
                        mongoc_ss_optype_t optype,
                        const mongoc_read_prefs_t *read_prefs,
                        bson_error_t *error)
{
   uint32_t server_id;

   server_id =
      mongoc_topology_select_server_id (topology, optype, read_prefs, error);

   if (!server_id) {
      return NULL;
   }

   /* new copy of the server description */
   return mongoc_topology_server_by_id (topology, server_id, error);
}
/*
*-------------------------------------------------------------------------
*
* mongoc_topology_select_server_id --
*
* Alternative to mongoc_topology_select when you only need the id.
*
* Returns:
* A server id, or 0 on failure, in which case @error will be set.
*
*-------------------------------------------------------------------------
*/
uint32_t
mongoc_topology_select_server_id (mongoc_topology_t *topology,
                                  mongoc_ss_optype_t optype,
                                  const mongoc_read_prefs_t *read_prefs,
                                  bson_error_t *error)
{
   static const char *timeout_msg =
      "No suitable servers found: `serverSelectionTimeoutMS` expired";
   int r;
   int64_t local_threshold_ms;
   mongoc_server_description_t *selected_server = NULL;
   bool try_once;
   int64_t sleep_usec;
   bool tried_once;
   bson_error_t scanner_error = {0};
   int64_t heartbeat_msec;
   uint32_t server_id;
   /* These names come from the Server Selection Spec pseudocode */
   int64_t loop_start; /* when we entered this function */
   int64_t loop_end; /* when we last completed a loop (single-threaded) */
   int64_t scan_ready; /* the soonest we can do a blocking scan */
   int64_t next_update; /* the latest we must do a blocking scan */
   int64_t expire_at; /* when server selection timeout expires */
   BSON_ASSERT (topology);
   /* fail fast when the scanner has no valid hosts to monitor */
+ if (!mongoc_topology_scanner_valid (topology->scanner)) {
+ if (error) {
+ mongoc_topology_scanner_get_error (topology->scanner, error);
+ error->domain = MONGOC_ERROR_SERVER_SELECTION;
+ error->code = MONGOC_ERROR_SERVER_SELECTION_FAILURE;
+ }
+
+ return 0;
+ }
   heartbeat_msec = topology->description.heartbeat_msec;
   local_threshold_ms = topology->local_threshold_msec;
   try_once = topology->server_selection_try_once;
   /* monotonic clock values in this function are microseconds */
   loop_start = loop_end = bson_get_monotonic_time ();
   expire_at =
      loop_start + ((int64_t) topology->server_selection_timeout_msec * 1000);
   if (topology->single_threaded) {
      _mongoc_topology_description_monitor_opening (&topology->description);
      tried_once = false;
      next_update = topology->last_scan + heartbeat_msec * 1000;
      if (next_update < loop_start) {
         /* we must scan now */
         topology->stale = true;
      }
      /* until we find a server or time out */
      for (;;) {
         if (topology->stale) {
            /* how soon are we allowed to scan? */
            scan_ready = topology->last_scan +
               MONGOC_TOPOLOGY_MIN_HEARTBEAT_FREQUENCY_MS * 1000;
            if (scan_ready > expire_at && !try_once) {
               /* selection timeout will expire before min heartbeat passes */
               /* NOTE(review): message casing differs from timeout_msg
                * above ("serverselectiontimeoutms") — confirm intended */
               _mongoc_server_selection_error (
                  "No suitable servers found: "
                  "`serverselectiontimeoutms` timed out",
                  &scanner_error,
                  error);
               topology->stale = true;
               return 0;
            }
            /* wait out the remainder of the min heartbeat interval */
            sleep_usec = scan_ready - loop_end;
            if (sleep_usec > 0) {
               _mongoc_usleep (sleep_usec);
            }
            /* takes up to connectTimeoutMS. sets "last_scan", clears "stale" */
            _mongoc_topology_do_blocking_scan (topology, &scanner_error);
            loop_end = topology->last_scan;
            tried_once = true;
         }
         /* bail out if read prefs need features this topology lacks */
         if (!mongoc_topology_compatible (
                &topology->description, read_prefs, error)) {
            return 0;
         }
         selected_server = mongoc_topology_description_select (
            &topology->description, optype, read_prefs, local_threshold_ms);
         if (selected_server) {
            return selected_server->id;
         }
         /* nothing suitable yet: force a re-scan on the next iteration */
         topology->stale = true;
         if (try_once) {
            if (tried_once) {
               _mongoc_server_selection_error (
                  "No suitable servers found (`serverSelectionTryOnce` set)",
                  &scanner_error,
                  error);
               return 0;
            }
         } else {
            loop_end = bson_get_monotonic_time ();
            if (loop_end > expire_at) {
               /* no time left in server_selection_timeout_msec */
               _mongoc_server_selection_error (
                  timeout_msg, &scanner_error, error);
               return 0;
            }
         }
      }
   }
   /* With background thread */
   /* we break out when we've found a server or timed out */
   for (;;) {
      mongoc_mutex_lock (&topology->mutex);
      if (!mongoc_topology_compatible (
             &topology->description, read_prefs, error)) {
         mongoc_mutex_unlock (&topology->mutex);
         return 0;
      }
      selected_server = mongoc_topology_description_select (
         &topology->description, optype, read_prefs, local_threshold_ms);
      if (!selected_server) {
         /* wake the scanner thread, then wait (timedwait takes ms) */
         _mongoc_topology_request_scan (topology);
         r = mongoc_cond_timedwait (&topology->cond_client,
                                    &topology->mutex,
                                    (expire_at - loop_start) / 1000);
         mongoc_topology_scanner_get_error (topology->scanner, &scanner_error);
         mongoc_mutex_unlock (&topology->mutex);
#ifdef _WIN32
         if (r == WSAETIMEDOUT) {
#else
         if (r == ETIMEDOUT) {
#endif
            /* handle timeouts */
            _mongoc_server_selection_error (timeout_msg, &scanner_error, error);
            return 0;
         } else if (r) {
            bson_set_error (error,
                            MONGOC_ERROR_SERVER_SELECTION,
                            MONGOC_ERROR_SERVER_SELECTION_FAILURE,
                            "Unknown error '%d' received while waiting on "
                            "thread condition",
                            r);
            return 0;
         }
         loop_start = bson_get_monotonic_time ();
         if (loop_start > expire_at) {
            _mongoc_server_selection_error (timeout_msg, &scanner_error, error);
            return 0;
         }
      } else {
         /* copy the id before unlocking; selected_server points into the
          * shared topology description */
         server_id = selected_server->id;
         mongoc_mutex_unlock (&topology->mutex);
         return server_id;
      }
   }
}
/*
*-------------------------------------------------------------------------
*
* mongoc_topology_server_by_id --
*
* Get the server description for @id, if that server is present
* in @description. Otherwise, return NULL and fill out the optional
* @error.
*
* NOTE: this method returns a copy of the original server
* description. Callers must own and clean up this copy.
*
* NOTE: this method locks and unlocks @topology's mutex.
*
* Returns:
* A mongoc_server_description_t, or NULL.
*
* Side effects:
* Fills out optional @error if server not found.
*
*-------------------------------------------------------------------------
*/
mongoc_server_description_t *
mongoc_topology_server_by_id (mongoc_topology_t *topology,
                              uint32_t id,
                              bson_error_t *error)
{
   mongoc_server_description_t *found;
   mongoc_server_description_t *copied;

   mongoc_mutex_lock (&topology->mutex);

   /* look up the live description, then snapshot it while still locked;
    * the copy is owned by the caller */
   found = mongoc_topology_description_server_by_id (
      &topology->description, id, error);
   copied = mongoc_server_description_new_copy (found);

   mongoc_mutex_unlock (&topology->mutex);

   return copied;
}
/*
*-------------------------------------------------------------------------
*
* mongoc_topology_host_by_id --
*
* Copy the mongoc_host_list_t for @id, if that server is present
* in @description. Otherwise, return NULL and fill out the optional
* @error.
*
* NOTE: this method returns a copy of the original mongoc_host_list_t.
* Callers must own and clean up this copy.
*
* NOTE: this method locks and unlocks @topology's mutex.
*
* Returns:
* A mongoc_host_list_t, or NULL.
*
* Side effects:
* Fills out optional @error if server not found.
*
*-------------------------------------------------------------------------
*/
mongoc_host_list_t *
_mongoc_topology_host_by_id (mongoc_topology_t *topology,
                             uint32_t id,
                             bson_error_t *error)
{
   mongoc_server_description_t *found;
   mongoc_host_list_t *result = NULL;

   mongoc_mutex_lock (&topology->mutex);

   /* borrowed pointer into the topology description, valid only under lock */
   found = mongoc_topology_description_server_by_id (
      &topology->description, id, error);

   if (found) {
      /* snapshot the host while the lock still protects it; caller frees */
      result = bson_malloc0 (sizeof (mongoc_host_list_t));
      memcpy (result, &found->host, sizeof (mongoc_host_list_t));
   }

   mongoc_mutex_unlock (&topology->mutex);

   return result;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_request_scan --
*
* Non-locking variant
*
*--------------------------------------------------------------------------
*/
static void
_mongoc_topology_request_scan (mongoc_topology_t *topology)
{
   /* non-locking: callers in this file already hold topology->mutex.
    * Set the flag first, then wake the scanner thread. */
   topology->scan_requested = true;

   mongoc_cond_signal (&topology->cond_server);
}
/*
*--------------------------------------------------------------------------
*
* mongoc_topology_invalidate_server --
*
* Invalidate the given server after receiving a network error in
* another part of the client.
*
* NOTE: this method uses @topology's mutex.
*
*--------------------------------------------------------------------------
*/
void
mongoc_topology_invalidate_server (mongoc_topology_t *topology,
                                   uint32_t id,
                                   const bson_error_t *error)
{
   /* the triggering network error is required, not optional */
   BSON_ASSERT (error);

   mongoc_mutex_lock (&topology->mutex);

   /* mark the server unknown in the shared description */
   mongoc_topology_description_invalidate_server (
      &topology->description, id, error);

   mongoc_mutex_unlock (&topology->mutex);
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_update_from_handshake --
*
* A client opens a new connection and calls ismaster on it when it
* detects a closed connection in _mongoc_cluster_check_interval, or if
* mongoc_client_pool_pop creates a new client. Update the topology
* description from the ismaster response.
*
* NOTE: this method uses @topology's mutex.
*
* Returns:
* false if the server was removed from the topology
*--------------------------------------------------------------------------
*/
bool
_mongoc_topology_update_from_handshake (mongoc_topology_t *topology,
                                        const mongoc_server_description_t *sd)
{
   bool has_server;
   BSON_ASSERT (topology);
   BSON_ASSERT (sd);
   mongoc_mutex_lock (&topology->mutex);
   /* fold this connection's ismaster reply into the shared description */
   mongoc_topology_description_handle_ismaster (&topology->description,
                                                sd->id,
                                                &sd->last_is_master,
                                                sd->round_trip_time_msec,
                                                NULL);
+ _mongoc_topology_scanner_set_cluster_time (
+ topology->scanner, &topology->description.cluster_time);
+
   /* return false if server was removed from topology */
   has_server = mongoc_topology_description_server_by_id (
      &topology->description, sd->id, NULL) != NULL;
   /* if pooled, wake threads waiting in mongoc_topology_server_by_id */
   mongoc_cond_broadcast (&topology->cond_client);
   mongoc_mutex_unlock (&topology->mutex);
   return has_server;
}
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_topology_update_last_used --
+ *
+ * Internal function. In single-threaded mode only, track when the socket
+ * to a particular server was last used. This is required for
+ * mongoc_cluster_check_interval to know when a socket has been idle.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+void
+_mongoc_topology_update_last_used (mongoc_topology_t *topology,
+                                   uint32_t server_id)
+{
+   mongoc_topology_scanner_node_t *node;
+
+   /* no-op for pooled (multi-threaded) clients */
+   if (!topology->single_threaded) {
+      return;
+   }
+
+   node = mongoc_topology_scanner_get_node (topology->scanner, server_id);
+   if (node) {
+      /* monotonic clock, microseconds */
+      node->last_used = bson_get_monotonic_time ();
+   }
+}
+
/*
*--------------------------------------------------------------------------
*
* mongoc_topology_server_timestamp --
*
* Return the topology's scanner's timestamp for the given server,
* or -1 if there is no scanner node for the given server.
*
* NOTE: this method uses @topology's mutex.
*
* Returns:
* Timestamp, or -1
*
*--------------------------------------------------------------------------
*/
int64_t
mongoc_topology_server_timestamp (mongoc_topology_t *topology, uint32_t id)
{
   mongoc_topology_scanner_node_t *scanner_node;
   int64_t ts = -1;

   mongoc_mutex_lock (&topology->mutex);

   /* -1 is returned when no scanner node exists for this server */
   scanner_node = mongoc_topology_scanner_get_node (topology->scanner, id);
   if (scanner_node) {
      ts = scanner_node->timestamp;
   }

   mongoc_mutex_unlock (&topology->mutex);

   return ts;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_get_type --
*
* Return the topology's description's type.
*
* NOTE: this method uses @topology's mutex.
*
* Returns:
* The topology description type.
*
*--------------------------------------------------------------------------
*/
mongoc_topology_description_type_t
_mongoc_topology_get_type (mongoc_topology_t *topology)
{
   mongoc_topology_description_type_t type_snapshot;

   /* take the mutex so we read a consistent value */
   mongoc_mutex_lock (&topology->mutex);
   type_snapshot = topology->description.type;
   mongoc_mutex_unlock (&topology->mutex);

   return type_snapshot;
}
/*
*--------------------------------------------------------------------------
*
* _mongoc_topology_run_background --
*
* The background topology monitoring thread runs in this loop.
*
* NOTE: this method uses @topology's mutex.
*
*--------------------------------------------------------------------------
*/
static void *
_mongoc_topology_run_background (void *data)
{
   mongoc_topology_t *topology;
   int64_t now;
   int64_t last_scan;
   int64_t timeout;
   int64_t force_timeout;
   int64_t heartbeat_msec;
   int r;
   BSON_ASSERT (data);
   last_scan = 0;
   topology = (mongoc_topology_t *) data;
   heartbeat_msec = topology->description.heartbeat_msec;
   /* we exit this loop when shutdown_requested, or on error */
   for (;;) {
      /* unlocked after starting a scan or after breaking out of the loop */
      mongoc_mutex_lock (&topology->mutex);
      /* NOTE(review): this path unlocks here AND again at DONE — confirm the
       * double unlock is intended/safe on all platforms */
+ if (!mongoc_topology_scanner_valid (topology->scanner)) {
+ mongoc_mutex_unlock (&topology->mutex);
+ goto DONE;
+ }
      /* we exit this loop on error, or when we should scan immediately */
      for (;;) {
         if (topology->shutdown_requested)
            goto DONE;
         now = bson_get_monotonic_time ();
         if (last_scan == 0) {
            /* set up the "last scan" as exactly long enough to force an
             * immediate scan on the first pass */
            last_scan = now - (heartbeat_msec * 1000);
         }
         /* milliseconds until the next regularly scheduled scan */
         timeout = heartbeat_msec - ((now - last_scan) / 1000);
         /* if someone's specifically asked for a scan, use a shorter interval
          */
         if (topology->scan_requested) {
            force_timeout = MONGOC_TOPOLOGY_MIN_HEARTBEAT_FREQUENCY_MS -
               ((now - last_scan) / 1000);
            timeout = BSON_MIN (timeout, force_timeout);
         }
         /* if we can start scanning, do so immediately */
         if (timeout <= 0) {
            mongoc_topology_scanner_start (
               topology->scanner, topology->connect_timeout_msec, false);
            break;
         } else {
            /* otherwise wait until someone:
             * o requests a scan
             * o we time out
             * o requests a shutdown
             */
            r = mongoc_cond_timedwait (
               &topology->cond_server, &topology->mutex, timeout);
#ifdef _WIN32
            if (!(r == 0 || r == WSAETIMEDOUT)) {
#else
            if (!(r == 0 || r == ETIMEDOUT)) {
#endif
               /* handle errors */
               goto DONE;
            }
            /* if we timed out, or were woken up, check if it's time to scan
             * again, or bail out */
         }
      }
      topology->scan_requested = false;
      /* scanning locks and unlocks the mutex itself until the scan is done */
      mongoc_mutex_unlock (&topology->mutex);
      mongoc_topology_scanner_work (topology->scanner);
      mongoc_mutex_lock (&topology->mutex);
      _mongoc_topology_scanner_finish (topology->scanner);
      /* "retired" nodes can be checked again in the next scan */
      mongoc_topology_scanner_reset (topology->scanner);
      topology->last_scan = bson_get_monotonic_time ();
      mongoc_mutex_unlock (&topology->mutex);
      last_scan = bson_get_monotonic_time ();
   }
DONE:
   mongoc_mutex_unlock (&topology->mutex);
   return NULL;
}
/*
*--------------------------------------------------------------------------
*
 * _mongoc_topology_start_background_scanner --
*
* Start the topology background thread running. This should only be
* called once per pool. If clients are created separately (not
* through a pool) the SDAM logic will not be run in a background
* thread. Returns whether or not the scanner is running on termination
* of the function.
*
* NOTE: this method uses @topology's mutex.
*
*--------------------------------------------------------------------------
*/
bool
_mongoc_topology_start_background_scanner (mongoc_topology_t *topology)
{
   int create_rc;

   /* single-threaded clients run SDAM inline, never in a background thread */
   if (topology->single_threaded) {
      return false;
   }

   mongoc_mutex_lock (&topology->mutex);

   if (topology->scanner_state == MONGOC_TOPOLOGY_SCANNER_OFF) {
      topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_BG_RUNNING;

      /* handshake metadata must not change once monitoring begins */
      _mongoc_handshake_freeze ();
      _mongoc_topology_description_monitor_opening (&topology->description);

      create_rc = mongoc_thread_create (
         &topology->thread, _mongoc_topology_run_background, topology);
      if (create_rc != 0) {
         /* a pool that cannot monitor its servers cannot operate */
         MONGOC_ERROR ("could not start topology scanner thread: %s",
                       strerror (create_rc));
         abort ();
      }
   }

   mongoc_mutex_unlock (&topology->mutex);
   return true;
}
/*
*--------------------------------------------------------------------------
*
 * _mongoc_topology_background_thread_stop --
*
* Stop the topology background thread. Called by the owning pool at
* its destruction.
*
* NOTE: this method uses @topology's mutex.
*
*--------------------------------------------------------------------------
*/
static void
_mongoc_topology_background_thread_stop (mongoc_topology_t *topology)
{
   bool join_thread = false;
   /* no background thread is ever started in single-threaded mode */
   if (topology->single_threaded) {
      return;
   }
   mongoc_mutex_lock (&topology->mutex);
   if (topology->scanner_state == MONGOC_TOPOLOGY_SCANNER_BG_RUNNING) {
      /* if the background thread is running, request a shutdown and signal the
       * thread */
      topology->shutdown_requested = true;
      mongoc_cond_signal (&topology->cond_server);
      topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_SHUTTING_DOWN;
      join_thread = true;
   } else if (topology->scanner_state ==
              MONGOC_TOPOLOGY_SCANNER_SHUTTING_DOWN) {
      /* if we're mid shutdown, wait until it shuts down */
      while (topology->scanner_state != MONGOC_TOPOLOGY_SCANNER_OFF) {
         mongoc_cond_wait (&topology->cond_client, &topology->mutex);
      }
   } else {
      /* nothing to do if it's already off */
   }
   mongoc_mutex_unlock (&topology->mutex);
   /* join outside the lock so the background thread can make progress */
   if (join_thread) {
      /* if we're joining the thread, wait for it to come back and broadcast
       * all listeners */
      mongoc_thread_join (topology->thread);
      mongoc_cond_broadcast (&topology->cond_client);
   }
}
bool
_mongoc_topology_set_appname (mongoc_topology_t *topology, const char *appname)
{
   bool ok = false;

   mongoc_mutex_lock (&topology->mutex);

   /* appname is part of the handshake; it is fixed once scanning starts */
   if (topology->scanner_state != MONGOC_TOPOLOGY_SCANNER_OFF) {
      MONGOC_ERROR ("Cannot set appname after handshake initiated");
   } else {
      ok = _mongoc_topology_scanner_set_appname (topology->scanner, appname);
   }

   mongoc_mutex_unlock (&topology->mutex);
   return ok;
}
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_topology_update_cluster_time --
+ *
+ * Internal function. If the server reply has a later $clusterTime than
+ * any seen before, update the topology's clusterTime. See the Driver
+ * Sessions Spec.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+void
+_mongoc_topology_update_cluster_time (mongoc_topology_t *topology,
+                                      const bson_t *reply)
+{
+   mongoc_mutex_lock (&topology->mutex);
+   /* description keeps the max $clusterTime seen so far */
+   mongoc_topology_description_update_cluster_time (&topology->description,
+                                                    reply);
+   /* keep the scanner's copy in sync with the description's */
+   _mongoc_topology_scanner_set_cluster_time (
+      topology->scanner, &topology->description.cluster_time);
+   mongoc_mutex_unlock (&topology->mutex);
+}
+
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_topology_pop_server_session --
+ *
+ * Internal function. Get a server session from the pool or create
+ * one. On error, return NULL and fill out @error.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+mongoc_server_session_t *
+_mongoc_topology_pop_server_session (mongoc_topology_t *topology,
+                                     bson_error_t *error)
+{
+   int64_t timeout;
+   mongoc_server_session_t *ss = NULL;
+   mongoc_topology_description_t *td;
+
+   ENTRY;
+
+   mongoc_mutex_lock (&topology->mutex);
+
+   td = &topology->description;
+   timeout = td->session_timeout_minutes;
+
+   if (timeout == MONGOC_NO_SESSIONS) {
+      /* if needed, connect and check for session timeout again */
+      if (!mongoc_topology_description_has_data_node (td)) {
+         /* drop the lock: server selection locks the mutex itself */
+         mongoc_mutex_unlock (&topology->mutex);
+         if (!mongoc_topology_select_server_id (
+                topology, MONGOC_SS_READ, NULL, error)) {
+            RETURN (NULL);
+         }
+
+         /* re-read the timeout now that a server has been contacted */
+         mongoc_mutex_lock (&topology->mutex);
+         timeout = td->session_timeout_minutes;
+      }
+
+      if (timeout == MONGOC_NO_SESSIONS) {
+         mongoc_mutex_unlock (&topology->mutex);
+         bson_set_error (error,
+                         MONGOC_ERROR_CLIENT,
+                         MONGOC_ERROR_CLIENT_SESSION_FAILURE,
+                         "Server does not support sessions");
+         RETURN (NULL);
+      }
+   }
+
+   /* pop pooled sessions, discarding timed-out ones, until a live one
+    * is found or the pool is empty */
+   while (topology->session_pool) {
+      ss = topology->session_pool;
+      CDL_DELETE (topology->session_pool, ss);
+      if (_mongoc_server_session_timed_out (ss, timeout)) {
+         _mongoc_server_session_destroy (ss);
+         ss = NULL;
+      } else {
+         break;
+      }
+   }
+
+   mongoc_mutex_unlock (&topology->mutex);
+
+   if (!ss) {
+      /* pool was empty or fully expired: create a fresh session */
+      ss = _mongoc_server_session_new (error);
+   }
+
+   RETURN (ss);
+}
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_topology_push_server_session --
+ *
+ * Internal function. Return a server session to the pool.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+void
+_mongoc_topology_push_server_session (mongoc_topology_t *topology,
+                                      mongoc_server_session_t *server_session)
+{
+   int64_t timeout;
+   mongoc_server_session_t *ss;
+
+   ENTRY;
+
+   mongoc_mutex_lock (&topology->mutex);
+
+   timeout = topology->description.session_timeout_minutes;
+
+   /* start at back of queue and reap timed-out sessions */
+   while (topology->session_pool && topology->session_pool->prev) {
+      ss = topology->session_pool->prev;
+      if (_mongoc_server_session_timed_out (ss, timeout)) {
+         BSON_ASSERT (ss->next); /* silences clang scan-build */
+         CDL_DELETE (topology->session_pool, ss);
+         _mongoc_server_session_destroy (ss);
+      } else {
+         /* if ss is not timed out, sessions in front of it are ok too */
+         break;
+      }
+   }
+
+   /* only still-fresh sessions are returned to the pool */
+   if (_mongoc_server_session_timed_out (server_session, timeout)) {
+      _mongoc_server_session_destroy (server_session);
+   } else {
+      /* silences clang scan-build */
+      BSON_ASSERT (!topology->session_pool || (topology->session_pool->next &&
+                                               topology->session_pool->prev));
+      /* newest sessions go to the front of the pool */
+      CDL_PREPEND (topology->session_pool, server_session);
+   }
+
+   mongoc_mutex_unlock (&topology->mutex);
+
+   EXIT;
+}
+
+
+/*
+ *--------------------------------------------------------------------------
+ *
+ * _mongoc_topology_end_sessions --
+ *
+ * Internal function. End all server sessions. @cmd is an
+ * uninitialized document.
+ *
+ *--------------------------------------------------------------------------
+ */
+
+void
+_mongoc_topology_end_sessions_cmd (mongoc_topology_t *topology, bson_t *cmd)
+{
+   char buf[16];
+   const char *key;
+   uint32_t i;
+   mongoc_server_session_t *ss;
+   bson_t ar;
+
+   /* @cmd is uninitialized; build {"endSessions": [<lsid>, ...]} into it */
+   bson_init (cmd);
+   BSON_APPEND_ARRAY_BEGIN (cmd, "endSessions", &ar);
+
+   i = 0;
+   CDL_FOREACH (topology->session_pool, ss)
+   {
+      /* BSON array keys are stringified indices: "0", "1", ... */
+      bson_uint32_to_string (i, &key, buf, sizeof buf);
+      BSON_APPEND_DOCUMENT (&ar, key, &ss->lsid);
+      i++;
+   }
+
+   bson_append_array_end (cmd, &ar);
+}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-trace-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-trace-private.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-trace-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-trace-private.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-uri-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-uri-private.h
similarity index 74%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-uri-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-uri-private.h
index 061db048..bf1a54ba 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-uri-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-uri-private.h
@@ -1,41 +1,48 @@
/*
* Copyright 2015 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_URI_PRIVATE_H
#define MONGOC_URI_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include "mongoc-uri.h"
BSON_BEGIN_DECLS
bool
-mongoc_uri_append_host (mongoc_uri_t *uri, const char *host, uint16_t port);
+mongoc_uri_append_host (mongoc_uri_t *uri,
+ const char *host,
+ uint16_t port,
+ bson_error_t *error);
bool
mongoc_uri_parse_host (mongoc_uri_t *uri, const char *str, bool downcase);
-
+bool
+mongoc_uri_parse_options (mongoc_uri_t *uri,
+ const char *str,
+ bool from_dns,
+ bson_error_t *error);
int32_t
mongoc_uri_get_local_threshold_option (const mongoc_uri_t *uri);
BSON_END_DECLS
#endif /* MONGOC_URI_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-uri.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-uri.c
similarity index 84%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-uri.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-uri.c
index 36ce85cb..38f9e8c6 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-uri.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-uri.c
@@ -1,2087 +1,2332 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <math.h>
/* strcasecmp on windows */
#include "mongoc-util-private.h"
#include "mongoc-config.h"
#include "mongoc-host-list.h"
#include "mongoc-host-list-private.h"
#include "mongoc-log.h"
#include "mongoc-handshake-private.h"
#include "mongoc-socket.h"
#include "mongoc-topology-private.h"
#include "mongoc-uri-private.h"
#include "mongoc-read-concern-private.h"
#include "mongoc-write-concern-private.h"
#include "mongoc-compression-private.h"
struct _mongoc_uri_t {
char *str;
+ bool is_srv;
+ char srv[BSON_HOST_NAME_MAX + 1];
mongoc_host_list_t *hosts;
char *username;
char *password;
char *database;
bson_t options;
bson_t credentials;
bson_t compressors;
mongoc_read_prefs_t *read_prefs;
mongoc_read_concern_t *read_concern;
mongoc_write_concern_t *write_concern;
};
#define MONGOC_URI_ERROR(error, format, ...) \
bson_set_error (error, \
MONGOC_ERROR_COMMAND, \
MONGOC_ERROR_COMMAND_INVALID_ARG, \
format, \
__VA_ARGS__);
static const char *escape_instructions = "Percent-encode username and password"
- " according to RFC 3986.";
+ " according to RFC 3986";
bool
_mongoc_uri_set_option_as_int32 (mongoc_uri_t *uri,
const char *option,
int32_t value);
+static bool
+ends_with (const char *str, const char *suffix);
+
static void
mongoc_uri_do_unescape (char **str)
{
   char *original = *str;

   /* no-op for an empty slot; otherwise replace the string in place and
    * free the old one. Callers treat a NULL result as invalid escapes. */
   if (original) {
      *str = mongoc_uri_unescape (original);
      bson_free (original);
   }
}
+
+#define VALIDATE_SRV_ERR() \
+ do { \
+ bson_set_error (error, \
+ MONGOC_ERROR_STREAM, \
+ MONGOC_ERROR_STREAM_NAME_RESOLUTION, \
+ "Invalid host \"%s\" returned for service \"%s\": " \
+ "host must be subdomain of service name", \
+ host, \
+ service); \
+ return false; \
+ } while (0)
+
+
+/* count '.' characters in s; a dot at position 0 is not counted because
+ * the search starts at dot + 1 */
+static int
+count_dots (const char *s)
+{
+   int n = 0;
+   const char *dot = s;
+
+   while ((dot = strchr (dot + 1, '.'))) {
+      n++;
+   }
+
+   return n;
+}
+
+
+/* at least one character, and does not start or end with dot */
+static bool
+valid_hostname (const char *s)
+{
+   size_t len = strlen (s);
+
+   /* NOTE(review): len > 1 requires at least TWO characters, which
+    * contradicts the comment above — confirm single-character hostnames
+    * are meant to be rejected */
+   return len > 1 && s[0] != '.' && s[len - 1] != '.';
+}
+
+
+/* check that an SRV-resolved host belongs to the queried service's domain;
+ * on failure VALIDATE_SRV_ERR fills @error and returns false */
+static bool
+validate_srv_result (mongoc_uri_t *uri, const char *host, bson_error_t *error)
+{
+   const char *service;
+   const char *service_root;
+
+   service = mongoc_uri_get_service (uri);
+   BSON_ASSERT (service);
+
+   if (!valid_hostname (host)) {
+      VALIDATE_SRV_ERR ();
+   }
+
+   /* the service name is asserted to contain at least one dot */
+   service_root = strchr (service, '.');
+   BSON_ASSERT (service_root);
+
+   /* host must be descendent of service root: if service is
+    * "a.foo.co" host can be like "a.foo.co", "b.foo.co", "a.b.foo.co", etc.
+    */
+   if (strlen (host) < strlen (service_root)) {
+      VALIDATE_SRV_ERR ();
+   }
+
+   if (!ends_with (host, service_root)) {
+      VALIDATE_SRV_ERR ();
+   }
+
+   return true;
+}
+
+
bool
-mongoc_uri_append_host (mongoc_uri_t *uri, const char *host, uint16_t port)
+mongoc_uri_append_host (mongoc_uri_t *uri,
+ const char *host,
+ uint16_t port,
+ bson_error_t *error)
{
mongoc_host_list_t *iter;
mongoc_host_list_t *link_;
if (strlen (host) > BSON_HOST_NAME_MAX) {
- MONGOC_ERROR ("Hostname provided in URI is too long, max is %d chars",
- BSON_HOST_NAME_MAX);
+ bson_set_error (error,
+ MONGOC_ERROR_STREAM,
+ MONGOC_ERROR_STREAM_NAME_RESOLUTION,
+ "Hostname provided in URI is too long, max is %d chars",
+ BSON_HOST_NAME_MAX);
+ return false;
+ }
+
+ if (uri->is_srv && !validate_srv_result (uri, host, error)) {
return false;
}
link_ = (mongoc_host_list_t *) bson_malloc0 (sizeof *link_);
bson_strncpy (link_->host, host, sizeof link_->host);
if (strchr (host, ':')) {
bson_snprintf (link_->host_and_port,
sizeof link_->host_and_port,
"[%s]:%hu",
host,
port);
link_->family = AF_INET6;
} else if (strstr (host, ".sock")) {
bson_snprintf (
link_->host_and_port, sizeof link_->host_and_port, "%s", host);
link_->family = AF_UNIX;
} else {
bson_snprintf (link_->host_and_port,
sizeof link_->host_and_port,
"%s:%hu",
host,
port);
link_->family = AF_INET;
}
link_->host_and_port[sizeof link_->host_and_port - 1] = '\0';
link_->port = port;
if ((iter = uri->hosts)) {
for (; iter && iter->next; iter = iter->next) {
}
iter->next = link_;
} else {
uri->hosts = link_;
}
return true;
}
/*
*--------------------------------------------------------------------------
*
* scan_to_unichar --
*
* Scans 'str' until either a character matching 'match' is found,
* until one of the characters in 'terminators' is encountered, or
* until we reach the end of 'str'.
*
* NOTE: 'terminators' may not include multibyte UTF-8 characters.
*
* Returns:
* If 'match' is found, returns a copy of the section of 'str' before
* that character. Otherwise, returns NULL.
*
* Side Effects:
* If 'match' is found, sets 'end' to begin at the matching character
* in 'str'.
*
*--------------------------------------------------------------------------
*/
static char *
scan_to_unichar (const char *str,
                 bson_unichar_t match,
                 const char *terminators,
                 const char **end)
{
   bson_unichar_t c;
   const char *iter;
   /* walk str one UTF-8 character at a time */
   for (iter = str; iter && *iter && (c = bson_utf8_get_char (iter));
        iter = bson_utf8_next_char (iter)) {
      if (c == match) {
         /* found it: hand back the position and a copy of the prefix */
         *end = iter;
         return bson_strndup (str, iter - str);
      } else if (c == '\\') {
         /* skip the escaped character so it can neither match nor
          * terminate; bail if the string ends mid-escape */
         iter = bson_utf8_next_char (iter);
         if (!bson_utf8_get_char (iter)) {
            break;
         }
      } else {
         const char *term_iter;
         for (term_iter = terminators; *term_iter; term_iter++) {
            if (c == *term_iter) {
               /* hit a terminator before 'match'; *end is left untouched */
               return NULL;
            }
         }
      }
   }
   return NULL;
}
/*
*--------------------------------------------------------------------------
*
* ends_with --
*
* Return true if str ends with suffix.
*
*--------------------------------------------------------------------------
*/
static bool
ends_with (const char *str, const char *suffix)
{
   size_t str_len = strlen (str);
   size_t suffix_len = strlen (suffix);

   /* a suffix longer than the string can never match */
   if (str_len < suffix_len) {
      return false;
   }

   /* compare the tail of str against the whole suffix */
   return strcmp (str + (str_len - suffix_len), suffix) == 0;
}
static bool
-mongoc_uri_parse_scheme (const char *str, const char **end)
+mongoc_uri_parse_scheme (mongoc_uri_t *uri, const char *str, const char **end)
{
- if (!!strncmp (str, "mongodb://", 10)) {
- return false;
+ if (!strncmp (str, "mongodb+srv://", 14)) {
+ uri->is_srv = true;
+ *end = str + 14;
+ return true;
}
- *end = str + 10;
+ if (!strncmp (str, "mongodb://", 10)) {
+ uri->is_srv = false;
+ *end = str + 10;
+ return true;
+ }
- return true;
+ return false;
}
static bool
mongoc_uri_has_unescaped_chars (const char *str, const char *chars)
{
   const char *needle;

   /* scan_to_unichar returns a copy of the prefix when the character is
    * found outside an escape sequence; non-NULL means the raw character
    * is present in str */
   for (needle = chars; *needle; needle++) {
      const char *unused_end;
      char *prefix =
         scan_to_unichar (str, (bson_unichar_t) *needle, "", &unused_end);

      if (prefix) {
         bson_free (prefix);
         return true;
      }
   }

   return false;
}
/* "str" is non-NULL, the part of URI between "mongodb://" and first "@" */
static bool
mongoc_uri_parse_userpass (mongoc_uri_t *uri,
const char *str,
bson_error_t *error)
{
const char *prohibited = "@:/";
const char *end_user;
BSON_ASSERT (str);
if ((uri->username = scan_to_unichar (str, ':', "", &end_user))) {
uri->password = bson_strdup (end_user + 1);
} else {
uri->username = bson_strdup (str);
uri->password = NULL;
}
if (mongoc_uri_has_unescaped_chars (uri->username, prohibited)) {
MONGOC_URI_ERROR (error,
"Username \"%s\" must not have unescaped chars. %s",
uri->username,
escape_instructions);
return false;
}
mongoc_uri_do_unescape (&uri->username);
if (!uri->username) {
MONGOC_URI_ERROR (
error, "Incorrect URI escapes in username. %s", escape_instructions);
return false;
}
/* Providing password at all is optional */
if (uri->password) {
if (mongoc_uri_has_unescaped_chars (uri->password, prohibited)) {
MONGOC_URI_ERROR (error,
"Password \"%s\" must not have unescaped chars. %s",
uri->password,
escape_instructions);
return false;
}
mongoc_uri_do_unescape (&uri->password);
if (!uri->password) {
- MONGOC_URI_ERROR (error, "%s", "Incorrect URI escapes in password.");
+ MONGOC_URI_ERROR (error, "%s", "Incorrect URI escapes in password");
return false;
}
}
return true;
}
static bool
mongoc_uri_parse_host6 (mongoc_uri_t *uri, const char *str)
{
uint16_t port = MONGOC_DEFAULT_PORT;
const char *portstr;
const char *end_host;
char *hostname;
+ bson_error_t error;
bool r;
if ((portstr = strrchr (str, ':')) && !strstr (portstr, "]")) {
if (!mongoc_parse_port (&port, portstr + 1)) {
return false;
}
}
hostname = scan_to_unichar (str + 1, ']', "", &end_host);
mongoc_uri_do_unescape (&hostname);
if (!hostname) {
return false;
}
mongoc_lowercase (hostname, hostname);
- r = mongoc_uri_append_host (uri, hostname, port);
+ r = mongoc_uri_append_host (uri, hostname, port, &error);
+ if (!r) {
+ MONGOC_ERROR ("%s", error.message);
+ }
+
bson_free (hostname);
return r;
}
bool
mongoc_uri_parse_host (mongoc_uri_t *uri, const char *str, bool downcase)
{
uint16_t port;
const char *end_host;
char *hostname;
+ bson_error_t error;
bool r;
if (*str == '\0') {
MONGOC_WARNING ("Empty hostname in URI");
return false;
}
if (*str == '[' && strchr (str, ']')) {
return mongoc_uri_parse_host6 (uri, str);
}
if ((hostname = scan_to_unichar (str, ':', "?/,", &end_host))) {
end_host++;
if (!mongoc_parse_port (&port, end_host)) {
bson_free (hostname);
return false;
}
} else {
hostname = bson_strdup (str);
port = MONGOC_DEFAULT_PORT;
}
if (mongoc_uri_has_unescaped_chars (hostname, "/")) {
MONGOC_WARNING ("Unix Domain Sockets must be escaped (e.g. / = %%2F)");
bson_free (hostname);
return false;
}
mongoc_uri_do_unescape (&hostname);
if (!hostname) {
/* invalid */
return false;
}
if (downcase) {
mongoc_lowercase (hostname, hostname);
}
- r = mongoc_uri_append_host (uri, hostname, port);
+ r = mongoc_uri_append_host (uri, hostname, port, &error);
+ if (!r) {
+ MONGOC_ERROR ("%s", error.message);
+ }
+
bson_free (hostname);
return r;
}
+bool
+mongoc_uri_parse_srv (mongoc_uri_t *uri, const char *str)
+{
+ char *service;
+
+ if (*str == '\0') {
+ return false;
+ }
+
+ service = bson_strdup (str);
+ mongoc_uri_do_unescape (&service);
+ if (!service) {
+ /* invalid */
+ return false;
+ }
+
+ if (!valid_hostname (service) || count_dots (service) < 2) {
+ bson_free (service);
+ return false;
+ }
+
+ bson_strncpy (uri->srv, service, sizeof uri->srv);
+ bson_free (service);
+
+ if (strchr (uri->srv, ',') || strchr (uri->srv, ':')) {
+ /* prohibit port number or multiple service names */
+ return false;
+ }
+
+ return true;
+}
+
+
/* "hosts" is non-NULL, the part between "mongodb://" or "@" and last "/" */
static bool
mongoc_uri_parse_hosts (mongoc_uri_t *uri, const char *hosts)
{
const char *next;
const char *end_hostport;
char *s;
BSON_ASSERT (hosts);
/*
* Parsing the series of hosts is a lot more complicated than you might
* imagine. This is due to some characters being both separators as well as
* valid characters within the "hostname". In particularly, we can have file
* paths to specify paths to UNIX domain sockets. We impose the restriction
* that they must be suffixed with ".sock" to simplify the parsing.
*
* You can separate hosts and file system paths to UNIX domain sockets with
* ",".
*/
s = scan_to_unichar (hosts, '?', "", &end_hostport);
if (s) {
MONGOC_WARNING (
"%s", "A '/' is required between the host list and any options.");
goto error;
}
next = hosts;
do {
/* makes a copy of the section of the string */
s = scan_to_unichar (next, ',', "", &end_hostport);
if (s) {
next = (char *) end_hostport + 1;
} else {
s = bson_strdup (next);
next = NULL;
}
if (ends_with (s, ".sock")) {
if (!mongoc_uri_parse_host (uri, s, false /* downcase */)) {
goto error;
}
} else if (!mongoc_uri_parse_host (uri, s, true /* downcase */)) {
goto error;
}
bson_free (s);
} while (next);
return true;
error:
bson_free (s);
return false;
}
static bool
mongoc_uri_parse_database (mongoc_uri_t *uri, const char *str, const char **end)
{
const char *end_database;
const char *c;
char *invalid_c;
const char *tmp;
if ((uri->database = scan_to_unichar (str, '?', "", &end_database))) {
*end = end_database;
} else if (*str) {
uri->database = bson_strdup (str);
*end = str + strlen (str);
}
mongoc_uri_do_unescape (&uri->database);
if (!uri->database) {
/* invalid */
return false;
}
/* invalid characters in database name */
for (c = "/\\. \"$"; *c; c++) {
invalid_c =
scan_to_unichar (uri->database, (bson_unichar_t) *c, "", &tmp);
if (invalid_c) {
bson_free (invalid_c);
return false;
}
}
return true;
}
static bool
mongoc_uri_parse_auth_mechanism_properties (mongoc_uri_t *uri, const char *str)
{
char *field;
char *value;
const char *end_scan;
bson_t properties;
bson_init (&properties);
/* build up the properties document */
while ((field = scan_to_unichar (str, ':', "&", &end_scan))) {
str = end_scan + 1;
if (!(value = scan_to_unichar (str, ',', ":&", &end_scan))) {
value = bson_strdup (str);
str = "";
} else {
str = end_scan + 1;
}
bson_append_utf8 (&properties, field, -1, value, -1);
bson_free (field);
bson_free (value);
}
/* append our auth properties to our credentials */
mongoc_uri_set_mechanism_properties (uri, &properties);
return true;
}
static bool
mongoc_uri_parse_tags (mongoc_uri_t *uri, /* IN */
const char *str) /* IN */
{
const char *end_keyval;
const char *end_key;
bson_t b;
char *keyval;
char *key;
bson_init (&b);
again:
if ((keyval = scan_to_unichar (str, ',', "", &end_keyval))) {
if (!(key = scan_to_unichar (keyval, ':', "", &end_key))) {
bson_free (keyval);
goto fail;
}
bson_append_utf8 (&b, key, -1, end_key + 1, -1);
bson_free (key);
bson_free (keyval);
str = end_keyval + 1;
goto again;
} else if ((key = scan_to_unichar (str, ':', "", &end_key))) {
bson_append_utf8 (&b, key, -1, end_key + 1, -1);
bson_free (key);
} else if (strlen (str)) {
/* we're not finished but we couldn't parse the string */
goto fail;
}
mongoc_read_prefs_add_tag (uri->read_prefs, &b);
bson_destroy (&b);
return true;
fail:
MONGOC_WARNING ("Unsupported value for \"" MONGOC_URI_READPREFERENCETAGS
"\": \"%s\"",
str);
bson_destroy (&b);
return false;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_uri_bson_append_or_replace_key --
*
*
* Appends 'option' to the end of 'options' if not already set.
*
* Since we cannot grow utf8 strings inline, we have to allocate a
* temporary bson variable and splice in the new value if the key
* is already set.
*
* NOTE: This function keeps the order of the BSON keys.
*
* NOTE: 'option' is case*in*sensitive.
*
*
*--------------------------------------------------------------------------
*/
static void
mongoc_uri_bson_append_or_replace_key (bson_t *options,
const char *option,
const char *value)
{
bson_iter_t iter;
bool found = false;
if (bson_iter_init (&iter, options)) {
bson_t tmp = BSON_INITIALIZER;
while (bson_iter_next (&iter)) {
const bson_value_t *bvalue;
if (!strcasecmp (bson_iter_key (&iter), option)) {
bson_append_utf8 (&tmp, option, -1, value, -1);
found = true;
continue;
}
bvalue = bson_iter_value (&iter);
BSON_APPEND_VALUE (&tmp, bson_iter_key (&iter), bvalue);
}
if (!found) {
bson_append_utf8 (&tmp, option, -1, value, -1);
}
bson_destroy (options);
bson_copy_to (&tmp, options);
bson_destroy (&tmp);
}
}
bool
mongoc_uri_option_is_int32 (const char *key)
{
return !strcasecmp (key, MONGOC_URI_CONNECTTIMEOUTMS) ||
!strcasecmp (key, MONGOC_URI_HEARTBEATFREQUENCYMS) ||
!strcasecmp (key, MONGOC_URI_SERVERSELECTIONTIMEOUTMS) ||
!strcasecmp (key, MONGOC_URI_SOCKETCHECKINTERVALMS) ||
!strcasecmp (key, MONGOC_URI_SOCKETTIMEOUTMS) ||
!strcasecmp (key, MONGOC_URI_LOCALTHRESHOLDMS) ||
!strcasecmp (key, MONGOC_URI_MAXPOOLSIZE) ||
!strcasecmp (key, MONGOC_URI_MAXSTALENESSSECONDS) ||
!strcasecmp (key, MONGOC_URI_MINPOOLSIZE) ||
!strcasecmp (key, MONGOC_URI_MAXIDLETIMEMS) ||
!strcasecmp (key, MONGOC_URI_WAITQUEUEMULTIPLE) ||
!strcasecmp (key, MONGOC_URI_WAITQUEUETIMEOUTMS) ||
!strcasecmp (key, MONGOC_URI_WTIMEOUTMS) ||
!strcasecmp (key, MONGOC_URI_ZLIBCOMPRESSIONLEVEL);
}
bool
mongoc_uri_option_is_bool (const char *key)
{
return !strcasecmp (key, MONGOC_URI_CANONICALIZEHOSTNAME) ||
!strcasecmp (key, MONGOC_URI_JOURNAL) ||
+ !strcasecmp (key, MONGOC_URI_RETRYWRITES) ||
!strcasecmp (key, MONGOC_URI_SAFE) ||
!strcasecmp (key, MONGOC_URI_SERVERSELECTIONTRYONCE) ||
!strcasecmp (key, MONGOC_URI_SLAVEOK) ||
!strcasecmp (key, MONGOC_URI_SSL) ||
!strcasecmp (key, MONGOC_URI_SSLALLOWINVALIDCERTIFICATES) ||
!strcasecmp (key, MONGOC_URI_SSLALLOWINVALIDHOSTNAMES);
}
bool
mongoc_uri_option_is_utf8 (const char *key)
{
return !strcasecmp (key, MONGOC_URI_APPNAME) ||
- !strcasecmp (key, MONGOC_URI_GSSAPISERVICENAME) ||
!strcasecmp (key, MONGOC_URI_REPLICASET) ||
!strcasecmp (key, MONGOC_URI_READPREFERENCE) ||
!strcasecmp (key, MONGOC_URI_SSLCLIENTCERTIFICATEKEYFILE) ||
!strcasecmp (key, MONGOC_URI_SSLCLIENTCERTIFICATEKEYPASSWORD) ||
!strcasecmp (key, MONGOC_URI_SSLCERTIFICATEAUTHORITYFILE);
}
static bool
mongoc_uri_parse_int32 (const char *key, const char *value, int32_t *result)
{
char *endptr;
int64_t i;
errno = 0;
i = bson_ascii_strtoll (value, &endptr, 10);
if (errno || endptr < value + strlen (value)) {
MONGOC_WARNING ("Invalid %s: cannot parse integer\n", key);
return false;
}
if (i > INT32_MAX || i < INT32_MIN) {
MONGOC_WARNING ("Invalid %s: cannot fit in int32\n", key);
return false;
}
*result = (int32_t) i;
return true;
}
+
+static bool
+dns_option_allowed (const char *lkey)
+{
+ /* Initial DNS Seedlist Discovery Spec: "A Client MUST only support the
+ * authSource and replicaSet options through a TXT record, and MUST raise an
+ * error if any other option is encountered."
+ */
+ return !strcmp (lkey, MONGOC_URI_AUTHSOURCE) ||
+ !strcmp (lkey, MONGOC_URI_REPLICASET);
+}
+
+
+/* Initial DNS Seedlist Discovery Spec: "A Client MUST use options specified in
+ * the Connection String to override options provided through TXT records."
+ * Log an error but return true. Parsing continues and the topology is valid.
+ */
+#define HANDLE_DUPE() \
+ if (from_dns) { \
+ MONGOC_WARNING ( \
+ "Cannot override URI option \"%s\" from TXT record \"%s\"", \
+ key, \
+ str); \
+ ret = true; \
+ goto CLEANUP; \
+ } \
+ MONGOC_WARNING ("Overwriting previously provided value for '%s'", key)
+
+
static bool
-mongoc_uri_parse_option (mongoc_uri_t *uri, const char *str)
+mongoc_uri_parse_option (mongoc_uri_t *uri,
+ const char *str,
+ bool from_dns,
+ bson_error_t *error)
{
int32_t v_int;
const char *end_key;
char *lkey = NULL;
char *key = NULL;
char *value = NULL;
bool ret = false;
if (!(key = scan_to_unichar (str, '=', "", &end_key))) {
+ MONGOC_URI_ERROR (error, "URI option \"%s\" contains no \"=\" sign", str);
goto CLEANUP;
}
value = bson_strdup (end_key + 1);
mongoc_uri_do_unescape (&value);
if (!value) {
/* do_unescape detected invalid UTF-8 and freed value */
+ MONGOC_URI_ERROR (
+ error, "Value for URI option \"%s\" contains invalid UTF-8", key);
goto CLEANUP;
}
lkey = bson_strdup (key);
mongoc_lowercase (key, lkey);
+ /* Initial DNS Seedlist Discovery Spec: "A Client MUST only support the
+ * authSource and replicaSet options through a TXT record, and MUST raise an
+ * error if any other option is encountered."*/
+ if (from_dns && !dns_option_allowed (lkey)) {
+ MONGOC_URI_ERROR (
+ error, "URI option \"%s\" prohibited in TXT record", lkey);
+ goto CLEANUP;
+ }
+
if (bson_has_field (&uri->options, lkey)) {
- MONGOC_WARNING ("Overwriting previously provided value for '%s'", key);
+ /* Initial DNS Seedlist Discovery Spec: "Client MUST use options
+ * specified in the Connection String to override options provided
+ * through TXT records." So, do NOT override existing options with TXT
+ * options. */
+ HANDLE_DUPE ();
}
if (mongoc_uri_option_is_int32 (lkey)) {
if (!mongoc_uri_parse_int32 (lkey, value, &v_int)) {
goto UNSUPPORTED_VALUE;
}
if (!mongoc_uri_set_option_as_int32 (uri, lkey, v_int)) {
goto CLEANUP;
}
} else if (!strcmp (lkey, MONGOC_URI_W)) {
if (*value == '-' || isdigit (*value)) {
v_int = (int) strtol (value, NULL, 10);
_mongoc_uri_set_option_as_int32 (uri, MONGOC_URI_W, v_int);
} else if (0 == strcasecmp (value, "majority")) {
mongoc_uri_bson_append_or_replace_key (
&uri->options, MONGOC_URI_W, "majority");
} else if (*value) {
mongoc_uri_bson_append_or_replace_key (
&uri->options, MONGOC_URI_W, value);
}
} else if (mongoc_uri_option_is_bool (lkey)) {
if (0 == strcasecmp (value, "true")) {
mongoc_uri_set_option_as_bool (uri, lkey, true);
} else if (0 == strcasecmp (value, "false")) {
mongoc_uri_set_option_as_bool (uri, lkey, false);
} else if ((0 == strcmp (value, "1")) ||
(0 == strcasecmp (value, "yes")) ||
(0 == strcasecmp (value, "y")) ||
(0 == strcasecmp (value, "t"))) {
- MONGOC_WARNING ("Deprecated boolean value for \"%1$s\": \"%2$s\", "
- "please update to \"%1$s=true\"",
+ MONGOC_WARNING ("Deprecated boolean value for \"%s\": \"%s\", "
+ "please update to \"%s=true\"",
key,
- value);
+ value,
+ key);
mongoc_uri_set_option_as_bool (uri, lkey, true);
} else if ((0 == strcasecmp (value, "0")) ||
(0 == strcasecmp (value, "-1")) ||
(0 == strcmp (value, "no")) || (0 == strcmp (value, "n")) ||
(0 == strcmp (value, "f"))) {
- MONGOC_WARNING ("Deprecated boolean value for \"%1$s\": \"%2$s\", "
- "please update to \"%1$s=false\"",
+ MONGOC_WARNING ("Deprecated boolean value for \"%s\": \"%s\", "
+ "please update to \"%s=false\"",
key,
- value);
+ value,
+ key);
mongoc_uri_set_option_as_bool (uri, lkey, false);
} else {
goto UNSUPPORTED_VALUE;
}
} else if (!strcmp (lkey, MONGOC_URI_READPREFERENCETAGS)) {
/* Allows providing this key multiple times */
if (!mongoc_uri_parse_tags (uri, value)) {
goto UNSUPPORTED_VALUE;
}
} else if (!strcmp (lkey, MONGOC_URI_AUTHMECHANISM) ||
!strcmp (lkey, MONGOC_URI_AUTHSOURCE)) {
if (bson_has_field (&uri->credentials, lkey)) {
- MONGOC_WARNING ("Overwriting previously provided value for '%s'", key);
+ HANDLE_DUPE ();
}
mongoc_uri_bson_append_or_replace_key (&uri->credentials, lkey, value);
} else if (!strcmp (lkey, MONGOC_URI_READCONCERNLEVEL)) {
if (!mongoc_read_concern_is_default (uri->read_concern)) {
- MONGOC_WARNING ("Overwriting previously provided value for '%s'", key);
+ HANDLE_DUPE ();
}
mongoc_read_concern_set_level (uri->read_concern, value);
+ } else if (!strcmp (lkey, MONGOC_URI_GSSAPISERVICENAME)) {
+ char *tmp = bson_strdup_printf ("SERVICE_NAME:%s", value);
+ if (bson_has_field (&uri->credentials,
+ MONGOC_URI_AUTHMECHANISMPROPERTIES)) {
+ MONGOC_WARNING (
+ "authMechanismProperties SERVICE_NAME already set, ignoring '%s'",
+ lkey);
+ } else if (!mongoc_uri_parse_auth_mechanism_properties (uri, tmp)) {
+ bson_free (tmp);
+ goto UNSUPPORTED_VALUE;
+ }
+ bson_free (tmp);
} else if (!strcmp (lkey, MONGOC_URI_AUTHMECHANISMPROPERTIES)) {
if (bson_has_field (&uri->credentials, lkey)) {
- MONGOC_WARNING ("Overwriting previously provided value for '%s'", key);
+ HANDLE_DUPE ();
}
if (!mongoc_uri_parse_auth_mechanism_properties (uri, value)) {
goto UNSUPPORTED_VALUE;
}
} else if (!strcmp (lkey, MONGOC_URI_APPNAME)) {
/* Part of uri->options */
if (!mongoc_uri_set_appname (uri, value)) {
goto UNSUPPORTED_VALUE;
}
} else if (!strcmp (lkey, MONGOC_URI_COMPRESSORS)) {
if (!mongoc_uri_set_compressors (uri, value)) {
goto UNSUPPORTED_VALUE;
}
} else if (mongoc_uri_option_is_utf8 (lkey)) {
mongoc_uri_bson_append_or_replace_key (&uri->options, lkey, value);
} else {
/*
* Keys that aren't supported by a driver MUST be ignored.
*
* A WARN level logging message MUST be issued
* https://github.com/mongodb/specifications/blob/master/source/connection-string/connection-string-spec.rst#keys
*/
MONGOC_WARNING ("Unsupported URI option \"%s\"", key);
}
ret = true;
UNSUPPORTED_VALUE:
if (!ret) {
- MONGOC_WARNING ("Unsupported value for \"%s\": \"%s\"", key, value);
+ MONGOC_URI_ERROR (
+ error, "Unsupported value for \"%s\": \"%s\"", key, value);
}
CLEANUP:
bson_free (key);
bson_free (lkey);
bson_free (value);
return ret;
}
-static bool
+bool
mongoc_uri_parse_options (mongoc_uri_t *uri,
const char *str,
+ bool from_dns,
bson_error_t *error)
{
const char *end_option;
char *option;
again:
if ((option = scan_to_unichar (str, '&', "", &end_option))) {
- if (!mongoc_uri_parse_option (uri, option)) {
- MONGOC_URI_ERROR (error, "Unknown option or value for '%s'", option);
+ if (!mongoc_uri_parse_option (uri, option, from_dns, error)) {
bson_free (option);
return false;
}
bson_free (option);
str = end_option + 1;
goto again;
} else if (*str) {
- if (!mongoc_uri_parse_option (uri, str)) {
- MONGOC_URI_ERROR (error, "Unknown option or value for '%s'", str);
+ if (!mongoc_uri_parse_option (uri, str, from_dns, error)) {
return false;
}
}
return true;
}
static bool
mongoc_uri_finalize_auth (mongoc_uri_t *uri, bson_error_t *error)
{
bson_iter_t iter;
const char *source = NULL;
const char *mechanism = mongoc_uri_get_auth_mechanism (uri);
if (bson_iter_init_find_case (
&iter, &uri->credentials, MONGOC_URI_AUTHSOURCE)) {
source = bson_iter_utf8 (&iter, NULL);
}
/* authSource with GSSAPI or X509 should always be external */
if (mechanism) {
if (!strcasecmp (mechanism, "GSSAPI") ||
!strcasecmp (mechanism, "MONGODB-X509")) {
if (source) {
if (strcasecmp (source, "$external")) {
MONGOC_URI_ERROR (
error,
"%s",
"GSSAPI and X509 require \"$external\" authSource");
return false;
}
} else {
bson_append_utf8 (
&uri->credentials, MONGOC_URI_AUTHSOURCE, -1, "$external", -1);
}
}
+ /* MONGODB-X509 is the only mechanism that doesn't require username */
+ if (strcasecmp (mechanism, "MONGODB-X509")) {
+ if (!mongoc_uri_get_username (uri)) {
+ MONGOC_URI_ERROR (error,
+ "'%s' authentication mechanism requires username",
+ mechanism);
+ return false;
+ }
+ }
}
return true;
}
static bool
mongoc_uri_parse_before_slash (mongoc_uri_t *uri,
const char *before_slash,
bson_error_t *error)
{
char *userpass;
const char *hosts;
userpass = scan_to_unichar (before_slash, '@', "", &hosts);
if (userpass) {
if (!mongoc_uri_parse_userpass (uri, userpass, error)) {
goto error;
}
hosts++; /* advance past "@" */
if (*hosts == '@') {
/* special case: "mongodb://alice@@localhost" */
MONGOC_URI_ERROR (
error, "Invalid username or password. %s", escape_instructions);
goto error;
}
} else {
hosts = before_slash;
}
- if (!mongoc_uri_parse_hosts (uri, hosts)) {
- MONGOC_URI_ERROR (error, "%s", "Invalid host string in URI");
- goto error;
+ if (uri->is_srv) {
+ if (!mongoc_uri_parse_srv (uri, hosts)) {
+ MONGOC_URI_ERROR (error, "%s", "Invalid service name in URI");
+ goto error;
+ }
+ } else {
+ if (!mongoc_uri_parse_hosts (uri, hosts)) {
+ MONGOC_URI_ERROR (error, "%s", "Invalid host string in URI");
+ goto error;
+ }
}
bson_free (userpass);
return true;
error:
bson_free (userpass);
return false;
}
static bool
mongoc_uri_parse (mongoc_uri_t *uri, const char *str, bson_error_t *error)
{
char *before_slash = NULL;
const char *tmp;
- if (!mongoc_uri_parse_scheme (str, &str)) {
+ if (!bson_utf8_validate (str, strlen (str), false /* allow_null */)) {
+ MONGOC_URI_ERROR (error, "%s", "Invalid UTF-8 in URI");
+ goto error;
+ }
+
+ if (!mongoc_uri_parse_scheme (uri, str, &str)) {
MONGOC_URI_ERROR (
- error, "%s", "Invalid URI Schema, expecting 'mongodb://'");
+ error,
+ "%s",
+ "Invalid URI Schema, expecting 'mongodb://' or 'mongodb+srv://'");
goto error;
}
before_slash = scan_to_unichar (str, '/', "", &tmp);
if (!before_slash) {
before_slash = bson_strdup (str);
str += strlen (before_slash);
} else {
str = tmp;
}
if (!mongoc_uri_parse_before_slash (uri, before_slash, error)) {
goto error;
}
if (*str) {
if (*str == '/') {
str++;
if (*str) {
if (!mongoc_uri_parse_database (uri, str, &str)) {
MONGOC_URI_ERROR (error, "%s", "Invalid database name in URI");
goto error;
}
}
if (*str == '?') {
str++;
if (*str) {
- if (!mongoc_uri_parse_options (uri, str, error)) {
+ if (!mongoc_uri_parse_options (
+ uri, str, false /* from DNS */, error)) {
goto error;
}
}
}
} else {
MONGOC_URI_ERROR (error, "%s", "Expected end of hostname delimiter");
goto error;
}
}
+ /* Initial DNS Seedlist Discovery Spec: "If mongodb+srv is used, a driver
+ * MUST implicitly also enable TLS." */
+ if (uri->is_srv && !bson_has_field (&uri->options, "ssl")) {
+ mongoc_uri_set_option_as_bool (uri, "ssl", true);
+ }
+
if (!mongoc_uri_finalize_auth (uri, error)) {
goto error;
}
bson_free (before_slash);
return true;
error:
bson_free (before_slash);
return false;
}
const mongoc_host_list_t *
mongoc_uri_get_hosts (const mongoc_uri_t *uri)
{
BSON_ASSERT (uri);
return uri->hosts;
}
const char *
mongoc_uri_get_replica_set (const mongoc_uri_t *uri)
{
bson_iter_t iter;
BSON_ASSERT (uri);
if (bson_iter_init_find_case (&iter, &uri->options, MONGOC_URI_REPLICASET) &&
BSON_ITER_HOLDS_UTF8 (&iter)) {
return bson_iter_utf8 (&iter, NULL);
}
return NULL;
}
const bson_t *
mongoc_uri_get_credentials (const mongoc_uri_t *uri)
{
BSON_ASSERT (uri);
return &uri->credentials;
}
const char *
mongoc_uri_get_auth_mechanism (const mongoc_uri_t *uri)
{
bson_iter_t iter;
BSON_ASSERT (uri);
if (bson_iter_init_find_case (
&iter, &uri->credentials, MONGOC_URI_AUTHMECHANISM) &&
BSON_ITER_HOLDS_UTF8 (&iter)) {
return bson_iter_utf8 (&iter, NULL);
}
return NULL;
}
bool
mongoc_uri_set_auth_mechanism (mongoc_uri_t *uri, const char *value)
{
size_t len;
BSON_ASSERT (value);
len = strlen (value);
if (!bson_utf8_validate (value, len, false)) {
return false;
}
mongoc_uri_bson_append_or_replace_key (
&uri->credentials, MONGOC_URI_AUTHMECHANISM, value);
return true;
}
bool
mongoc_uri_get_mechanism_properties (const mongoc_uri_t *uri,
bson_t *properties /* OUT */)
{
bson_iter_t iter;
BSON_ASSERT (uri);
BSON_ASSERT (properties);
if (bson_iter_init_find_case (
&iter, &uri->credentials, MONGOC_URI_AUTHMECHANISMPROPERTIES) &&
BSON_ITER_HOLDS_DOCUMENT (&iter)) {
uint32_t len = 0;
const uint8_t *data = NULL;
bson_iter_document (&iter, &len, &data);
bson_init_static (properties, data, len);
return true;
}
return false;
}
bool
mongoc_uri_set_mechanism_properties (mongoc_uri_t *uri,
const bson_t *properties)
{
bson_iter_t iter;
bson_t tmp = BSON_INITIALIZER;
bool r;
BSON_ASSERT (uri);
BSON_ASSERT (properties);
if (bson_iter_init_find (
&iter, &uri->credentials, MONGOC_URI_AUTHMECHANISMPROPERTIES)) {
/* copy all elements to tmp besides authMechanismProperties */
bson_copy_to_excluding_noinit (&uri->credentials,
&tmp,
MONGOC_URI_AUTHMECHANISMPROPERTIES,
(char *) NULL);
r = BSON_APPEND_DOCUMENT (
&tmp, MONGOC_URI_AUTHMECHANISMPROPERTIES, properties);
if (!r) {
bson_destroy (&tmp);
return false;
}
bson_destroy (&uri->credentials);
bson_copy_to (&tmp, &uri->credentials);
bson_destroy (&tmp);
return true;
} else {
return BSON_APPEND_DOCUMENT (
&uri->credentials, MONGOC_URI_AUTHMECHANISMPROPERTIES, properties);
}
}
static bool
_mongoc_uri_assign_read_prefs_mode (mongoc_uri_t *uri, bson_error_t *error)
{
const char *str;
bson_iter_t iter;
BSON_ASSERT (uri);
if (mongoc_uri_get_option_as_bool (uri, MONGOC_URI_SLAVEOK, false)) {
mongoc_read_prefs_set_mode (uri->read_prefs,
MONGOC_READ_SECONDARY_PREFERRED);
}
if (bson_iter_init_find_case (
&iter, &uri->options, MONGOC_URI_READPREFERENCE) &&
BSON_ITER_HOLDS_UTF8 (&iter)) {
str = bson_iter_utf8 (&iter, NULL);
if (0 == strcasecmp ("primary", str)) {
mongoc_read_prefs_set_mode (uri->read_prefs, MONGOC_READ_PRIMARY);
} else if (0 == strcasecmp ("primarypreferred", str)) {
mongoc_read_prefs_set_mode (uri->read_prefs,
MONGOC_READ_PRIMARY_PREFERRED);
} else if (0 == strcasecmp ("secondary", str)) {
mongoc_read_prefs_set_mode (uri->read_prefs, MONGOC_READ_SECONDARY);
} else if (0 == strcasecmp ("secondarypreferred", str)) {
mongoc_read_prefs_set_mode (uri->read_prefs,
MONGOC_READ_SECONDARY_PREFERRED);
} else if (0 == strcasecmp ("nearest", str)) {
mongoc_read_prefs_set_mode (uri->read_prefs, MONGOC_READ_NEAREST);
} else {
MONGOC_URI_ERROR (
- error,
- "Unsupported readPreference value [readPreference=%s].",
- str);
+ error, "Unsupported readPreference value [readPreference=%s]", str);
return false;
}
}
return true;
}
-static void
-_mongoc_uri_build_write_concern (mongoc_uri_t *uri) /* IN */
+static bool
+_mongoc_uri_build_write_concern (mongoc_uri_t *uri, bson_error_t *error)
{
mongoc_write_concern_t *write_concern;
const char *str;
bson_iter_t iter;
int32_t wtimeoutms;
int value;
BSON_ASSERT (uri);
write_concern = mongoc_write_concern_new ();
+ uri->write_concern = write_concern;
if (bson_iter_init_find_case (&iter, &uri->options, MONGOC_URI_SAFE) &&
BSON_ITER_HOLDS_BOOL (&iter)) {
mongoc_write_concern_set_w (
write_concern,
bson_iter_bool (&iter) ? 1 : MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED);
}
wtimeoutms = mongoc_uri_get_option_as_int32 (uri, MONGOC_URI_WTIMEOUTMS, 0);
if (bson_iter_init_find_case (&iter, &uri->options, MONGOC_URI_JOURNAL) &&
BSON_ITER_HOLDS_BOOL (&iter)) {
mongoc_write_concern_set_journal (write_concern, bson_iter_bool (&iter));
}
if (bson_iter_init_find_case (&iter, &uri->options, MONGOC_URI_W)) {
if (BSON_ITER_HOLDS_INT32 (&iter)) {
value = bson_iter_int32 (&iter);
switch (value) {
case MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED:
case MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED:
- /* Warn on conflict, since write concern will be validated later */
if (mongoc_write_concern_get_journal (write_concern)) {
- MONGOC_WARNING ("Journal conflicts with w value [w=%d].", value);
+ MONGOC_URI_ERROR (
+ error, "Journal conflicts with w value [w=%d]", value);
+ return false;
}
mongoc_write_concern_set_w (write_concern, value);
break;
default:
if (value > 0) {
mongoc_write_concern_set_w (write_concern, value);
if (value > 1) {
mongoc_write_concern_set_wtimeout (write_concern, wtimeoutms);
}
break;
}
- MONGOC_WARNING ("Unsupported w value [w=%d].", value);
- break;
+ MONGOC_URI_ERROR (error, "Unsupported w value [w=%d]", value);
+ return false;
}
} else if (BSON_ITER_HOLDS_UTF8 (&iter)) {
str = bson_iter_utf8 (&iter, NULL);
if (0 == strcasecmp ("majority", str)) {
mongoc_write_concern_set_wmajority (write_concern, wtimeoutms);
} else {
mongoc_write_concern_set_wtag (write_concern, str);
mongoc_write_concern_set_wtimeout (write_concern, wtimeoutms);
}
} else {
BSON_ASSERT (false);
+ return false;
}
}
- uri->write_concern = write_concern;
+ return true;
}
/* can't use mongoc_uri_get_option_as_int32, it treats 0 specially */
static int32_t
_mongoc_uri_get_max_staleness_option (const mongoc_uri_t *uri)
{
const bson_t *options;
bson_iter_t iter;
int32_t retval = MONGOC_NO_MAX_STALENESS;
if ((options = mongoc_uri_get_options (uri)) &&
bson_iter_init_find_case (
&iter, options, MONGOC_URI_MAXSTALENESSSECONDS) &&
BSON_ITER_HOLDS_INT32 (&iter)) {
retval = bson_iter_int32 (&iter);
if (retval == 0) {
MONGOC_WARNING (
"Unsupported value for \"" MONGOC_URI_MAXSTALENESSSECONDS
"\": \"%d\"",
retval);
retval = -1;
} else if (retval < 0 && retval != -1) {
MONGOC_WARNING (
"Unsupported value for \"" MONGOC_URI_MAXSTALENESSSECONDS
"\": \"%d\"",
retval);
retval = MONGOC_NO_MAX_STALENESS;
}
}
return retval;
}
mongoc_uri_t *
mongoc_uri_new_with_error (const char *uri_string, bson_error_t *error)
{
mongoc_uri_t *uri;
int32_t max_staleness_seconds;
uri = (mongoc_uri_t *) bson_malloc0 (sizeof *uri);
bson_init (&uri->options);
bson_init (&uri->credentials);
bson_init (&uri->compressors);
/* Initialize read_prefs, since parsing may add to it */
uri->read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
/* Initialize empty read_concern */
uri->read_concern = mongoc_read_concern_new ();
if (!uri_string) {
uri_string = "mongodb://127.0.0.1/";
}
if (!mongoc_uri_parse (uri, uri_string, error)) {
mongoc_uri_destroy (uri);
return NULL;
}
uri->str = bson_strdup (uri_string);
if (!_mongoc_uri_assign_read_prefs_mode (uri, error)) {
mongoc_uri_destroy (uri);
return NULL;
}
max_staleness_seconds = _mongoc_uri_get_max_staleness_option (uri);
mongoc_read_prefs_set_max_staleness_seconds (uri->read_prefs,
max_staleness_seconds);
if (!mongoc_read_prefs_is_valid (uri->read_prefs)) {
mongoc_uri_destroy (uri);
MONGOC_URI_ERROR (error, "%s", "Invalid readPreferences");
return NULL;
}
- _mongoc_uri_build_write_concern (uri);
+ if (!_mongoc_uri_build_write_concern (uri, error)) {
+ mongoc_uri_destroy (uri);
+ return NULL;
+ }
if (!mongoc_write_concern_is_valid (uri->write_concern)) {
mongoc_uri_destroy (uri);
MONGOC_URI_ERROR (error, "%s", "Invalid writeConcern");
return NULL;
}
return uri;
}
mongoc_uri_t *
mongoc_uri_new (const char *uri_string)
{
bson_error_t error = {0};
mongoc_uri_t *uri;
uri = mongoc_uri_new_with_error (uri_string, &error);
if (error.domain) {
MONGOC_WARNING ("Error parsing URI: '%s'", error.message);
}
return uri;
}
mongoc_uri_t *
mongoc_uri_new_for_host_port (const char *hostname, uint16_t port)
{
mongoc_uri_t *uri;
char *str;
BSON_ASSERT (hostname);
BSON_ASSERT (port);
str = bson_strdup_printf ("mongodb://%s:%hu/", hostname, port);
uri = mongoc_uri_new (str);
bson_free (str);
return uri;
}
const char *
mongoc_uri_get_username (const mongoc_uri_t *uri)
{
BSON_ASSERT (uri);
return uri->username;
}
bool
mongoc_uri_set_username (mongoc_uri_t *uri, const char *username)
{
size_t len;
BSON_ASSERT (username);
len = strlen (username);
if (!bson_utf8_validate (username, len, false)) {
return false;
}
if (uri->username) {
bson_free (uri->username);
}
uri->username = bson_strdup (username);
return true;
}
const char *
mongoc_uri_get_password (const mongoc_uri_t *uri)
{
BSON_ASSERT (uri);
return uri->password;
}
bool
mongoc_uri_set_password (mongoc_uri_t *uri, const char *password)
{
size_t len;
BSON_ASSERT (password);
len = strlen (password);
if (!bson_utf8_validate (password, len, false)) {
return false;
}
if (uri->password) {
bson_free (uri->password);
}
uri->password = bson_strdup (password);
return true;
}
const char *
mongoc_uri_get_database (const mongoc_uri_t *uri)
{
BSON_ASSERT (uri);
return uri->database;
}
bool
mongoc_uri_set_database (mongoc_uri_t *uri, const char *database)
{
size_t len;
BSON_ASSERT (database);
len = strlen (database);
if (!bson_utf8_validate (database, len, false)) {
return false;
}
if (uri->database) {
bson_free (uri->database);
}
uri->database = bson_strdup (database);
return true;
}
const char *
mongoc_uri_get_auth_source (const mongoc_uri_t *uri)
{
bson_iter_t iter;
BSON_ASSERT (uri);
if (bson_iter_init_find_case (
&iter, &uri->credentials, MONGOC_URI_AUTHSOURCE)) {
return bson_iter_utf8 (&iter, NULL);
}
return uri->database ? uri->database : "admin";
}
bool
mongoc_uri_set_auth_source (mongoc_uri_t *uri, const char *value)
{
size_t len;
BSON_ASSERT (value);
len = strlen (value);
if (!bson_utf8_validate (value, len, false)) {
return false;
}
mongoc_uri_bson_append_or_replace_key (
&uri->credentials, MONGOC_URI_AUTHSOURCE, value);
return true;
}
const char *
mongoc_uri_get_appname (const mongoc_uri_t *uri)
{
BSON_ASSERT (uri);
return mongoc_uri_get_option_as_utf8 (uri, MONGOC_URI_APPNAME, NULL);
}
bool
mongoc_uri_set_appname (mongoc_uri_t *uri, const char *value)
{
BSON_ASSERT (value);
if (!bson_utf8_validate (value, strlen (value), false)) {
return false;
}
if (!_mongoc_handshake_appname_is_valid (value)) {
return false;
}
mongoc_uri_bson_append_or_replace_key (
&uri->options, MONGOC_URI_APPNAME, value);
return true;
}
bool
mongoc_uri_set_compressors (mongoc_uri_t *uri, const char *value)
{
const char *end_compressor;
char *entry;
bson_destroy (&uri->compressors);
bson_init (&uri->compressors);
if (value && !bson_utf8_validate (value, strlen (value), false)) {
return false;
}
while ((entry = scan_to_unichar (value, ',', "", &end_compressor))) {
if (mongoc_compressor_supported (entry)) {
mongoc_uri_bson_append_or_replace_key (
&uri->compressors, entry, "yes");
} else {
MONGOC_WARNING ("Unsupported compressor: '%s'", entry);
}
value = end_compressor + 1;
bson_free (entry);
}
if (value) {
if (mongoc_compressor_supported (value)) {
mongoc_uri_bson_append_or_replace_key (
&uri->compressors, value, "yes");
} else {
MONGOC_WARNING ("Unsupported compressor: '%s'", value);
}
}
return true;
}
const bson_t *
mongoc_uri_get_compressors (const mongoc_uri_t *uri)
{
BSON_ASSERT (uri);
return &uri->compressors;
}
/* can't use mongoc_uri_get_option_as_int32, it treats 0 specially */
int32_t
mongoc_uri_get_local_threshold_option (const mongoc_uri_t *uri)
{
   const bson_t *options = mongoc_uri_get_options (uri);
   bson_iter_t iter;
   int32_t value;

   /* missing or non-int32 option: use the compiled-in default */
   if (!options ||
       !bson_iter_init_find_case (&iter, options, "localthresholdms") ||
       !BSON_ITER_HOLDS_INT32 (&iter)) {
      return MONGOC_TOPOLOGY_LOCAL_THRESHOLD_MS;
   }

   value = bson_iter_int32 (&iter);
   if (value < 0) {
      /* negative values are rejected, falling back to the default */
      MONGOC_WARNING ("Invalid localThresholdMS: %d", value);
      return MONGOC_TOPOLOGY_LOCAL_THRESHOLD_MS;
   }

   return value;
}
+
+const char *
+mongoc_uri_get_service (const mongoc_uri_t *uri)
+{
+ if (uri->is_srv) {
+ return uri->srv;
+ }
+
+ return NULL;
+}
+
+
/* Borrow the URI's option document; owned by the uri, do not modify. */
const bson_t *
mongoc_uri_get_options (const mongoc_uri_t *uri)
{
   BSON_ASSERT (uri);

   return &uri->options;
}
/*
 * Release every resource owned by 'uri'. A NULL argument is accepted and
 * ignored.
 */
void
mongoc_uri_destroy (mongoc_uri_t *uri)
{
   if (uri) {
      _mongoc_host_list_destroy_all (uri->hosts);
      bson_free (uri->str);
      bson_free (uri->database);
      bson_free (uri->username);
      bson_destroy (&uri->options);
      bson_destroy (&uri->credentials);
      bson_destroy (&uri->compressors);
      mongoc_read_prefs_destroy (uri->read_prefs);
      mongoc_read_concern_destroy (uri->read_concern);
      mongoc_write_concern_destroy (uri->write_concern);
      /* scrub the password bytes before freeing so credentials do not
       * linger in released heap memory */
      if (uri->password) {
         bson_zero_free (uri->password, strlen (uri->password));
      }
      bson_free (uri);
   }
}
mongoc_uri_t *
mongoc_uri_copy (const mongoc_uri_t *uri)
{
mongoc_uri_t *copy;
mongoc_host_list_t *iter;
+ bson_error_t error;
BSON_ASSERT (uri);
copy = (mongoc_uri_t *) bson_malloc0 (sizeof (*copy));
copy->str = bson_strdup (uri->str);
+ copy->is_srv = uri->is_srv;
+ bson_strncpy (copy->srv, uri->srv, sizeof uri->srv);
copy->username = bson_strdup (uri->username);
copy->password = bson_strdup (uri->password);
copy->database = bson_strdup (uri->database);
copy->read_prefs = mongoc_read_prefs_copy (uri->read_prefs);
copy->read_concern = mongoc_read_concern_copy (uri->read_concern);
copy->write_concern = mongoc_write_concern_copy (uri->write_concern);
for (iter = uri->hosts; iter; iter = iter->next) {
- if (!mongoc_uri_append_host (copy, iter->host, iter->port)) {
+ if (!mongoc_uri_append_host (copy, iter->host, iter->port, &error)) {
+ MONGOC_ERROR ("%s", error.message);
mongoc_uri_destroy (copy);
return NULL;
}
}
bson_copy_to (&uri->options, &copy->options);
bson_copy_to (&uri->credentials, &copy->credentials);
bson_copy_to (&uri->compressors, &copy->compressors);
return copy;
}
/* Return the original connection string; owned by the uri. */
const char *
mongoc_uri_get_string (const mongoc_uri_t *uri)
{
   BSON_ASSERT (uri);

   return uri->str;
}
/* Deprecated accessor (see header): exposes only the tag sets of the
 * read preference. Prefer mongoc_uri_get_read_prefs_t. */
const bson_t *
mongoc_uri_get_read_prefs (const mongoc_uri_t *uri)
{
   BSON_ASSERT (uri);

   return mongoc_read_prefs_get_tags (uri->read_prefs);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_uri_unescape --
 *
 *       Unescapes a UTF-8 encoded string containing URI escaped segments
 *       such as %20.
 *
 *       It is a programming error to call this function with a string
 *       that is not UTF-8 encoded!
 *
 * Returns:
 *       A newly allocated string that should be freed with bson_free()
 *       or NULL on failure, such as invalid % encoding.
 *
 * Side effects:
 *       None.
 *
 *--------------------------------------------------------------------------
 */
char *
mongoc_uri_unescape (const char *escaped_string)
{
   bson_unichar_t c;
   bson_string_t *str;
   unsigned int hex = 0;
   const char *ptr;
   const char *end;
   size_t len;

   BSON_ASSERT (escaped_string);

   len = strlen (escaped_string);

   /*
    * Double check that this is a UTF-8 valid string. Bail out if necessary.
    */
   if (!bson_utf8_validate (escaped_string, len, false)) {
      MONGOC_WARNING ("%s(): escaped_string contains invalid UTF-8", BSON_FUNC);
      return NULL;
   }

   ptr = escaped_string;
   end = ptr + len;
   str = bson_string_new (NULL);

   for (; *ptr; ptr = bson_utf8_next_char (ptr)) {
      c = bson_utf8_get_char (ptr);
      switch (c) {
      case '%':
         /* Cast through unsigned char before isxdigit(): passing a plain
          * (possibly negative) char is undefined behavior when a multi-byte
          * UTF-8 sequence follows '%'. A terminating NUL within the escape
          * is rejected here too, since '\0' is not a hex digit. */
         if (((end - ptr) < 2) ||
             !isxdigit ((unsigned char) ptr[1]) ||
             !isxdigit ((unsigned char) ptr[2]) ||
#ifdef _MSC_VER
             (1 != sscanf_s (&ptr[1], "%02x", &hex)) ||
#else
             (1 != sscanf (&ptr[1], "%02x", &hex)) ||
#endif
             !isprint (hex)) {
            bson_string_free (str, true);
            MONGOC_WARNING ("Invalid %% escape sequence");
            return NULL;
         }
         bson_string_append_c (str, hex);
         /* skip the two hex digits; the loop advances past the '%' */
         ptr += 2;
         break;
      default:
         bson_string_append_unichar (str, c);
         break;
      }
   }

   /* hand ownership of the buffer to the caller */
   return bson_string_free (str, false);
}
/* Return the parsed read preference; owned by the uri. */
const mongoc_read_prefs_t *
mongoc_uri_get_read_prefs_t (const mongoc_uri_t *uri) /* IN */
{
   BSON_ASSERT (uri);

   return uri->read_prefs;
}
/* Replace the URI's read preference with a private copy of 'prefs'. */
void
mongoc_uri_set_read_prefs_t (mongoc_uri_t *uri,
                             const mongoc_read_prefs_t *prefs)
{
   BSON_ASSERT (uri);
   BSON_ASSERT (prefs);

   /* release the previous preference before installing the copy */
   mongoc_read_prefs_destroy (uri->read_prefs);
   uri->read_prefs = mongoc_read_prefs_copy (prefs);
}
/* Return the URI's read concern; owned by the uri. */
const mongoc_read_concern_t *
mongoc_uri_get_read_concern (const mongoc_uri_t *uri) /* IN */
{
   BSON_ASSERT (uri);

   return uri->read_concern;
}
/* Replace the URI's read concern with a private copy of 'rc'. */
void
mongoc_uri_set_read_concern (mongoc_uri_t *uri, const mongoc_read_concern_t *rc)
{
   BSON_ASSERT (uri);
   BSON_ASSERT (rc);

   mongoc_read_concern_destroy (uri->read_concern);
   uri->read_concern = mongoc_read_concern_copy (rc);
}
/* Return the URI's write concern; owned by the uri. */
const mongoc_write_concern_t *
mongoc_uri_get_write_concern (const mongoc_uri_t *uri) /* IN */
{
   BSON_ASSERT (uri);

   return uri->write_concern;
}
/* Replace the URI's write concern with a private copy of 'wc'. */
void
mongoc_uri_set_write_concern (mongoc_uri_t *uri,
                              const mongoc_write_concern_t *wc)
{
   BSON_ASSERT (uri);
   BSON_ASSERT (wc);

   mongoc_write_concern_destroy (uri->write_concern);
   uri->write_concern = mongoc_write_concern_copy (wc);
}
/*
 * True when the URI requests TLS: either "ssl" is set as an explicit
 * boolean, or any ssl-related certificate/validation option is present
 * (presence of those options implies ssl=true).
 */
bool
mongoc_uri_get_ssl (const mongoc_uri_t *uri) /* IN */
{
   bson_iter_t iter;
   const char *implied_opts[] = {MONGOC_URI_SSLCLIENTCERTIFICATEKEYFILE,
                                 MONGOC_URI_SSLCERTIFICATEAUTHORITYFILE,
                                 MONGOC_URI_SSLALLOWINVALIDCERTIFICATES,
                                 MONGOC_URI_SSLALLOWINVALIDHOSTNAMES};
   size_t i;

   BSON_ASSERT (uri);

   /* an explicit boolean "ssl" option always wins */
   if (bson_iter_init_find_case (&iter, &uri->options, MONGOC_URI_SSL) &&
       BSON_ITER_HOLDS_BOOL (&iter)) {
      return bson_iter_bool (&iter);
   }

   for (i = 0; i < sizeof implied_opts / sizeof implied_opts[0]; i++) {
      if (bson_has_field (&uri->options, implied_opts[i])) {
         return true;
      }
   }

   return false;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_uri_get_option_as_int32 --
*
* Checks if the URI 'option' is set and of correct type (int32).
* The special value '0' is considered as "unset".
* This is so users can provide
* sprintf(mongodb://localhost/?option=%d, myvalue) style connection
*strings,
* and still apply default values.
*
* If not set, or set to invalid type, 'fallback' is returned.
*
* NOTE: 'option' is case*in*sensitive.
*
* Returns:
- * The value of 'option' if available as int32 (and not 0), or 'fallback'.
+ * The value of 'option' if available as int32 (and not 0), or
+ *'fallback'.
*
*--------------------------------------------------------------------------
*/
/* Case-insensitive int32 option lookup; 0 counts as "unset" (see the
 * comment block above) and yields 'fallback', as does a missing or
 * non-int32 value. */
int32_t
mongoc_uri_get_option_as_int32 (const mongoc_uri_t *uri,
                                const char *option,
                                int32_t fallback)
{
   const bson_t *options = mongoc_uri_get_options (uri);
   bson_iter_t iter;
   int32_t value;

   if (!options || !bson_iter_init_find_case (&iter, options, option) ||
       !BSON_ITER_HOLDS_INT32 (&iter)) {
      return fallback;
   }

   value = bson_iter_int32 (&iter);
   return value ? value : fallback;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_uri_set_option_as_int32 --
*
* Sets a URI option 'after the fact'. Allows users to set individual
* URI options without passing them as a connection string.
*
* Only allows a set of known options to be set.
* @see mongoc_uri_option_is_int32 ().
*
* Does in-place-update of the option BSON if 'option' is already set.
* Appends the option to the end otherwise.
*
* NOTE: If 'option' is already set, and is of invalid type, this
* function will return false.
*
* NOTE: 'option' is case*in*sensitive.
*
* Returns:
* true on successfully setting the option, false on failure.
*
*--------------------------------------------------------------------------
*/
bool
mongoc_uri_set_option_as_int32 (mongoc_uri_t *uri,
const char *option,
int32_t value)
{
BSON_ASSERT (option);
if (!mongoc_uri_option_is_int32 (option)) {
return false;
}
- /* Server Discovery and Monitoring Spec: "the driver MUST NOT permit users to
- * configure it less than minHeartbeatFrequencyMS (500ms)." */
+ /* Server Discovery and Monitoring Spec: "the driver MUST NOT permit users
+ * to configure it less than minHeartbeatFrequencyMS (500ms)." */
if (!bson_strcasecmp (option, MONGOC_URI_HEARTBEATFREQUENCYMS) &&
value < MONGOC_TOPOLOGY_MIN_HEARTBEAT_FREQUENCY_MS) {
MONGOC_WARNING ("Invalid \"%s\" of %d: must be at least %d",
option,
value,
MONGOC_TOPOLOGY_MIN_HEARTBEAT_FREQUENCY_MS);
return false;
}
/* zlib levels are from -1 (default) through 9 (best compression) */
if (!bson_strcasecmp (option, MONGOC_URI_ZLIBCOMPRESSIONLEVEL) &&
(value < -1 || value > 9)) {
MONGOC_WARNING (
"Invalid \"%s\" of %d: must be between -1 and 9", option, value);
return false;
}
return _mongoc_uri_set_option_as_int32 (uri, option, value);
}
/*
 *--------------------------------------------------------------------------
 *
 * _mongoc_uri_set_option_as_int32 --
 *
 *       Same as mongoc_uri_set_option_as_int32, except the option is not
 *       validated against valid int32 options
 *
 * Returns:
 *       true on successfully setting the option, false on failure.
 *
 *--------------------------------------------------------------------------
 */
bool
_mongoc_uri_set_option_as_int32 (mongoc_uri_t *uri,
                                 const char *option,
                                 int32_t value)
{
   const bson_t *options;
   bson_iter_t iter;

   /* overwrite in place when the option already exists as an int32 */
   if ((options = mongoc_uri_get_options (uri)) &&
       bson_iter_init_find_case (&iter, options, option)) {
      if (BSON_ITER_HOLDS_INT32 (&iter)) {
         bson_iter_overwrite_int32 (&iter, value);
         return true;
      } else {
         /* present with a different type: refuse to clobber it */
         return false;
      }
   }

   /* not yet present: append, propagating any append failure instead of
    * ignoring bson_append_int32's return value */
   return bson_append_int32 (&uri->options, option, -1, value);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_uri_get_option_as_bool --
 *
 *       Look up 'option' (case-insensitively) in the URI options and
 *       return its value when it is stored as a bool; otherwise return
 *       'fallback'.
 *
 *--------------------------------------------------------------------------
 */
bool
mongoc_uri_get_option_as_bool (const mongoc_uri_t *uri,
                               const char *option,
                               bool fallback)
{
   const bson_t *options = mongoc_uri_get_options (uri);
   bson_iter_t iter;

   if (options && bson_iter_init_find_case (&iter, options, option) &&
       BSON_ITER_HOLDS_BOOL (&iter)) {
      return bson_iter_bool (&iter);
   }

   return fallback;
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_uri_set_option_as_bool --
 *
 *       Sets a URI option 'after the fact'. Allows users to set individual
 *       URI options without passing them as a connection string.
 *
 *       Only allows a set of known options to be set.
 *       @see mongoc_uri_option_is_bool ().
 *
 *       Does in-place-update of the option BSON if 'option' is already set.
 *       Appends the option to the end otherwise.
 *
 *       NOTE: If 'option' is already set, and is of invalid type, this
 *       function will return false.
 *
 *       NOTE: 'option' is case*in*sensitive.
 *
 * Returns:
 *       true on successfully setting the option, false on failure.
 *
 *--------------------------------------------------------------------------
 */
bool
mongoc_uri_set_option_as_bool (mongoc_uri_t *uri,
                               const char *option,
                               bool value)
{
   const bson_t *options;
   bson_iter_t iter;

   BSON_ASSERT (option);

   if (!mongoc_uri_option_is_bool (option)) {
      return false;
   }

   /* overwrite in place when the option already exists as a bool */
   if ((options = mongoc_uri_get_options (uri)) &&
       bson_iter_init_find_case (&iter, options, option)) {
      if (BSON_ITER_HOLDS_BOOL (&iter)) {
         bson_iter_overwrite_bool (&iter, value);
         return true;
      } else {
         /* present with a different type: refuse to clobber it */
         return false;
      }
   }

   /* not yet present: append, propagating any append failure instead of
    * ignoring bson_append_bool's return value */
   return bson_append_bool (&uri->options, option, -1, value);
}
/*
 *--------------------------------------------------------------------------
 *
 * mongoc_uri_get_option_as_utf8 --
 *
 *       Look up 'option' (case-insensitively) in the URI options. When it
 *       is present and stored as utf8, return its value; otherwise return
 *       'fallback'. The returned pointer aliases the uri's option bson.
 *
 *--------------------------------------------------------------------------
 */
const char *
mongoc_uri_get_option_as_utf8 (const mongoc_uri_t *uri,
                               const char *option,
                               const char *fallback)
{
   const bson_t *options = mongoc_uri_get_options (uri);
   bson_iter_t iter;

   if (options && bson_iter_init_find_case (&iter, options, option) &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      return bson_iter_utf8 (&iter, NULL);
   }

   return fallback;
}
/*
*--------------------------------------------------------------------------
*
* mongoc_uri_set_option_as_utf8 --
*
* Sets a URI option 'after the fact'. Allows users to set individual
* URI options without passing them as a connection string.
*
* Only allows a set of known options to be set.
* @see mongoc_uri_option_is_utf8 ().
*
- * If the option is not already set, this function will append it to the
- *end
- * of the options bson.
- * NOTE: If the option is already set the entire options bson will be
- * overwritten, containing the new option=value (at the same position).
+ * If the option is not already set, this function will append it to
+ *the end of the options bson. NOTE: If the option is already set the entire
+ *options bson will be overwritten, containing the new option=value
+ *(at the same position).
*
* NOTE: If 'option' is already set, and is of invalid type, this
* function will return false.
*
* NOTE: 'option' must be valid utf8.
*
* NOTE: 'option' is case*in*sensitive.
*
* Returns:
* true on successfully setting the option, false on failure.
*
*--------------------------------------------------------------------------
*/
/*
 * Set a known utf8 URI option after the fact. 'value' must be valid
 * UTF-8; "appname" is routed through mongoc_uri_set_appname so its extra
 * validation applies. Returns false on an unknown option or invalid value.
 */
bool
mongoc_uri_set_option_as_utf8 (mongoc_uri_t *uri,
                               const char *option,
                               const char *value)
{
   size_t len;

   BSON_ASSERT (option);
   /* consistent with mongoc_uri_set_auth_source / mongoc_uri_set_appname:
    * assert on NULL here rather than crashing inside strlen below */
   BSON_ASSERT (value);

   len = strlen (value);

   if (!bson_utf8_validate (value, len, false)) {
      return false;
   }

   if (!mongoc_uri_option_is_utf8 (option)) {
      return false;
   }

   if (!bson_strcasecmp (option, MONGOC_URI_APPNAME)) {
      /* appname has additional handshake-length validation */
      return mongoc_uri_set_appname (uri, value);
   } else {
      mongoc_uri_bson_append_or_replace_key (&uri->options, option, value);
   }

   return true;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-uri.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-uri.h
similarity index 98%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-uri.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-uri.h
index 3ff5bade..99a0e78c 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-uri.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-uri.h
@@ -1,194 +1,197 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_URI_H
#define MONGOC_URI_H
#if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-macros.h"
#include "mongoc-host-list.h"
#include "mongoc-read-prefs.h"
#include "mongoc-read-concern.h"
#include "mongoc-write-concern.h"
#include "mongoc-config.h"
#ifndef MONGOC_DEFAULT_PORT
#define MONGOC_DEFAULT_PORT 27017
#endif
#define MONGOC_URI_APPNAME "appname"
#define MONGOC_URI_AUTHMECHANISM "authmechanism"
#define MONGOC_URI_AUTHMECHANISMPROPERTIES "authmechanismproperties"
#define MONGOC_URI_AUTHSOURCE "authsource"
#define MONGOC_URI_CANONICALIZEHOSTNAME "canonicalizehostname"
#define MONGOC_URI_CONNECTTIMEOUTMS "connecttimeoutms"
#define MONGOC_URI_COMPRESSORS "compressors"
#define MONGOC_URI_GSSAPISERVICENAME "gssapiservicename"
#define MONGOC_URI_HEARTBEATFREQUENCYMS "heartbeatfrequencyms"
#define MONGOC_URI_JOURNAL "journal"
#define MONGOC_URI_LOCALTHRESHOLDMS "localthresholdms"
#define MONGOC_URI_MAXIDLETIMEMS "maxidletimems"
#define MONGOC_URI_MAXPOOLSIZE "maxpoolsize"
#define MONGOC_URI_MAXSTALENESSSECONDS "maxstalenessseconds"
#define MONGOC_URI_MINPOOLSIZE "minpoolsize"
#define MONGOC_URI_READCONCERNLEVEL "readconcernlevel"
#define MONGOC_URI_READPREFERENCE "readpreference"
#define MONGOC_URI_READPREFERENCETAGS "readpreferencetags"
#define MONGOC_URI_REPLICASET "replicaset"
+#define MONGOC_URI_RETRYWRITES "retrywrites"
#define MONGOC_URI_SAFE "safe"
#define MONGOC_URI_SERVERSELECTIONTIMEOUTMS "serverselectiontimeoutms"
#define MONGOC_URI_SERVERSELECTIONTRYONCE "serverselectiontryonce"
#define MONGOC_URI_SLAVEOK "slaveok"
#define MONGOC_URI_SOCKETCHECKINTERVALMS "socketcheckintervalms"
#define MONGOC_URI_SOCKETTIMEOUTMS "sockettimeoutms"
#define MONGOC_URI_SSL "ssl"
#define MONGOC_URI_SSLCLIENTCERTIFICATEKEYFILE "sslclientcertificatekeyfile"
#define MONGOC_URI_SSLCLIENTCERTIFICATEKEYPASSWORD \
"sslclientcertificatekeypassword"
#define MONGOC_URI_SSLCERTIFICATEAUTHORITYFILE "sslcertificateauthorityfile"
#define MONGOC_URI_SSLALLOWINVALIDCERTIFICATES "sslallowinvalidcertificates"
#define MONGOC_URI_SSLALLOWINVALIDHOSTNAMES "sslallowinvalidhostnames"
#define MONGOC_URI_W "w"
#define MONGOC_URI_WAITQUEUEMULTIPLE "waitqueuemultiple"
#define MONGOC_URI_WAITQUEUETIMEOUTMS "waitqueuetimeoutms"
#define MONGOC_URI_WTIMEOUTMS "wtimeoutms"
#define MONGOC_URI_ZLIBCOMPRESSIONLEVEL "zlibcompressionlevel"
BSON_BEGIN_DECLS
typedef struct _mongoc_uri_t mongoc_uri_t;
MONGOC_EXPORT (mongoc_uri_t *)
mongoc_uri_copy (const mongoc_uri_t *uri);
MONGOC_EXPORT (void)
mongoc_uri_destroy (mongoc_uri_t *uri);
MONGOC_EXPORT (mongoc_uri_t *)
mongoc_uri_new (const char *uri_string) BSON_GNUC_WARN_UNUSED_RESULT;
MONGOC_EXPORT (mongoc_uri_t *)
mongoc_uri_new_with_error (const char *uri_string,
bson_error_t *error) BSON_GNUC_WARN_UNUSED_RESULT;
MONGOC_EXPORT (mongoc_uri_t *)
mongoc_uri_new_for_host_port (const char *hostname,
uint16_t port) BSON_GNUC_WARN_UNUSED_RESULT;
MONGOC_EXPORT (const mongoc_host_list_t *)
mongoc_uri_get_hosts (const mongoc_uri_t *uri);
MONGOC_EXPORT (const char *)
+mongoc_uri_get_service (const mongoc_uri_t *uri);
+MONGOC_EXPORT (const char *)
mongoc_uri_get_database (const mongoc_uri_t *uri);
MONGOC_EXPORT (bool)
mongoc_uri_set_database (mongoc_uri_t *uri, const char *database);
MONGOC_EXPORT (const bson_t *)
mongoc_uri_get_compressors (const mongoc_uri_t *uri);
MONGOC_EXPORT (const bson_t *)
mongoc_uri_get_options (const mongoc_uri_t *uri);
MONGOC_EXPORT (const char *)
mongoc_uri_get_password (const mongoc_uri_t *uri);
MONGOC_EXPORT (bool)
mongoc_uri_set_password (mongoc_uri_t *uri, const char *password);
MONGOC_EXPORT (bool)
mongoc_uri_option_is_int32 (const char *key);
MONGOC_EXPORT (bool)
mongoc_uri_option_is_bool (const char *key);
MONGOC_EXPORT (bool)
mongoc_uri_option_is_utf8 (const char *key);
MONGOC_EXPORT (int32_t)
mongoc_uri_get_option_as_int32 (const mongoc_uri_t *uri,
const char *option,
int32_t fallback);
MONGOC_EXPORT (bool)
mongoc_uri_get_option_as_bool (const mongoc_uri_t *uri,
const char *option,
bool fallback);
MONGOC_EXPORT (const char *)
mongoc_uri_get_option_as_utf8 (const mongoc_uri_t *uri,
const char *option,
const char *fallback);
MONGOC_EXPORT (bool)
mongoc_uri_set_option_as_int32 (mongoc_uri_t *uri,
const char *option,
int32_t value);
MONGOC_EXPORT (bool)
mongoc_uri_set_option_as_bool (mongoc_uri_t *uri,
const char *option,
bool value);
MONGOC_EXPORT (bool)
mongoc_uri_set_option_as_utf8 (mongoc_uri_t *uri,
const char *option,
const char *value);
MONGOC_EXPORT (const bson_t *)
mongoc_uri_get_read_prefs (const mongoc_uri_t *uri)
BSON_GNUC_DEPRECATED_FOR (mongoc_uri_get_read_prefs_t);
MONGOC_EXPORT (const char *)
mongoc_uri_get_replica_set (const mongoc_uri_t *uri);
MONGOC_EXPORT (const char *)
mongoc_uri_get_string (const mongoc_uri_t *uri);
MONGOC_EXPORT (const char *)
mongoc_uri_get_username (const mongoc_uri_t *uri);
MONGOC_EXPORT (bool)
mongoc_uri_set_username (mongoc_uri_t *uri, const char *username);
MONGOC_EXPORT (const bson_t *)
mongoc_uri_get_credentials (const mongoc_uri_t *uri);
MONGOC_EXPORT (const char *)
mongoc_uri_get_auth_source (const mongoc_uri_t *uri);
MONGOC_EXPORT (bool)
mongoc_uri_set_auth_source (mongoc_uri_t *uri, const char *value);
MONGOC_EXPORT (const char *)
mongoc_uri_get_appname (const mongoc_uri_t *uri);
MONGOC_EXPORT (bool)
mongoc_uri_set_appname (mongoc_uri_t *uri, const char *value);
MONGOC_EXPORT (bool)
mongoc_uri_set_compressors (mongoc_uri_t *uri, const char *value);
MONGOC_EXPORT (const char *)
mongoc_uri_get_auth_mechanism (const mongoc_uri_t *uri);
MONGOC_EXPORT (bool)
mongoc_uri_set_auth_mechanism (mongoc_uri_t *uri, const char *value);
MONGOC_EXPORT (bool)
mongoc_uri_get_mechanism_properties (const mongoc_uri_t *uri,
bson_t *properties);
MONGOC_EXPORT (bool)
mongoc_uri_set_mechanism_properties (mongoc_uri_t *uri,
const bson_t *properties);
MONGOC_EXPORT (bool)
mongoc_uri_get_ssl (const mongoc_uri_t *uri);
MONGOC_EXPORT (char *)
mongoc_uri_unescape (const char *escaped_string);
MONGOC_EXPORT (const mongoc_read_prefs_t *)
mongoc_uri_get_read_prefs_t (const mongoc_uri_t *uri);
MONGOC_EXPORT (void)
mongoc_uri_set_read_prefs_t (mongoc_uri_t *uri,
const mongoc_read_prefs_t *prefs);
MONGOC_EXPORT (const mongoc_write_concern_t *)
mongoc_uri_get_write_concern (const mongoc_uri_t *uri);
MONGOC_EXPORT (void)
mongoc_uri_set_write_concern (mongoc_uri_t *uri,
const mongoc_write_concern_t *wc);
MONGOC_EXPORT (const mongoc_read_concern_t *)
mongoc_uri_get_read_concern (const mongoc_uri_t *uri);
MONGOC_EXPORT (void)
mongoc_uri_set_read_concern (mongoc_uri_t *uri,
const mongoc_read_concern_t *rc);
BSON_END_DECLS
#endif /* MONGOC_URI_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-util-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-util-private.h
similarity index 86%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-util-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-util-private.h
index ae4e965b..8191e2e2 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-util-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-util-private.h
@@ -1,123 +1,119 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_UTIL_PRIVATE_H
#define MONGOC_UTIL_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc.h"
#ifdef HAVE_STRINGS_H
#include <strings.h>
#endif
/* string comparison functions for Windows */
#ifdef _WIN32
#define strcasecmp _stricmp
#define strncasecmp _strnicmp
#endif
-/* Suppress CWE-252 ("Unchecked return value") warnings for things we can't deal
- * with */
-#if defined(__GNUC__) && __GNUC__ >= 4
-#define _ignore_value(x) \
- (({ \
- __typeof__(x) __x = (x); \
- (void) __x; \
- }))
-#else
-#define _ignore_value(x) ((void) (x))
-#endif
-
-
-#if BSON_GNUC_CHECK_VERSION (4, 6)
+#if BSON_GNUC_CHECK_VERSION(4, 6)
#define BEGIN_IGNORE_DEPRECATIONS \
_Pragma ("GCC diagnostic push") \
_Pragma ("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#define END_IGNORE_DEPRECATIONS _Pragma ("GCC diagnostic pop")
#elif defined(__clang__)
#define BEGIN_IGNORE_DEPRECATIONS \
_Pragma ("clang diagnostic push") \
_Pragma ("clang diagnostic ignored \"-Wdeprecated-declarations\"")
#define END_IGNORE_DEPRECATIONS _Pragma ("clang diagnostic pop")
#else
#define BEGIN_IGNORE_DEPRECATIONS
#define END_IGNORE_DEPRECATIONS
#endif
#define COALESCE(x, y) ((x == 0) ? (y) : (x))
/* Helper macros for stringifying things */
#define MONGOC_STR(s) #s
#define MONGOC_EVALUATE_STR(s) MONGOC_STR (s)
BSON_BEGIN_DECLS
int
_mongoc_rand_simple (unsigned int *seed);
char *
_mongoc_hex_md5 (const char *input);
void
_mongoc_usleep (int64_t usec);
const char *
_mongoc_get_command_name (const bson_t *command);
+const char *
+_mongoc_get_documents_field_name (const char *command_name);
+
+bool
+_mongoc_lookup_bool (const bson_t *bson, const char *key, bool default_value);
+
void
_mongoc_get_db_name (const char *ns, char *db /* OUT */);
+void
+_mongoc_bson_init_if_set (bson_t *bson);
+
void
_mongoc_bson_destroy_if_set (bson_t *bson);
+const char *
+_mongoc_bson_type_to_str (bson_type_t t);
+
size_t
_mongoc_strlen_or_zero (const char *s);
bool
_mongoc_get_server_id_from_opts (const bson_t *opts,
mongoc_error_domain_t domain,
mongoc_error_code_t code,
uint32_t *server_id,
bson_error_t *error);
-bool
-_mongoc_validate_legacy_index (const bson_t *doc, bson_error_t *error);
-
bool
_mongoc_validate_new_document (const bson_t *insert, bson_error_t *error);
bool
_mongoc_validate_replace (const bson_t *insert, bson_error_t *error);
bool
_mongoc_validate_update (const bson_t *update, bson_error_t *error);
void
mongoc_lowercase (const char *src, char *buf /* OUT */);
bool
mongoc_parse_port (uint16_t *port, const char *str);
BSON_END_DECLS
#endif /* MONGOC_UTIL_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-util.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-util.c
similarity index 76%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-util.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-util.c
index 711acbb7..a4e50eac 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-util.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-util.c
@@ -1,324 +1,401 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#ifdef _WIN32
+#define _CRT_RAND_S
+#endif
#include <string.h>
#include "mongoc-util-private.h"
#include "mongoc-client.h"
#include "mongoc-trace-private.h"
/*
 * Return a simple pseudo-random int. On Windows rand_s is used and the
 * seed is ignored; elsewhere rand_r advances *seed deterministically.
 */
int
_mongoc_rand_simple (unsigned int *seed)
{
#ifdef _WIN32
   /* ignore the seed */
   unsigned int ret = 0;
   errno_t err;

   err = rand_s (&ret);
   if (0 != err) {
      /* the original format string was a bare "%", which is undefined
       * behavior in printf-family formatting; report the error code */
      MONGOC_ERROR ("rand_s failed: %d", (int) err);
   }

   return (int) ret;
#else
   return rand_r (seed);
#endif
}
/* Return the lowercase hex MD5 digest of 'input' as a newly allocated
 * string; the caller frees it with bson_free. */
char *
_mongoc_hex_md5 (const char *input)
{
   uint8_t digest[16];
   bson_md5_t md5;
   char digest_str[33];
   int i;

   bson_md5_init (&md5);
   bson_md5_append (&md5, (const uint8_t *) input, (uint32_t) strlen (input));
   bson_md5_finish (&md5, digest);

   /* two hex characters per digest byte */
   for (i = 0; i < (int) sizeof digest; i++) {
      bson_snprintf (digest_str + i * 2, 3, "%02x", digest[i]);
   }
   digest_str[sizeof digest_str - 1] = '\0';

   return bson_strdup (digest_str);
}
/*
 * Sleep for 'usec' microseconds. On Windows this uses a waitable timer:
 * a negative due time means a relative delay, expressed in 100-nanosecond
 * intervals (hence the * 10). Elsewhere plain usleep is used.
 *
 * NOTE(review): the CreateWaitableTimer/SetWaitableTimer returns are not
 * checked; a failed handle would make this return without sleeping —
 * confirm that best-effort behavior is intended.
 */
void
_mongoc_usleep (int64_t usec)
{
#ifdef _WIN32
   LARGE_INTEGER ft;
   HANDLE timer;
   BSON_ASSERT (usec >= 0);
   /* relative due time in 100ns units */
   ft.QuadPart = -(10 * usec);
   timer = CreateWaitableTimer (NULL, true, NULL);
   SetWaitableTimer (timer, &ft, 0, NULL, NULL, 0);
   WaitForSingleObject (timer, INFINITE);
   CloseHandle (timer);
#else
   BSON_ASSERT (usec >= 0);
   usleep ((useconds_t) usec);
#endif
}
/*
 * Return the name of the command in 'command': normally its first key,
 * but when the command is wrapped as {$query: {...}} or {query: {...}},
 * the first key of the wrapped document. Returns NULL for an empty or
 * unreadable document. The returned pointer aliases 'command'.
 */
const char *
_mongoc_get_command_name (const bson_t *command)
{
   bson_iter_t iter;
   bson_iter_t child;
   const char *name;
   const char *wrapper_name = NULL;

   BSON_ASSERT (command);

   if (!bson_iter_init (&iter, command) || !bson_iter_next (&iter)) {
      return NULL;
   }

   name = bson_iter_key (&iter);

   /* wrapped in "$query" or "query"?
    *
    * {$query: {count: "collection"}, $readPreference: {...}}
    */
   if (name[0] == '$') {
      wrapper_name = "$query";
   } else if (0 == strcmp (name, "query")) {
      wrapper_name = "query";
   }

   if (wrapper_name && bson_iter_init_find (&iter, command, wrapper_name) &&
       BSON_ITER_HOLDS_DOCUMENT (&iter) && bson_iter_recurse (&iter, &child) &&
       bson_iter_next (&child)) {
      name = bson_iter_key (&child);
   }

   return name;
}
+const char *
+_mongoc_get_documents_field_name (const char *command_name)
+{
+ if (!strcmp (command_name, "insert")) {
+ return "documents";
+ }
+
+ if (!strcmp (command_name, "update")) {
+ return "updates";
+ }
+
+ if (!strcmp (command_name, "delete")) {
+ return "deletes";
+ }
+
+ return NULL;
+}
+
+bool
+_mongoc_lookup_bool (const bson_t *bson, const char *key, bool default_value)
+{
+ bson_iter_t iter;
+ bson_iter_t child;
+
+ if (!bson) {
+ return default_value;
+ }
+
+ BSON_ASSERT (bson_iter_init (&iter, bson));
+ if (!bson_iter_find_descendant (&iter, key, &child)) {
+ return default_value;
+ }
+
+ return bson_iter_as_bool (&child);
+}
+
/*
 * Copy the database portion of a "db.collection" namespace into 'db',
 * truncating at MONGOC_NAMESPACE_MAX. When 'ns' contains no '.', the
 * whole string is treated as the database name.
 */
void
_mongoc_get_db_name (const char *ns, char *db /* OUT */)
{
   size_t dblen;
   const char *dot;

   BSON_ASSERT (ns);

   /* strchr, not strstr: searching for a single character */
   dot = strchr (ns, '.');

   if (dot) {
      /* NOTE(review): the +1 presumably leaves room for the terminator
       * that bson_strncpy writes within 'dblen' bytes — confirm against
       * bson_strncpy's contract */
      dblen = BSON_MIN (dot - ns + 1, MONGOC_NAMESPACE_MAX);
      bson_strncpy (db, ns, dblen);
   } else {
      bson_strncpy (db, ns, MONGOC_NAMESPACE_MAX);
   }
}
+void
+_mongoc_bson_init_if_set (bson_t *bson)
+{
+ if (bson) {
+ bson_init (bson);
+ }
+}
+
+const char *
+_mongoc_bson_type_to_str (bson_type_t t)
+{
+ switch (t) {
+ case BSON_TYPE_EOD:
+ return "EOD";
+ case BSON_TYPE_DOUBLE:
+ return "DOUBLE";
+ case BSON_TYPE_UTF8:
+ return "UTF8";
+ case BSON_TYPE_DOCUMENT:
+ return "DOCUMENT";
+ case BSON_TYPE_ARRAY:
+ return "ARRAY";
+ case BSON_TYPE_BINARY:
+ return "BINARY";
+ case BSON_TYPE_UNDEFINED:
+ return "UNDEFINED";
+ case BSON_TYPE_OID:
+ return "OID";
+ case BSON_TYPE_BOOL:
+ return "BOOL";
+ case BSON_TYPE_DATE_TIME:
+ return "DATE_TIME";
+ case BSON_TYPE_NULL:
+ return "NULL";
+ case BSON_TYPE_REGEX:
+ return "REGEX";
+ case BSON_TYPE_DBPOINTER:
+ return "DBPOINTER";
+ case BSON_TYPE_CODE:
+ return "CODE";
+ case BSON_TYPE_SYMBOL:
+ return "SYMBOL";
+ case BSON_TYPE_CODEWSCOPE:
+ return "CODEWSCOPE";
+ case BSON_TYPE_INT32:
+ return "INT32";
+ case BSON_TYPE_TIMESTAMP:
+ return "TIMESTAMP";
+ case BSON_TYPE_INT64:
+ return "INT64";
+ case BSON_TYPE_MAXKEY:
+ return "MAXKEY";
+ case BSON_TYPE_MINKEY:
+ return "MINKEY";
+ case BSON_TYPE_DECIMAL128:
+ return "DECIMAL128";
+ default:
+ return "Unknown";
+ }
+}
+
/* Destroy 'bson' when it is non-NULL; NULL is a no-op. */
void
_mongoc_bson_destroy_if_set (bson_t *bson)
{
   if (!bson) {
      return;
   }

   bson_destroy (bson);
}
/* strlen that tolerates NULL: a NULL pointer has length 0. */
size_t
_mongoc_strlen_or_zero (const char *s)
{
   if (!s) {
      return 0;
   }

   return strlen (s);
}
/* Get "serverId" from opts. Sets *server_id to the serverId from "opts" or 0
 * if absent. On error, fills out *error with domain and code and return false.
 */
bool
_mongoc_get_server_id_from_opts (const bson_t *opts,
                                 mongoc_error_domain_t domain,
                                 mongoc_error_code_t code,
                                 uint32_t *server_id,
                                 bson_error_t *error)
{
   bson_iter_t iter;
   ENTRY;
   BSON_ASSERT (server_id);
   *server_id = 0;
   /* a missing opts document or missing "serverId" key is not an error:
    * *server_id stays 0 and we report success */
   if (!opts || !bson_iter_init_find (&iter, opts, "serverId")) {
      RETURN (true);
   }
   /* any integer width is accepted; other BSON types are rejected */
   if (!BSON_ITER_HOLDS_INT (&iter)) {
      bson_set_error (
         error, domain, code, "The serverId option must be an integer");
      RETURN (false);
   }
   /* server ids are 1-based */
   if (bson_iter_as_int64 (&iter) <= 0) {
      bson_set_error (error, domain, code, "The serverId option must be >= 1");
      RETURN (false);
   }
   *server_id = (uint32_t) bson_iter_as_int64 (&iter);
   RETURN (true);
}
-bool
-_mongoc_validate_legacy_index (const bson_t *doc, bson_error_t *error)
-{
- bson_error_t validate_err;
-
- /* insert into system.indexes on pre-2.6 MongoDB, allow "." in keys */
- if (!bson_validate_with_error (doc,
- BSON_VALIDATE_UTF8 |
- BSON_VALIDATE_EMPTY_KEYS |
- BSON_VALIDATE_DOLLAR_KEYS,
- &validate_err)) {
- bson_set_error (error,
- MONGOC_ERROR_COMMAND,
- MONGOC_ERROR_COMMAND_INVALID_ARG,
- "legacy index document contains invalid key: %s",
- validate_err.message);
- return false;
- }
-
- return true;
-}
-
-
/* Validation flags applied to whole documents supplied for insert and
 * replace: validate UTF-8 (allowing embedded NUL bytes in strings) and
 * reject empty keys, keys containing '.', and keys beginning with '$'. */
const bson_validate_flags_t insert_vflags =
   (bson_validate_flags_t) BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
   BSON_VALIDATE_EMPTY_KEYS | BSON_VALIDATE_DOT_KEYS |
   BSON_VALIDATE_DOLLAR_KEYS;
/* Validate a document about to be inserted against insert_vflags.
 * On failure, fills out *error (invalid-argument, including libbson's
 * validation message) and returns false. */
bool
_mongoc_validate_new_document (const bson_t *doc, bson_error_t *error)
{
   bson_error_t validate_err;
   if (!bson_validate_with_error (doc, insert_vflags, &validate_err)) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
-                     "document to insert contains invalid key: %s",
+                     "invalid document for insert: %s",
                      validate_err.message);
      return false;
   }
   return true;
}
/* Validate a replacement document (for replace-one operations) against
 * insert_vflags — the same rules as for an insert, since a replacement is
 * a whole document, not an update spec. On failure, fills out *error and
 * returns false. */
bool
_mongoc_validate_replace (const bson_t *doc, bson_error_t *error)
{
   bson_error_t validate_err;
   if (!bson_validate_with_error (doc, insert_vflags, &validate_err)) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
-                     "replacement document contains invalid key: %s",
+                     "invalid argument for replace: %s",
                      validate_err.message);
      return false;
   }
   return true;
}
/* Validate an update specification document. Unlike insert/replace,
 * dotted and $-prefixed keys ARE allowed inside operator arguments, so
 * only UTF-8 and empty-key checks apply; additionally every top-level
 * key must be a $-operator (e.g. "$set"). On failure, fills out *error
 * and returns false. */
bool
_mongoc_validate_update (const bson_t *update, bson_error_t *error)
{
   bson_error_t validate_err;
   bson_iter_t iter;
   const char *key;
   int vflags = BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
                BSON_VALIDATE_EMPTY_KEYS;
   if (!bson_validate_with_error (
          update, (bson_validate_flags_t) vflags, &validate_err)) {
      bson_set_error (error,
                      MONGOC_ERROR_COMMAND,
                      MONGOC_ERROR_COMMAND_INVALID_ARG,
-                     "update document contains invalid key: %s",
+                     "invalid argument for update: %s",
                      validate_err.message);
      return false;
   }
   if (!bson_iter_init (&iter, update)) {
      bson_set_error (error,
                      MONGOC_ERROR_BSON,
                      MONGOC_ERROR_BSON_INVALID,
                      "update document is corrupt");
      return false;
   }
   /* Every top-level key must name an update operator. */
   while (bson_iter_next (&iter)) {
      key = bson_iter_key (&iter);
      if (key[0] != '$') {
         bson_set_error (error,
                         MONGOC_ERROR_COMMAND,
                         MONGOC_ERROR_COMMAND_INVALID_ARG,
                         "Invalid key '%s': update only works with $ operators",
                         key);
         return false;
      }
   }
   return true;
}
/*
 * mongoc_lowercase --
 *
 *    Copy @src into @buf, lowercasing each character with tolower().
 *
 *    Writes exactly strlen (src) bytes and does NOT append a NUL
 *    terminator: callers must supply a @buf that is already terminated
 *    at (or beyond) that length, or terminate it themselves. @buf may
 *    equal @src for in-place lowercasing.
 */
void
mongoc_lowercase (const char *src, char *buf /* OUT */)
{
   for (; *src; ++src, ++buf) {
      /* Cast through unsigned char: passing a negative char (e.g. a
       * UTF-8 continuation byte where char is signed) to tolower() is
       * undefined behavior (C11 7.4p1, CERT STR37-C). */
      *buf = (char) tolower ((unsigned char) *src);
   }
}
/*
 * mongoc_parse_port --
 *
 *    Parse @str as a decimal port number into *@port. Returns false on a
 *    parse failure or an out-of-range value; mongod prohibits port 0, so
 *    0 (also strtoul's failure result) is rejected too.
 */
bool
mongoc_parse_port (uint16_t *port, const char *str)
{
   const unsigned long parsed = strtoul (str, NULL, 10);

   if (parsed == 0 || parsed > UINT16_MAX) {
      return false;
   }

   *port = (uint16_t) parsed;
   return true;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-version-functions.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-version-functions.c
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-version-functions.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-version-functions.c
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-version-functions.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-version-functions.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-version-functions.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-version-functions.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-version.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-version.h
similarity index 94%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-version.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-version.h
index 917f4787..286aeedb 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-version.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-version.h
@@ -1,102 +1,102 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if !defined (MONGOC_INSIDE) && !defined (MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#ifndef MONGOC_VERSION_H
#define MONGOC_VERSION_H
/**
* MONGOC_MAJOR_VERSION:
*
* MONGOC major version component (e.g. 1 if %MONGOC_VERSION is 1.2.3)
*/
#define MONGOC_MAJOR_VERSION (1)
/**
* MONGOC_MINOR_VERSION:
*
* MONGOC minor version component (e.g. 2 if %MONGOC_VERSION is 1.2.3)
*/
-#define MONGOC_MINOR_VERSION (8)
+#define MONGOC_MINOR_VERSION (9)
/**
* MONGOC_MICRO_VERSION:
*
* MONGOC micro version component (e.g. 3 if %MONGOC_VERSION is 1.2.3)
*/
-#define MONGOC_MICRO_VERSION (2)
+#define MONGOC_MICRO_VERSION (3)
/**
* MONGOC_PRERELEASE_VERSION:
*
* MONGOC prerelease version component (e.g. rc0 if %MONGOC_VERSION is 1.2.3-rc0)
*/
#define MONGOC_PRERELEASE_VERSION ()
/**
* MONGOC_VERSION:
*
* MONGOC version.
*/
-#define MONGOC_VERSION (1.8.2)
+#define MONGOC_VERSION (1.9.3)
/**
* MONGOC_VERSION_S:
*
* MONGOC version, encoded as a string, useful for printing and
* concatenation.
*/
-#define MONGOC_VERSION_S "1.8.2"
+#define MONGOC_VERSION_S "1.9.3"
/**
* MONGOC_VERSION_HEX:
*
* MONGOC version, encoded as an hexadecimal number, useful for
* integer comparisons.
*/
#define MONGOC_VERSION_HEX (MONGOC_MAJOR_VERSION << 24 | \
MONGOC_MINOR_VERSION << 16 | \
MONGOC_MICRO_VERSION << 8)
/**
* MONGOC_CHECK_VERSION:
* @major: required major version
* @minor: required minor version
* @micro: required micro version
*
* Compile-time version checking. Evaluates to %TRUE if the version
* of MONGOC is greater than the required one.
*/
#define MONGOC_CHECK_VERSION(major,minor,micro) \
(MONGOC_MAJOR_VERSION > (major) || \
(MONGOC_MAJOR_VERSION == (major) && MONGOC_MINOR_VERSION > (minor)) || \
(MONGOC_MAJOR_VERSION == (major) && MONGOC_MINOR_VERSION == (minor) && \
MONGOC_MICRO_VERSION >= (micro)))
#endif /* MONGOC_VERSION_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-version.h.in b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-version.h.in
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-version.h.in
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-version.h.in
diff --git a/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command-legacy-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command-legacy-private.h
new file mode 100644
index 00000000..224b27de
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command-legacy-private.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2014-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef MONGOC_WRITE_COMMAND_LEGACY_PRIVATE_H
+#define MONGOC_WRITE_COMMAND_LEGACY_PRIVATE_H
+
+#if !defined(MONGOC_COMPILATION)
+#error "Only <mongoc.h> can be included directly."
+#endif
+
+#include <bson.h>
+#include "mongoc-client-private.h"
+#include "mongoc-write-command-private.h"
+
+BSON_BEGIN_DECLS
+
+void
+_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
+ mongoc_client_t *client,
+ mongoc_server_stream_t *server_stream,
+ const char *database,
+ const char *collection,
+ uint32_t offset,
+ mongoc_write_result_t *result,
+ bson_error_t *error);
+void
+_mongoc_write_command_update_legacy (mongoc_write_command_t *command,
+ mongoc_client_t *client,
+ mongoc_server_stream_t *server_stream,
+ const char *database,
+ const char *collection,
+ uint32_t offset,
+ mongoc_write_result_t *result,
+ bson_error_t *error);
+void
+_mongoc_write_command_delete_legacy (mongoc_write_command_t *command,
+ mongoc_client_t *client,
+ mongoc_server_stream_t *server_stream,
+ const char *database,
+ const char *collection,
+ uint32_t offset,
+ mongoc_write_result_t *result,
+ bson_error_t *error);
+BSON_END_DECLS
+
+
+#endif /* MONGOC_WRITE_COMMAND_LEGACY_PRIVATE_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command-legacy.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command-legacy.c
new file mode 100644
index 00000000..e8b39c6d
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command-legacy.c
@@ -0,0 +1,518 @@
+/*
+ * Copyright 2014-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <bson.h>
+
+#include "mongoc-write-command-legacy-private.h"
+#include "mongoc-trace-private.h"
+#include "mongoc-util-private.h"
+
+static void
+_mongoc_monitor_legacy_write (mongoc_client_t *client,
+ mongoc_write_command_t *command,
+ const char *db,
+ const char *collection,
+ mongoc_server_stream_t *stream,
+ int64_t request_id)
+{
+ bson_t doc;
+ mongoc_apm_command_started_t event;
+ mongoc_write_concern_t *wc;
+
+ ENTRY;
+
+ if (!client->apm_callbacks.started) {
+ EXIT;
+ }
+ wc = mongoc_write_concern_new ();
+ mongoc_write_concern_set_w (wc, 0);
+
+ bson_init (&doc);
+ _mongoc_write_command_init (&doc, command, collection, wc);
+ _append_array_from_command (command, &doc);
+
+ mongoc_apm_command_started_init (
+ &event,
+ &doc,
+ db,
+ _mongoc_command_type_to_name (command->type),
+ request_id,
+ command->operation_id,
+ &stream->sd->host,
+ stream->sd->id,
+ client->apm_context);
+
+ client->apm_callbacks.started (&event);
+
+ mongoc_apm_command_started_cleanup (&event);
+ mongoc_write_concern_destroy (wc);
+ bson_destroy (&doc);
+}
+
+
+/* fire command-succeeded event as if we'd used a modern write command.
+ * note, cluster.request_id was incremented once for the write, again
+ * for the getLastError, so cluster.request_id is no longer valid; use the
+ * passed-in request_id instead.
+ */
+static void
+_mongoc_monitor_legacy_write_succeeded (mongoc_client_t *client,
+ int64_t duration,
+ mongoc_write_command_t *command,
+ mongoc_server_stream_t *stream,
+ int64_t request_id)
+{
+ bson_t doc;
+
+ mongoc_apm_command_succeeded_t event;
+
+ ENTRY;
+
+ if (!client->apm_callbacks.succeeded) {
+ EXIT;
+ }
+
+ bson_init (&doc);
+ /*
+ * Unacknowledged writes must provide a CommandSucceededEvent with a { ok: 1
+ * } reply.
+ * https://github.com/mongodb/specifications/blob/master/source/command-monitoring/command-monitoring.rst#unacknowledged-acknowledged-writes
+ */
+ bson_append_int32 (&doc, "ok", 2, 1);
+ bson_append_int32 (&doc, "n", 1, (int32_t) command->n_documents);
+
+ mongoc_apm_command_succeeded_init (
+ &event,
+ duration,
+ &doc,
+ _mongoc_command_type_to_name (command->type),
+ request_id,
+ command->operation_id,
+ &stream->sd->host,
+ stream->sd->id,
+ client->apm_context);
+
+ client->apm_callbacks.succeeded (&event);
+
+ mongoc_apm_command_succeeded_cleanup (&event);
+ bson_destroy (&doc);
+
+ EXIT;
+}
+
+
+void
+_mongoc_write_command_delete_legacy (mongoc_write_command_t *command,
+ mongoc_client_t *client,
+ mongoc_server_stream_t *server_stream,
+ const char *database,
+ const char *collection,
+ uint32_t offset,
+ mongoc_write_result_t *result,
+ bson_error_t *error)
+{
+ int64_t started;
+ int32_t max_bson_obj_size;
+ const uint8_t *data;
+ mongoc_rpc_t rpc;
+ uint32_t request_id;
+ bson_iter_t q_iter;
+ uint32_t len;
+ int64_t limit = 0;
+ char ns[MONGOC_NAMESPACE_MAX + 1];
+ bool r;
+ bson_reader_t *reader;
+ const bson_t *bson;
+ bool eof;
+
+ ENTRY;
+
+ BSON_ASSERT (command);
+ BSON_ASSERT (client);
+ BSON_ASSERT (database);
+ BSON_ASSERT (server_stream);
+ BSON_ASSERT (collection);
+
+ started = bson_get_monotonic_time ();
+
+ max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
+
+ if (!command->n_documents) {
+ bson_set_error (error,
+ MONGOC_ERROR_COLLECTION,
+ MONGOC_ERROR_COLLECTION_DELETE_FAILED,
+ "Cannot do an empty delete.");
+ result->failed = true;
+ EXIT;
+ }
+
+ bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);
+
+ reader =
+ bson_reader_new_from_data (command->payload.data, command->payload.len);
+ while ((bson = bson_reader_read (reader, &eof))) {
+ /* the document is like { "q": { <selector> }, limit: <0 or 1> } */
+ r = (bson_iter_init (&q_iter, bson) && bson_iter_find (&q_iter, "q") &&
+ BSON_ITER_HOLDS_DOCUMENT (&q_iter));
+
+ BSON_ASSERT (r);
+ bson_iter_document (&q_iter, &len, &data);
+ BSON_ASSERT (data);
+ BSON_ASSERT (len >= 5);
+ if (len > max_bson_obj_size) {
+ _mongoc_write_command_too_large_error (
+ error, 0, len, max_bson_obj_size);
+ result->failed = true;
+ bson_reader_destroy (reader);
+ EXIT;
+ }
+
+ request_id = ++client->cluster.request_id;
+
+ rpc.header.msg_len = 0;
+ rpc.header.request_id = request_id;
+ rpc.header.response_to = 0;
+ rpc.header.opcode = MONGOC_OPCODE_DELETE;
+ rpc.delete_.zero = 0;
+ rpc.delete_.collection = ns;
+
+ if (bson_iter_find (&q_iter, "limit") &&
+ (BSON_ITER_HOLDS_INT (&q_iter))) {
+ limit = bson_iter_as_int64 (&q_iter);
+ }
+
+ rpc.delete_.flags =
+ limit ? MONGOC_DELETE_SINGLE_REMOVE : MONGOC_DELETE_NONE;
+ rpc.delete_.selector = data;
+
+ _mongoc_monitor_legacy_write (
+ client, command, database, collection, server_stream, request_id);
+
+ if (!mongoc_cluster_legacy_rpc_sendv_to_server (
+ &client->cluster, &rpc, server_stream, error)) {
+ result->failed = true;
+ bson_reader_destroy (reader);
+ EXIT;
+ }
+
+ _mongoc_monitor_legacy_write_succeeded (client,
+ bson_get_monotonic_time () -
+ started,
+ command,
+ server_stream,
+ request_id);
+
+ started = bson_get_monotonic_time ();
+ }
+ bson_reader_destroy (reader);
+
+ EXIT;
+}
+
+
+void
+_mongoc_write_command_insert_legacy (mongoc_write_command_t *command,
+ mongoc_client_t *client,
+ mongoc_server_stream_t *server_stream,
+ const char *database,
+ const char *collection,
+ uint32_t offset,
+ mongoc_write_result_t *result,
+ bson_error_t *error)
+{
+ int64_t started;
+ mongoc_iovec_t *iov;
+ mongoc_rpc_t rpc;
+ uint32_t size = 0;
+ bool has_more;
+ char ns[MONGOC_NAMESPACE_MAX + 1];
+ uint32_t n_docs_in_batch;
+ uint32_t request_id = 0;
+ uint32_t idx = 0;
+ int32_t max_msg_size;
+ int32_t max_bson_obj_size;
+ bool singly;
+ bson_reader_t *reader;
+ const bson_t *bson;
+ bool eof;
+ int data_offset = 0;
+
+ ENTRY;
+
+ BSON_ASSERT (command);
+ BSON_ASSERT (client);
+ BSON_ASSERT (database);
+ BSON_ASSERT (server_stream);
+ BSON_ASSERT (collection);
+ BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);
+
+ started = bson_get_monotonic_time ();
+
+ max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
+ max_msg_size = mongoc_server_stream_max_msg_size (server_stream);
+
+ singly = !command->u.insert.allow_bulk_op_insert;
+
+ if (!command->n_documents) {
+ bson_set_error (error,
+ MONGOC_ERROR_COLLECTION,
+ MONGOC_ERROR_COLLECTION_INSERT_FAILED,
+ "Cannot do an empty insert.");
+ result->failed = true;
+ EXIT;
+ }
+
+ bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);
+
+ iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents);
+
+again:
+ has_more = false;
+ n_docs_in_batch = 0;
+ size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 +
+ strlen (collection) + 1);
+
+ reader = bson_reader_new_from_data (command->payload.data + data_offset,
+ command->payload.len - data_offset);
+ while ((bson = bson_reader_read (reader, &eof))) {
+ BSON_ASSERT (n_docs_in_batch <= idx);
+ BSON_ASSERT (idx <= command->n_documents);
+
+ if (bson->len > max_bson_obj_size) {
+ /* document is too large */
+ _mongoc_write_command_too_large_error (
+ error, idx, bson->len, max_bson_obj_size);
+
+ data_offset += bson->len;
+
+ if (command->flags.ordered) {
+ /* send the batch so far (if any) and return the error */
+ break;
+ }
+ } else if ((n_docs_in_batch == 1 && singly) ||
+ size > (max_msg_size - bson->len)) {
+ /* batch is full, send it and then start the next batch */
+ has_more = true;
+ break;
+ } else {
+ /* add document to batch and continue building the batch */
+ iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson);
+ iov[n_docs_in_batch].iov_len = bson->len;
+ size += bson->len;
+ n_docs_in_batch++;
+ data_offset += bson->len;
+ }
+
+ idx++;
+ }
+ bson_reader_destroy (reader);
+
+ if (n_docs_in_batch) {
+ request_id = ++client->cluster.request_id;
+
+ rpc.header.msg_len = 0;
+ rpc.header.request_id = request_id;
+ rpc.header.response_to = 0;
+ rpc.header.opcode = MONGOC_OPCODE_INSERT;
+ rpc.insert.flags =
+ ((command->flags.ordered) ? MONGOC_INSERT_NONE
+ : MONGOC_INSERT_CONTINUE_ON_ERROR);
+ rpc.insert.collection = ns;
+ rpc.insert.documents = iov;
+ rpc.insert.n_documents = n_docs_in_batch;
+
+ _mongoc_monitor_legacy_write (
+ client, command, database, collection, server_stream, request_id);
+
+ if (!mongoc_cluster_legacy_rpc_sendv_to_server (
+ &client->cluster, &rpc, server_stream, error)) {
+ result->failed = true;
+ GOTO (cleanup);
+ }
+
+ _mongoc_monitor_legacy_write_succeeded (client,
+ bson_get_monotonic_time () -
+ started,
+ command,
+ server_stream,
+ request_id);
+
+ started = bson_get_monotonic_time ();
+ }
+
+cleanup:
+
+ if (has_more) {
+ GOTO (again);
+ }
+
+ bson_free (iov);
+
+ EXIT;
+}
+
+
+void
+_mongoc_write_command_update_legacy (mongoc_write_command_t *command,
+ mongoc_client_t *client,
+ mongoc_server_stream_t *server_stream,
+ const char *database,
+ const char *collection,
+ uint32_t offset,
+ mongoc_write_result_t *result,
+ bson_error_t *error)
+{
+ int64_t started;
+ int32_t max_bson_obj_size;
+ mongoc_rpc_t rpc;
+ uint32_t request_id = 0;
+ bson_iter_t subiter, subsubiter;
+ bson_t doc;
+ bson_t update, selector;
+ const uint8_t *data = NULL;
+ uint32_t len = 0;
+ size_t err_offset;
+ bool val = false;
+ char ns[MONGOC_NAMESPACE_MAX + 1];
+ int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL |
+ BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS);
+ bson_reader_t *reader;
+ const bson_t *bson;
+ bool eof;
+
+ ENTRY;
+
+ BSON_ASSERT (command);
+ BSON_ASSERT (client);
+ BSON_ASSERT (database);
+ BSON_ASSERT (server_stream);
+ BSON_ASSERT (collection);
+
+ started = bson_get_monotonic_time ();
+
+ max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
+
+ reader =
+ bson_reader_new_from_data (command->payload.data, command->payload.len);
+ while ((bson = bson_reader_read (reader, &eof))) {
+ if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") &&
+ BSON_ITER_HOLDS_DOCUMENT (&subiter)) {
+ bson_iter_document (&subiter, &len, &data);
+ bson_init_static (&doc, data, len);
+
+ if (bson_iter_init (&subsubiter, &doc) &&
+ bson_iter_next (&subsubiter) &&
+ (bson_iter_key (&subsubiter)[0] != '$') &&
+ !bson_validate (
+ &doc, (bson_validate_flags_t) vflags, &err_offset)) {
+ result->failed = true;
+ bson_set_error (error,
+ MONGOC_ERROR_BSON,
+ MONGOC_ERROR_BSON_INVALID,
+ "update document is corrupt or contains "
+ "invalid keys including $ or .");
+ bson_reader_destroy (reader);
+ EXIT;
+ }
+ } else {
+ result->failed = true;
+ bson_set_error (error,
+ MONGOC_ERROR_BSON,
+ MONGOC_ERROR_BSON_INVALID,
+ "updates is malformed.");
+ bson_reader_destroy (reader);
+ EXIT;
+ }
+ }
+
+ bson_snprintf (ns, sizeof ns, "%s.%s", database, collection);
+
+ bson_reader_destroy (reader);
+ reader =
+ bson_reader_new_from_data (command->payload.data, command->payload.len);
+ while ((bson = bson_reader_read (reader, &eof))) {
+ request_id = ++client->cluster.request_id;
+
+ rpc.header.msg_len = 0;
+ rpc.header.request_id = request_id;
+ rpc.header.response_to = 0;
+ rpc.header.opcode = MONGOC_OPCODE_UPDATE;
+ rpc.update.zero = 0;
+ rpc.update.collection = ns;
+ rpc.update.flags = MONGOC_UPDATE_NONE;
+
+ bson_iter_init (&subiter, bson);
+ while (bson_iter_next (&subiter)) {
+ if (strcmp (bson_iter_key (&subiter), "u") == 0) {
+ bson_iter_document (&subiter, &len, &data);
+ if (len > max_bson_obj_size) {
+ _mongoc_write_command_too_large_error (
+ error, 0, len, max_bson_obj_size);
+ result->failed = true;
+ bson_reader_destroy (reader);
+ EXIT;
+ }
+
+ rpc.update.update = data;
+ bson_init_static (&update, data, len);
+ } else if (strcmp (bson_iter_key (&subiter), "q") == 0) {
+ bson_iter_document (&subiter, &len, &data);
+ if (len > max_bson_obj_size) {
+ _mongoc_write_command_too_large_error (
+ error, 0, len, max_bson_obj_size);
+ result->failed = true;
+ bson_reader_destroy (reader);
+ EXIT;
+ }
+
+ rpc.update.selector = data;
+ bson_init_static (&selector, data, len);
+ } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) {
+ val = bson_iter_bool (&subiter);
+ if (val) {
+ rpc.update.flags = (mongoc_update_flags_t) (
+ rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE);
+ }
+ } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) {
+ val = bson_iter_bool (&subiter);
+ if (val) {
+ rpc.update.flags = (mongoc_update_flags_t) (
+ rpc.update.flags | MONGOC_UPDATE_UPSERT);
+ }
+ }
+ }
+
+ _mongoc_monitor_legacy_write (
+ client, command, database, collection, server_stream, request_id);
+
+ if (!mongoc_cluster_legacy_rpc_sendv_to_server (
+ &client->cluster, &rpc, server_stream, error)) {
+ result->failed = true;
+ bson_reader_destroy (reader);
+ EXIT;
+ }
+
+ _mongoc_monitor_legacy_write_succeeded (client,
+ bson_get_monotonic_time () -
+ started,
+ command,
+ server_stream,
+ request_id);
+
+ started = bson_get_monotonic_time ();
+ }
+ bson_reader_destroy (reader);
+}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-command-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command-private.h
similarity index 75%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-command-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command-private.h
index b96dae4f..86801eb2 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-command-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command-private.h
@@ -1,161 +1,188 @@
/*
* Copyright 2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_WRITE_COMMAND_PRIVATE_H
#define MONGOC_WRITE_COMMAND_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
#include "mongoc-client.h"
#include "mongoc-error.h"
#include "mongoc-write-concern.h"
#include "mongoc-server-stream-private.h"
+#include "mongoc-buffer-private.h"
BSON_BEGIN_DECLS
#define MONGOC_WRITE_COMMAND_DELETE 0
#define MONGOC_WRITE_COMMAND_INSERT 1
#define MONGOC_WRITE_COMMAND_UPDATE 2
typedef enum {
MONGOC_BYPASS_DOCUMENT_VALIDATION_FALSE = 0,
MONGOC_BYPASS_DOCUMENT_VALIDATION_TRUE = 1 << 0,
MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT = 1 << 1,
} mongoc_write_bypass_document_validation_t;
struct _mongoc_bulk_write_flags_t {
bool ordered;
mongoc_write_bypass_document_validation_t bypass_document_validation;
bool has_collation;
+ bool has_multi_write;
};
typedef struct {
int type;
- bson_t *documents;
+ mongoc_buffer_t payload;
uint32_t n_documents;
mongoc_bulk_write_flags_t flags;
int64_t operation_id;
+ bson_t cmd_opts;
union {
struct {
bool allow_bulk_op_insert;
} insert;
} u;
} mongoc_write_command_t;
typedef struct {
- /* true after a legacy update prevents us from calculating nModified */
- bool omit_nModified;
uint32_t nInserted;
uint32_t nMatched;
uint32_t nModified;
uint32_t nRemoved;
uint32_t nUpserted;
- /* like [{"index": int, "_id": value}, ...] */
- bson_t writeErrors;
/* like [{"index": int, "code": int, "errmsg": str}, ...] */
+ bson_t writeErrors;
+ /* like [{"index": int, "_id": value}, ...] */
bson_t upserted;
- /* like [{"code": 64, "errmsg": "duplicate"}, ...] */
uint32_t n_writeConcernErrors;
+ /* like [{"code": 64, "errmsg": "duplicate"}, ...] */
bson_t writeConcernErrors;
bool failed; /* The command failed */
- bool must_stop; /* The stream may have been disonnected */
+ bool must_stop; /* The stream may have been disconnected */
bson_error_t error;
uint32_t upsert_append_count;
} mongoc_write_result_t;
+const char *
+_mongoc_command_type_to_field_name (int command_type);
+const char *
+_mongoc_command_type_to_name (int command_type);
+
void
_mongoc_write_command_destroy (mongoc_write_command_t *command);
void
+_mongoc_write_command_init (bson_t *doc,
+ mongoc_write_command_t *command,
+ const char *collection,
+ const mongoc_write_concern_t *write_concern);
+void
_mongoc_write_command_init_insert (mongoc_write_command_t *command,
const bson_t *document,
+ const bson_t *cmd_opts,
mongoc_bulk_write_flags_t flags,
int64_t operation_id,
bool allow_bulk_op_insert);
void
_mongoc_write_command_init_delete (mongoc_write_command_t *command,
const bson_t *selectors,
+ const bson_t *cmd_opts,
const bson_t *opts,
mongoc_bulk_write_flags_t flags,
int64_t operation_id);
void
_mongoc_write_command_init_update (mongoc_write_command_t *command,
const bson_t *selector,
const bson_t *update,
const bson_t *opts,
mongoc_bulk_write_flags_t flags,
int64_t operation_id);
void
_mongoc_write_command_insert_append (mongoc_write_command_t *command,
const bson_t *document);
void
_mongoc_write_command_update_append (mongoc_write_command_t *command,
const bson_t *selector,
const bson_t *update,
const bson_t *opts);
void
_mongoc_write_command_delete_append (mongoc_write_command_t *command,
const bson_t *selector,
const bson_t *opts);
void
+_mongoc_write_command_too_large_error (bson_error_t *error,
+ int32_t idx,
+ int32_t len,
+ int32_t max_bson_size);
+void
_mongoc_write_command_execute (mongoc_write_command_t *command,
mongoc_client_t *client,
mongoc_server_stream_t *server_stream,
const char *database,
const char *collection,
const mongoc_write_concern_t *write_concern,
uint32_t offset,
+ mongoc_client_session_t *cs,
mongoc_write_result_t *result);
void
_mongoc_write_result_init (mongoc_write_result_t *result);
void
+_mongoc_write_result_append_upsert (mongoc_write_result_t *result,
+ int32_t idx,
+ const bson_value_t *value);
+int32_t
+_mongoc_write_result_merge_arrays (uint32_t offset,
+ mongoc_write_result_t *result,
+ bson_t *dest,
+ bson_iter_t *iter);
+void
_mongoc_write_result_merge (mongoc_write_result_t *result,
mongoc_write_command_t *command,
const bson_t *reply,
uint32_t offset);
-void
-_mongoc_write_result_merge_legacy (mongoc_write_result_t *result,
- mongoc_write_command_t *command,
- const bson_t *reply,
- int32_t error_api_version,
- mongoc_error_code_t default_code,
- uint32_t offset);
+#define MONGOC_WRITE_RESULT_COMPLETE(_result, ...) \
+ _mongoc_write_result_complete (_result, __VA_ARGS__, NULL)
bool
_mongoc_write_result_complete (mongoc_write_result_t *result,
int32_t error_api_version,
const mongoc_write_concern_t *wc,
mongoc_error_domain_t err_domain_override,
bson_t *reply,
- bson_error_t *error);
+ bson_error_t *error,
+ ...);
void
_mongoc_write_result_destroy (mongoc_write_result_t *result);
+void
+_append_array_from_command (mongoc_write_command_t *command, bson_t *bson);
+
BSON_END_DECLS
#endif /* MONGOC_WRITE_COMMAND_PRIVATE_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command.c
new file mode 100644
index 00000000..3f91d37e
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-command.c
@@ -0,0 +1,1294 @@
+/*
+ * Copyright 2014 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <bson.h>
+
+#include "mongoc-client-private.h"
+#include "mongoc-client-session-private.h"
+#include "mongoc-error.h"
+#include "mongoc-trace-private.h"
+#include "mongoc-write-command-private.h"
+#include "mongoc-write-command-legacy-private.h"
+#include "mongoc-write-concern-private.h"
+#include "mongoc-util-private.h"
+
+
+/*
+ * TODO:
+ *
+ * - Remove error parameter to ops, favor result->error.
+ */
+
+typedef void (*mongoc_write_op_t) (mongoc_write_command_t *command,
+ mongoc_client_t *client,
+ mongoc_server_stream_t *server_stream,
+ const char *database,
+ const char *collection,
+ uint32_t offset,
+ mongoc_write_result_t *result,
+ bson_error_t *error);
+
+
+/* indexed by MONGOC_WRITE_COMMAND_DELETE, INSERT, UPDATE */
+static const char *gCommandNames[] = {"delete", "insert", "update"};
+static const char *gCommandFields[] = {"deletes", "documents", "updates"};
+static const uint32_t gCommandFieldLens[] = {7, 9, 7};
+
+static mongoc_write_op_t gLegacyWriteOps[3] = {
+ _mongoc_write_command_delete_legacy,
+ _mongoc_write_command_insert_legacy,
+ _mongoc_write_command_update_legacy};
+
+
+const char *
+_mongoc_command_type_to_name (int command_type)
+{
+ return gCommandNames[command_type];
+}
+
+const char *
+_mongoc_command_type_to_field_name (int command_type)
+{
+ return gCommandFields[command_type];
+}
+
+void
+_mongoc_write_command_insert_append (mongoc_write_command_t *command,
+ const bson_t *document)
+{
+ bson_iter_t iter;
+ bson_oid_t oid;
+ bson_t tmp;
+
+ ENTRY;
+
+ BSON_ASSERT (command);
+ BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT);
+ BSON_ASSERT (document);
+ BSON_ASSERT (document->len >= 5);
+
+ /*
+ * If the document does not contain an "_id" field, we need to generate
+ * a new oid for "_id".
+ */
+ if (!bson_iter_init_find (&iter, document, "_id")) {
+ bson_init (&tmp);
+ bson_oid_init (&oid, NULL);
+ BSON_APPEND_OID (&tmp, "_id", &oid);
+ bson_concat (&tmp, document);
+ _mongoc_buffer_append (&command->payload, bson_get_data (&tmp), tmp.len);
+ bson_destroy (&tmp);
+ } else {
+ _mongoc_buffer_append (
+ &command->payload, bson_get_data (document), document->len);
+ }
+
+ command->n_documents++;
+
+ EXIT;
+}
+
+void
+_mongoc_write_command_update_append (mongoc_write_command_t *command,
+ const bson_t *selector,
+ const bson_t *update,
+ const bson_t *opts)
+{
+ bson_t document;
+
+ ENTRY;
+
+ BSON_ASSERT (command);
+ BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_UPDATE);
+ BSON_ASSERT (selector && update);
+
+ bson_init (&document);
+ BSON_APPEND_DOCUMENT (&document, "q", selector);
+ BSON_APPEND_DOCUMENT (&document, "u", update);
+ if (opts) {
+ bson_iter_t iter;
+
+ bson_concat (&document, opts);
+ command->flags.has_collation |= bson_has_field (opts, "collation");
+
+ if (bson_iter_init_find (&iter, opts, "multi") &&
+ bson_iter_as_bool (&iter)) {
+ command->flags.has_multi_write = true;
+ }
+ }
+
+ _mongoc_buffer_append (
+ &command->payload, bson_get_data (&document), document.len);
+ command->n_documents++;
+
+ bson_destroy (&document);
+
+ EXIT;
+}
+
+void
+_mongoc_write_command_delete_append (mongoc_write_command_t *command,
+ const bson_t *selector,
+ const bson_t *opts)
+{
+ bson_t document;
+
+ ENTRY;
+
+ BSON_ASSERT (command);
+ BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_DELETE);
+ BSON_ASSERT (selector);
+
+ BSON_ASSERT (selector->len >= 5);
+
+ bson_init (&document);
+ BSON_APPEND_DOCUMENT (&document, "q", selector);
+ if (opts) {
+ bson_iter_t iter;
+
+ bson_concat (&document, opts);
+ command->flags.has_collation |= bson_has_field (opts, "collation");
+
+ if (bson_iter_init_find (&iter, opts, "limit") &&
+ bson_iter_as_int64 (&iter) != 1) {
+ command->flags.has_multi_write = true;
+ }
+ }
+
+ _mongoc_buffer_append (
+ &command->payload, bson_get_data (&document), document.len);
+ command->n_documents++;
+
+ bson_destroy (&document);
+
+ EXIT;
+}
+
+void
+_mongoc_write_command_init_bulk (mongoc_write_command_t *command,
+ int type,
+ mongoc_bulk_write_flags_t flags,
+ int64_t operation_id,
+ const bson_t *opts)
+{
+ ENTRY;
+
+ BSON_ASSERT (command);
+
+ command->type = type;
+ command->flags = flags;
+ command->operation_id = operation_id;
+ if (!bson_empty0 (opts)) {
+ bson_copy_to (opts, &command->cmd_opts);
+ } else {
+ bson_init (&command->cmd_opts);
+ }
+
+ _mongoc_buffer_init (&command->payload, NULL, 0, NULL, NULL);
+ command->n_documents = 0;
+
+ EXIT;
+}
+
+
+void
+_mongoc_write_command_init_insert (mongoc_write_command_t *command, /* IN */
+ const bson_t *document, /* IN */
+ const bson_t *cmd_opts, /* IN */
+ mongoc_bulk_write_flags_t flags, /* IN */
+ int64_t operation_id, /* IN */
+ bool allow_bulk_op_insert) /* IN */
+{
+ ENTRY;
+
+ BSON_ASSERT (command);
+
+ _mongoc_write_command_init_bulk (
+ command, MONGOC_WRITE_COMMAND_INSERT, flags, operation_id, cmd_opts);
+
+ command->u.insert.allow_bulk_op_insert = (uint8_t) allow_bulk_op_insert;
+ /* must handle NULL document from mongoc_collection_insert_bulk */
+ if (document) {
+ _mongoc_write_command_insert_append (command, document);
+ }
+
+ EXIT;
+}
+
+
+void
+_mongoc_write_command_init_delete (mongoc_write_command_t *command, /* IN */
+ const bson_t *selector, /* IN */
+ const bson_t *cmd_opts, /* IN */
+ const bson_t *opts, /* IN */
+ mongoc_bulk_write_flags_t flags, /* IN */
+ int64_t operation_id) /* IN */
+{
+ ENTRY;
+
+ BSON_ASSERT (command);
+ BSON_ASSERT (selector);
+
+ _mongoc_write_command_init_bulk (
+ command, MONGOC_WRITE_COMMAND_DELETE, flags, operation_id, cmd_opts);
+ _mongoc_write_command_delete_append (command, selector, opts);
+
+ EXIT;
+}
+
+
+void
+_mongoc_write_command_init_update (mongoc_write_command_t *command, /* IN */
+ const bson_t *selector, /* IN */
+ const bson_t *update, /* IN */
+ const bson_t *opts, /* IN */
+ mongoc_bulk_write_flags_t flags, /* IN */
+ int64_t operation_id) /* IN */
+{
+ ENTRY;
+
+ BSON_ASSERT (command);
+ BSON_ASSERT (selector);
+ BSON_ASSERT (update);
+
+ _mongoc_write_command_init_bulk (
+ command, MONGOC_WRITE_COMMAND_UPDATE, flags, operation_id, NULL);
+ _mongoc_write_command_update_append (command, selector, update, opts);
+
+ EXIT;
+}
+
+
+/* takes initialized bson_t *doc and begins formatting a write command */
+void
+_mongoc_write_command_init (bson_t *doc,
+ mongoc_write_command_t *command,
+ const char *collection,
+ const mongoc_write_concern_t *write_concern)
+{
+ ENTRY;
+
+ if (!command->n_documents) {
+ EXIT;
+ }
+
+ BSON_APPEND_UTF8 (doc, gCommandNames[command->type], collection);
+ if (write_concern && !mongoc_write_concern_is_default (write_concern)) {
+ BSON_APPEND_DOCUMENT (doc,
+ "writeConcern",
+ _mongoc_write_concern_get_bson (
+ (mongoc_write_concern_t *) (write_concern)));
+ }
+ BSON_APPEND_BOOL (doc, "ordered", command->flags.ordered);
+
+ if (command->flags.bypass_document_validation !=
+ MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
+ BSON_APPEND_BOOL (doc,
+ "bypassDocumentValidation",
+ !!command->flags.bypass_document_validation);
+ }
+
+ EXIT;
+}
+
+
+/*
+ *-------------------------------------------------------------------------
+ *
+ * _mongoc_write_command_too_large_error --
+ *
+ * Fill a bson_error_t and optional bson_t with error info after
+ * receiving a document for bulk insert, update, or remove that is
+ * larger than max_bson_size.
+ *
+ * "err_doc" should be NULL or an empty initialized bson_t.
+ *
+ * Returns:
+ * None.
+ *
+ * Side effects:
+ * "error" and optionally "err_doc" are filled out.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+_mongoc_write_command_too_large_error (bson_error_t *error,
+ int32_t idx,
+ int32_t len,
+ int32_t max_bson_size)
+{
+ bson_set_error (error,
+ MONGOC_ERROR_BSON,
+ MONGOC_ERROR_BSON_INVALID,
+ "Document %u is too large for the cluster. "
+ "Document is %u bytes, max is %d.",
+ idx,
+ len,
+ max_bson_size);
+}
+
+
+void
+_empty_error (mongoc_write_command_t *command, bson_error_t *error)
+{
+ static const uint32_t codes[] = {MONGOC_ERROR_COLLECTION_DELETE_FAILED,
+ MONGOC_ERROR_COLLECTION_INSERT_FAILED,
+ MONGOC_ERROR_COLLECTION_UPDATE_FAILED};
+
+ bson_set_error (error,
+ MONGOC_ERROR_COLLECTION,
+ codes[command->type],
+ "Cannot do an empty %s",
+ gCommandNames[command->type]);
+}
+
+
/* Decide whether adding one more document of "document_len" bytes would
 * push the batch past the server's size or count limits.
 * Returns true if the batch must be shipped before this document. */
bool
_mongoc_write_command_will_overflow (uint32_t len_so_far,
                                     uint32_t document_len,
                                     uint32_t n_documents_written,
                                     int32_t max_bson_size,
                                     int32_t max_write_batch_size)
{
   /* The server accepts a command document somewhat larger than a single
    * BSON object: max BSON object size + 16k bytes (SERVER-10643). */
   const int32_t max_cmd_size = max_bson_size + 16384;

   BSON_ASSERT (max_bson_size);

   if (len_so_far + document_len > max_cmd_size) {
      return true;
   }

   if (max_write_batch_size > 0 &&
       n_documents_written >= max_write_batch_size) {
      return true;
   }

   return false;
}
+
+
/*
 * _mongoc_write_opmsg --
 *
 *       Execute a write command over OP_MSG.  The packed documents in
 *       command->payload are sent as a payload-type-1 document sequence,
 *       split into batches that fit within the server's message size and
 *       write batch count limits.  Each server reply is merged into
 *       "result"; a retryable write is retried at most once on a newly
 *       selected stream.  On setup failure, "error" is set and the
 *       function exits early.
 */
static void
_mongoc_write_opmsg (mongoc_write_command_t *command,
                     mongoc_client_t *client,
                     mongoc_server_stream_t *server_stream,
                     const char *database,
                     const char *collection,
                     const mongoc_write_concern_t *write_concern,
                     uint32_t index_offset,
                     mongoc_client_session_t *cs,
                     mongoc_write_result_t *result,
                     bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   bson_t cmd;
   bson_t reply;
   bool ret = false;
   int32_t max_msg_size;
   int32_t max_bson_obj_size;
   int32_t max_document_count;
   uint32_t header;
   uint32_t payload_batch_size = 0;
   uint32_t payload_total_offset = 0;
   bool ship_it = false;
   int document_count = 0;
   int32_t len;
   mongoc_server_stream_t *retry_server_stream = NULL;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

/* MongoDB has a extra allowance to allow updating 16mb document,
 * as the update operators would otherwise overflow the 16mb object limit
 */
#define BSON_OBJECT_ALLOWANCE (16 * 1024)
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_msg_size = mongoc_server_stream_max_msg_size (server_stream);
   max_document_count =
      mongoc_server_stream_max_write_batch_size (server_stream);

   bson_init (&cmd);
   _mongoc_write_command_init (&cmd, command, collection, write_concern);
   mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
   mongoc_cmd_parts_set_session (&parts, cs);
   parts.assembled.operation_id = command->operation_id;
   parts.is_write_command = true;
   parts.assembled.is_acknowledged =
      mongoc_write_concern_is_acknowledged (write_concern);

   /* Write commands that include multi-document operations are not retryable.
    * Set this explicitly so that mongoc_cmd_parts_assemble does not need to
    * inspect the command body later. */
   parts.allow_txn_number = command->flags.has_multi_write
                               ? MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO
                               : MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES;

   bson_iter_init (&iter, &command->cmd_opts);
   if (!mongoc_cmd_parts_append_opts (
          &parts, &iter, server_stream->sd->max_wire_version, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) {
      bson_destroy (&cmd);
      mongoc_cmd_parts_cleanup (&parts);
      EXIT;
   }

   /*
    * OP_MSG header == 16 byte
    * + 4 bytes flagBits
    * + 1 byte payload type = 1
    * + 1 byte payload type = 2
    * + 4 byte size of payload
    * == 26 bytes opcode overhead
    * + X Full command document {insert: "test", writeConcern: {...}}
    * + Y command identifier ("documents", "deletes", "updates") ( + \0)
    */

   header =
      26 + parts.assembled.command->len + gCommandFieldLens[command->type] + 1;

   do {
      /* peek at the little-endian length prefix of the next packed
       * document; the payload holds documents back-to-back */
      memcpy (&len,
              command->payload.data + payload_batch_size + payload_total_offset,
              4);
      len = BSON_UINT32_FROM_LE (len);

      if (len > max_bson_obj_size + BSON_OBJECT_ALLOWANCE) {
         /* Quit if the document is too large */
         _mongoc_write_command_too_large_error (
            error, index_offset, len, max_bson_obj_size);
         result->failed = true;
         break;

      } else if ((payload_batch_size + header) + len <= max_msg_size) {
         /* The current batch is still under max batch size in bytes */
         payload_batch_size += len;

         /* If this document filled the maximum document count */
         if (++document_count == max_document_count) {
            ship_it = true;
            /* If this document is the last document we have */
         } else if (payload_batch_size + payload_total_offset ==
                    command->payload.len) {
            ship_it = true;
         } else {
            ship_it = false;
         }
      } else {
         /* document does not fit this batch: send what we have first */
         ship_it = true;
      }

      if (ship_it) {
         bool is_retryable = parts.is_retryable_write;

         /* Seek past the document offset we have already sent */
         parts.assembled.payload = command->payload.data + payload_total_offset;
         /* Only send the documents up to this size */
         parts.assembled.payload_size = payload_batch_size;
         parts.assembled.payload_identifier = gCommandFields[command->type];

         /* increment the transaction number for the first attempt of each
          * retryable write command */
         if (is_retryable) {
            bson_iter_t txn_number_iter;
            BSON_ASSERT (bson_iter_init_find (
               &txn_number_iter, parts.assembled.command, "txnNumber"));
            bson_iter_overwrite_int64 (
               &txn_number_iter,
               ++parts.assembled.session->server_session->txn_number);
         }
      retry:
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);

         /* Add this batch size so we skip these documents next time */
         payload_total_offset += payload_batch_size;
         payload_batch_size = 0;

         /* If a retryable error is encountered and the write is retryable,
          * select a new writable stream and retry. If server selection fails or
          * the selected server does not support retryable writes, fall through
          * and allow the original error to be reported. */
         if (!ret && is_retryable &&
             (error->domain == MONGOC_ERROR_STREAM ||
              mongoc_cluster_is_not_master_error (error))) {
            bson_error_t ignored_error;

            /* each write command may be retried at most once */
            is_retryable = false;

            if (retry_server_stream) {
               mongoc_server_stream_cleanup (retry_server_stream);
            }

            retry_server_stream = mongoc_cluster_stream_for_writes (
               &client->cluster, &ignored_error);

            if (retry_server_stream &&
                retry_server_stream->sd->max_wire_version >=
                   WIRE_VERSION_RETRY_WRITES) {
               parts.assembled.server_stream = retry_server_stream;
               GOTO (retry);
            }
         }

         if (!ret) {
            result->failed = true;
            result->must_stop = true;
         }

         /* Result merge needs to know the absolute index for a document
          * so it can rewrite the error message which contains the relative
          * document index per batch
          */
         _mongoc_write_result_merge (result, command, &reply, index_offset);
         index_offset += document_count;
         document_count = 0;
         bson_destroy (&reply);
      }
      /* While we have more documents to write */
   } while (payload_total_offset < command->payload.len);

   bson_destroy (&cmd);
   mongoc_cmd_parts_cleanup (&parts);

   if (retry_server_stream) {
      mongoc_server_stream_cleanup (retry_server_stream);
   }

   if (ret) {
      /* if a retry succeeded, clear the initial error */
      memset (&result->error, 0, sizeof (bson_error_t));
   }

   EXIT;
}
+
+
+void
+_append_array_from_command (mongoc_write_command_t *command, bson_t *bson)
+{
+ bson_t ar;
+ bson_reader_t *reader;
+ char str[16];
+ uint32_t i = 0;
+ const char *key;
+ bool eof;
+ const bson_t *current;
+
+
+ reader =
+ bson_reader_new_from_data (command->payload.data, command->payload.len);
+
+ bson_append_array_begin (bson,
+ gCommandFields[command->type],
+ gCommandFieldLens[command->type],
+ &ar);
+
+ while ((current = bson_reader_read (reader, &eof))) {
+ bson_uint32_to_string (i, &key, str, sizeof str);
+ BSON_APPEND_DOCUMENT (&ar, key, current);
+ i++;
+ }
+
+ bson_append_array_end (bson, &ar);
+ bson_reader_destroy (reader);
+}
+
/*
 * _mongoc_write_opquery --
 *
 *       Execute an acknowledged write command over the legacy OP_QUERY
 *       protocol (pre-OP_MSG servers).  The packed payload is copied
 *       into a BSON array field ("deletes"/"documents"/"updates"); when
 *       a batch would exceed the size or count limits the command is
 *       sent and the loop restarts ("again") for the remaining
 *       documents.  Replies are merged into "result".
 */
static void
_mongoc_write_opquery (mongoc_write_command_t *command,
                       mongoc_client_t *client,
                       mongoc_server_stream_t *server_stream,
                       const char *database,
                       const char *collection,
                       const mongoc_write_concern_t *write_concern,
                       uint32_t offset,
                       mongoc_client_session_t *session,
                       mongoc_write_result_t *result,
                       bson_error_t *error)
{
   mongoc_cmd_parts_t parts;
   bson_iter_t iter;
   const char *key;
   uint32_t len = 0;
   bson_t ar;
   bson_t cmd;
   bson_t reply;
   char str[16];
   bool has_more;
   bool ret = false;
   uint32_t i;
   int32_t max_bson_obj_size;
   int32_t max_write_batch_size;
   uint32_t overhead;
   uint32_t key_len;
   int data_offset = 0;
   bson_reader_t *reader;
   const bson_t *bson;
   bool eof;

   ENTRY;

   BSON_ASSERT (command);
   BSON_ASSERT (client);
   BSON_ASSERT (database);
   BSON_ASSERT (server_stream);
   BSON_ASSERT (collection);

   bson_init (&cmd);
   max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream);
   max_write_batch_size =
      mongoc_server_stream_max_write_batch_size (server_stream);

again:
   has_more = false;
   i = 0;

   _mongoc_write_command_init (&cmd, command, collection, write_concern);

   /* 1 byte to specify array type, 1 byte for field name's null terminator */
   overhead = cmd.len + 2 + gCommandFieldLens[command->type];


   /* resume reading the packed payload where the previous batch stopped */
   reader = bson_reader_new_from_data (command->payload.data + data_offset,
                                       command->payload.len - data_offset);

   bson_append_array_begin (&cmd,
                            gCommandFields[command->type],
                            gCommandFieldLens[command->type],
                            &ar);

   while ((bson = bson_reader_read (reader, &eof))) {
      key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str);
      len = bson->len;
      /* 1 byte to specify document type, 1 byte for key's null terminator */
      if (_mongoc_write_command_will_overflow (overhead,
                                               key_len + len + 2 + ar.len,
                                               i,
                                               max_bson_obj_size,
                                               max_write_batch_size)) {
         /* this document does not fit the current batch; send what we
          * have and come back for the rest */
         has_more = true;
         break;
      }
      BSON_APPEND_DOCUMENT (&ar, key, bson);
      data_offset += len;
      i++;
   }

   bson_append_array_end (&cmd, &ar);

   if (!i) {
      /* not even one document fit, so it must be oversized */
      _mongoc_write_command_too_large_error (error, i, len, max_bson_obj_size);
      result->failed = true;
      result->must_stop = true;
      ret = false;
      if (bson) {
         /* skip past the oversized document so we cannot loop forever */
         data_offset += len;
      }
   } else {
      mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd);
      mongoc_cmd_parts_set_session (&parts, session);
      parts.is_write_command = true;
      parts.assembled.operation_id = command->operation_id;
      parts.assembled.is_acknowledged =
         mongoc_write_concern_is_acknowledged (write_concern);
      bson_iter_init (&iter, &command->cmd_opts);
      if (!mongoc_cmd_parts_append_opts (
             &parts, &iter, server_stream->sd->max_wire_version, error)) {
         /* NOTE(review): "reader" is not destroyed on this early exit —
          * looks like a leak; confirm against upstream */
         bson_destroy (&cmd);
         mongoc_cmd_parts_cleanup (&parts);
         EXIT;
      }

      ret = mongoc_cmd_parts_assemble (&parts, server_stream, error);
      if (ret) {
         ret = mongoc_cluster_run_command_monitored (
            &client->cluster, &parts.assembled, &reply, error);
      } else {
         /* assembling failed */
         result->must_stop = true;
         bson_init (&reply);
      }

      if (!ret) {
         result->failed = true;
         if (bson_empty (&reply)) {
            /* assembling failed, or a network error running the command */
            result->must_stop = true;
         }
      }

      if (session) {
         _mongoc_client_session_handle_reply (
            session,
            mongoc_write_concern_is_acknowledged (write_concern),
            &reply);
      }

      _mongoc_write_result_merge (result, command, &reply, offset);
      offset += i;
      bson_destroy (&reply);
      mongoc_cmd_parts_cleanup (&parts);
   }
   bson_reader_destroy (reader);

   /* keep going if documents remain, unless an ordered bulk already
    * failed or a fatal (must_stop) condition occurred */
   if (has_more && (ret || !command->flags.ordered) && !result->must_stop) {
      bson_reinit (&cmd);
      GOTO (again);
   }

   bson_destroy (&cmd);
   EXIT;
}
+
+
+void
+_mongoc_write_command_execute (
+ mongoc_write_command_t *command, /* IN */
+ mongoc_client_t *client, /* IN */
+ mongoc_server_stream_t *server_stream, /* IN */
+ const char *database, /* IN */
+ const char *collection, /* IN */
+ const mongoc_write_concern_t *write_concern, /* IN */
+ uint32_t offset, /* IN */
+ mongoc_client_session_t *cs, /* IN */
+ mongoc_write_result_t *result) /* OUT */
+{
+ ENTRY;
+
+ BSON_ASSERT (command);
+ BSON_ASSERT (client);
+ BSON_ASSERT (server_stream);
+ BSON_ASSERT (database);
+ BSON_ASSERT (collection);
+ BSON_ASSERT (result);
+
+ if (!write_concern) {
+ write_concern = client->write_concern;
+ }
+
+ if (!mongoc_write_concern_is_valid (write_concern)) {
+ bson_set_error (&result->error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "The write concern is invalid.");
+ result->failed = true;
+ EXIT;
+ }
+
+ if (command->flags.has_collation) {
+ if (!mongoc_write_concern_is_acknowledged (write_concern)) {
+ result->failed = true;
+ bson_set_error (&result->error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "Cannot set collation for unacknowledged writes");
+ EXIT;
+ }
+ if (server_stream->sd->max_wire_version < WIRE_VERSION_COLLATION) {
+ bson_set_error (&result->error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION,
+ "Collation is not supported by the selected server");
+ result->failed = true;
+ EXIT;
+ }
+ }
+ if (command->flags.bypass_document_validation !=
+ MONGOC_BYPASS_DOCUMENT_VALIDATION_DEFAULT) {
+ if (!mongoc_write_concern_is_acknowledged (write_concern)) {
+ result->failed = true;
+ bson_set_error (
+ &result->error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "Cannot set bypassDocumentValidation for unacknowledged writes");
+ EXIT;
+ }
+ }
+ if (command->payload.len == 0) {
+ _empty_error (command, &result->error);
+ EXIT;
+ }
+
+ if (server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG) {
+ _mongoc_write_opmsg (command,
+ client,
+ server_stream,
+ database,
+ collection,
+ write_concern,
+ offset,
+ cs,
+ result,
+ &result->error);
+ } else {
+ if (mongoc_write_concern_is_acknowledged (write_concern)) {
+ _mongoc_write_opquery (command,
+ client,
+ server_stream,
+ database,
+ collection,
+ write_concern,
+ offset,
+ cs,
+ result,
+ &result->error);
+ } else {
+ gLegacyWriteOps[command->type](command,
+ client,
+ server_stream,
+ database,
+ collection,
+ offset,
+ result,
+ &result->error);
+ }
+ }
+
+ EXIT;
+}
+
+
+void
+_mongoc_write_command_destroy (mongoc_write_command_t *command)
+{
+ ENTRY;
+
+ if (command) {
+ bson_destroy (&command->cmd_opts);
+ _mongoc_buffer_destroy (&command->payload);
+ }
+
+ EXIT;
+}
+
+
+void
+_mongoc_write_result_init (mongoc_write_result_t *result) /* IN */
+{
+ ENTRY;
+
+ BSON_ASSERT (result);
+
+ memset (result, 0, sizeof *result);
+
+ bson_init (&result->upserted);
+ bson_init (&result->writeConcernErrors);
+ bson_init (&result->writeErrors);
+
+ EXIT;
+}
+
+
+void
+_mongoc_write_result_destroy (mongoc_write_result_t *result)
+{
+ ENTRY;
+
+ BSON_ASSERT (result);
+
+ bson_destroy (&result->upserted);
+ bson_destroy (&result->writeConcernErrors);
+ bson_destroy (&result->writeErrors);
+
+ EXIT;
+}
+
+
+void
+_mongoc_write_result_append_upsert (mongoc_write_result_t *result,
+ int32_t idx,
+ const bson_value_t *value)
+{
+ bson_t child;
+ const char *keyptr = NULL;
+ char key[12];
+ int len;
+
+ BSON_ASSERT (result);
+ BSON_ASSERT (value);
+
+ len = (int) bson_uint32_to_string (
+ result->upsert_append_count, &keyptr, key, sizeof key);
+
+ bson_append_document_begin (&result->upserted, keyptr, len, &child);
+ BSON_APPEND_INT32 (&child, "index", idx);
+ BSON_APPEND_VALUE (&child, "_id", value);
+ bson_append_document_end (&result->upserted, &child);
+
+ result->upsert_append_count++;
+}
+
+
/*
 * _mongoc_write_result_merge_arrays --
 *
 *       Copy an array of error documents from a server reply (via
 *       "iter", which must hold an array) into "dest" (e.g.
 *       result->writeErrors), rewriting each element's "index" field
 *       from batch-relative to absolute by adding "offset".
 *
 * Returns:
 *       The number of documents appended to "dest".
 */
int32_t
_mongoc_write_result_merge_arrays (uint32_t offset,
                                   mongoc_write_result_t *result, /* IN */
                                   bson_t *dest,                  /* IN */
                                   bson_iter_t *iter)             /* IN */
{
   const bson_value_t *value;
   bson_iter_t ar;
   bson_iter_t citer;
   int32_t idx;
   int32_t count = 0;
   int32_t aridx;
   bson_t child;
   const char *keyptr = NULL;
   char key[12];
   int len;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (dest);
   BSON_ASSERT (iter);
   BSON_ASSERT (BSON_ITER_HOLDS_ARRAY (iter));

   /* continue array numbering after the documents already in dest */
   aridx = bson_count_keys (dest);

   if (bson_iter_recurse (iter, &ar)) {
      while (bson_iter_next (&ar)) {
         if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
             bson_iter_recurse (&ar, &citer)) {
            len =
               (int) bson_uint32_to_string (aridx++, &keyptr, key, sizeof key);
            bson_append_document_begin (dest, keyptr, len, &child);
            while (bson_iter_next (&citer)) {
               if (BSON_ITER_IS_KEY (&citer, "index")) {
                  /* rebase the per-batch index to an absolute one */
                  idx = bson_iter_int32 (&citer) + offset;
                  BSON_APPEND_INT32 (&child, "index", idx);
               } else {
                  /* all other fields are copied through unchanged */
                  value = bson_iter_value (&citer);
                  BSON_APPEND_VALUE (&child, bson_iter_key (&citer), value);
               }
            }
            bson_append_document_end (dest, &child);
            count++;
         }
      }
   }

   RETURN (count);
}
+
+
/*
 * _mongoc_write_result_merge --
 *
 *       Fold one server reply for a single batch into the accumulated
 *       write result: update the per-type counters (nInserted,
 *       nRemoved, nMatched, nModified, nUpserted), collect upserted
 *       ids, and append writeErrors / writeConcernError entries.
 *       "offset" is the absolute index of the batch's first document,
 *       used to rebase per-batch indexes.
 */
void
_mongoc_write_result_merge (mongoc_write_result_t *result,   /* IN */
                            mongoc_write_command_t *command, /* IN */
                            const bson_t *reply,             /* IN */
                            uint32_t offset)
{
   int32_t server_index = 0;
   const bson_value_t *value;
   bson_iter_t iter;
   bson_iter_t citer;
   bson_iter_t ar;
   int32_t n_upserted = 0;
   int32_t affected = 0;

   ENTRY;

   BSON_ASSERT (result);
   BSON_ASSERT (reply);

   /* "n" is the server's affected-document count for this batch */
   if (bson_iter_init_find (&iter, reply, "n") &&
       BSON_ITER_HOLDS_INT32 (&iter)) {
      affected = bson_iter_int32 (&iter);
   }

   /* a non-empty writeErrors array marks the whole result as failed */
   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &citer) &&
       bson_iter_next (&citer)) {
      result->failed = true;
   }

   switch (command->type) {
   case MONGOC_WRITE_COMMAND_INSERT:
      result->nInserted += affected;
      break;
   case MONGOC_WRITE_COMMAND_DELETE:
      result->nRemoved += affected;
      break;
   case MONGOC_WRITE_COMMAND_UPDATE:

      /* server returns each upserted _id with its index into this batch
       * look for "upserted": [{"index": 4, "_id": ObjectId()}, ...] */
      if (bson_iter_init_find (&iter, reply, "upserted")) {
         if (BSON_ITER_HOLDS_ARRAY (&iter) &&
             (bson_iter_recurse (&iter, &ar))) {
            while (bson_iter_next (&ar)) {
               if (BSON_ITER_HOLDS_DOCUMENT (&ar) &&
                   bson_iter_recurse (&ar, &citer) &&
                   bson_iter_find (&citer, "index") &&
                   BSON_ITER_HOLDS_INT32 (&citer)) {
                  server_index = bson_iter_int32 (&citer);

                  if (bson_iter_recurse (&ar, &citer) &&
                      bson_iter_find (&citer, "_id")) {
                     value = bson_iter_value (&citer);
                     _mongoc_write_result_append_upsert (
                        result, offset + server_index, value);
                     n_upserted++;
                  }
               }
            }
         }
         result->nUpserted += n_upserted;
         /*
          * XXX: The following addition to nMatched needs some checking.
          *      I'm highly skeptical of it.
          */
         result->nMatched += BSON_MAX (0, (affected - n_upserted));
      } else {
         result->nMatched += affected;
      }
      if (bson_iter_init_find (&iter, reply, "nModified") &&
          BSON_ITER_HOLDS_INT32 (&iter)) {
         result->nModified += bson_iter_int32 (&iter);
      }
      break;
   default:
      /* unknown command type: programmer error */
      BSON_ASSERT (false);
      break;
   }

   if (bson_iter_init_find (&iter, reply, "writeErrors") &&
       BSON_ITER_HOLDS_ARRAY (&iter)) {
      _mongoc_write_result_merge_arrays (
         offset, result, &result->writeErrors, &iter);
   }

   if (bson_iter_init_find (&iter, reply, "writeConcernError") &&
       BSON_ITER_HOLDS_DOCUMENT (&iter)) {
      uint32_t len;
      const uint8_t *data;
      bson_t write_concern_error;
      char str[16];
      const char *key;

      /* writeConcernError is a subdocument in the server response
       * append it to the result->writeConcernErrors array */
      bson_iter_document (&iter, &len, &data);
      bson_init_static (&write_concern_error, data, len);

      bson_uint32_to_string (
         result->n_writeConcernErrors, &key, str, sizeof str);

      bson_append_document (
         &result->writeConcernErrors, key, -1, &write_concern_error);

      result->n_writeConcernErrors++;
   }

   EXIT;
}
+
+
/*
 * If error is not set, set code from first document in array like
 * [{"code": 64, "errmsg": "duplicate"}, ...]. Format the error message
 * from all errors in array: a single error's message is used verbatim;
 * multiple errors produce 'Multiple <error_type> errors: "a", "b"'.
 */
static void
_set_error_from_response (bson_t *bson_array,
                          mongoc_error_domain_t domain,
                          const char *error_type,
                          bson_error_t *error /* OUT */)
{
   bson_iter_t array_iter;
   bson_iter_t doc_iter;
   bson_string_t *compound_err;
   const char *errmsg = NULL;
   int32_t code = 0;
   uint32_t n_keys, i;

   compound_err = bson_string_new (NULL);
   n_keys = bson_count_keys (bson_array);
   if (n_keys > 1) {
      bson_string_append_printf (
         compound_err, "Multiple %s errors: ", error_type);
   }

   if (!bson_empty0 (bson_array) && bson_iter_init (&array_iter, bson_array)) {
      /* get first code and all error messages */
      i = 0;

      /* NOTE(review): "i" counts only document-typed elements while
       * n_keys counts all elements; comma placement assumes every array
       * element is a document — confirm */
      while (bson_iter_next (&array_iter)) {
         if (BSON_ITER_HOLDS_DOCUMENT (&array_iter) &&
             bson_iter_recurse (&array_iter, &doc_iter)) {
            /* parse doc, which is like {"code": 64, "errmsg": "duplicate"} */
            while (bson_iter_next (&doc_iter)) {
               /* use the first error code we find */
               if (BSON_ITER_IS_KEY (&doc_iter, "code") && code == 0) {
                  code = bson_iter_int32 (&doc_iter);
               } else if (BSON_ITER_IS_KEY (&doc_iter, "errmsg")) {
                  errmsg = bson_iter_utf8 (&doc_iter, NULL);

                  /* build message like 'Multiple write errors: "foo", "bar"' */
                  if (n_keys > 1) {
                     bson_string_append_printf (compound_err, "\"%s\"", errmsg);
                     if (i < n_keys - 1) {
                        bson_string_append (compound_err, ", ");
                     }
                  } else {
                     /* single error message */
                     bson_string_append (compound_err, errmsg);
                  }
               }
            }

            i++;
         }
      }

      /* only overwrite "error" when we actually found a code and text */
      if (code && compound_err->len) {
         bson_set_error (
            error, domain, (uint32_t) code, "%s", compound_err->str);
      }
   }

   bson_string_free (compound_err, true);
}
+
+
/*
 * Complete a write result, including only certain fields.
 *
 * The trailing varargs are a NULL-terminated list of field names to
 * include in "bson" (e.g. "nInserted", "insertedCount", "upsertedId");
 * passing no names produces the full deprecated Bulk API document.
 * Callers normally use the MONGOC_WRITE_RESULT_COMPLETE macro, which
 * appends the terminating NULL.
 *
 * Returns true if the write succeeded (no failure flag and no error
 * code); also copies result->error into "error" if supplied.
 */
bool
_mongoc_write_result_complete (
   mongoc_write_result_t *result,             /* IN */
   int32_t error_api_version,                 /* IN */
   const mongoc_write_concern_t *wc,          /* IN */
   mongoc_error_domain_t err_domain_override, /* IN */
   bson_t *bson,                              /* OUT */
   bson_error_t *error,                       /* OUT */
   ...)
{
   mongoc_error_domain_t domain;
   va_list args;
   const char *field;
   int n_args;
   bson_iter_t iter;
   bson_iter_t child;

   ENTRY;

   BSON_ASSERT (result);

   /* choose the reported error domain by API version / override */
   if (error_api_version >= MONGOC_ERROR_API_VERSION_2) {
      domain = MONGOC_ERROR_SERVER;
   } else if (err_domain_override) {
      domain = err_domain_override;
   } else if (result->error.domain) {
      domain = (mongoc_error_domain_t) result->error.domain;
   } else {
      domain = MONGOC_ERROR_COLLECTION;
   }

   /* produce either old fields like nModified from the deprecated Bulk API Spec
    * or new fields like modifiedCount from the CRUD Spec, which we partly obey
    */
   if (bson && mongoc_write_concern_is_acknowledged (wc)) {
      n_args = 0;
      va_start (args, error);
      while ((field = va_arg (args, const char *))) {
         n_args++;

         if (!strcmp (field, "nInserted")) {
            BSON_APPEND_INT32 (bson, field, result->nInserted);
         } else if (!strcmp (field, "insertedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nInserted);
         } else if (!strcmp (field, "nMatched")) {
            BSON_APPEND_INT32 (bson, field, result->nMatched);
         } else if (!strcmp (field, "matchedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nMatched);
         } else if (!strcmp (field, "nModified")) {
            BSON_APPEND_INT32 (bson, field, result->nModified);
         } else if (!strcmp (field, "modifiedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nModified);
         } else if (!strcmp (field, "nRemoved")) {
            BSON_APPEND_INT32 (bson, field, result->nRemoved);
         } else if (!strcmp (field, "deletedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nRemoved);
         } else if (!strcmp (field, "nUpserted")) {
            BSON_APPEND_INT32 (bson, field, result->nUpserted);
         } else if (!strcmp (field, "upsertedCount")) {
            BSON_APPEND_INT32 (bson, field, result->nUpserted);
         } else if (!strcmp (field, "upserted") &&
                    !bson_empty0 (&result->upserted)) {
            BSON_APPEND_ARRAY (bson, field, &result->upserted);
         } else if (!strcmp (field, "upsertedId") &&
                    !bson_empty0 (&result->upserted) &&
                    bson_iter_init_find (&iter, &result->upserted, "0") &&
                    bson_iter_recurse (&iter, &child) &&
                    bson_iter_find (&child, "_id")) {
            /* "upsertedId", singular, for update_one() */
            BSON_APPEND_VALUE (bson, "upsertedId", bson_iter_value (&child));
         }
      }

      va_end (args);

      /* default: a standard result includes all Bulk API fields */
      if (!n_args) {
         BSON_APPEND_INT32 (bson, "nInserted", result->nInserted);
         BSON_APPEND_INT32 (bson, "nMatched", result->nMatched);
         BSON_APPEND_INT32 (bson, "nModified", result->nModified);
         BSON_APPEND_INT32 (bson, "nRemoved", result->nRemoved);
         BSON_APPEND_INT32 (bson, "nUpserted", result->nUpserted);
         if (!bson_empty0 (&result->upserted)) {
            BSON_APPEND_ARRAY (bson, "upserted", &result->upserted);
         }
      }

      /* always append errors if there are any */
      if (!n_args || !bson_empty (&result->writeErrors)) {
         BSON_APPEND_ARRAY (bson, "writeErrors", &result->writeErrors);
      }

      if (result->n_writeConcernErrors) {
         BSON_APPEND_ARRAY (
            bson, "writeConcernErrors", &result->writeConcernErrors);
      }
   }

   /* set bson_error_t from first write error or write concern error */
   _set_error_from_response (
      &result->writeErrors, domain, "write", &result->error);

   if (!result->error.code) {
      _set_error_from_response (&result->writeConcernErrors,
                                MONGOC_ERROR_WRITE_CONCERN,
                                "write concern",
                                &result->error);
   }

   if (error) {
      memcpy (error, &result->error, sizeof *error);
   }

   RETURN (!result->failed && result->error.code == 0);
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-concern-private.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-concern-private.h
similarity index 78%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-concern-private.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-concern-private.h
index c1db5fb1..b767ddc5 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-concern-private.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-concern-private.h
@@ -1,64 +1,56 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_WRITE_CONCERN_PRIVATE_H
#define MONGOC_WRITE_CONCERN_PRIVATE_H
#if !defined(MONGOC_COMPILATION)
#error "Only <mongoc.h> can be included directly."
#endif
#include <bson.h>
BSON_BEGIN_DECLS
#define MONGOC_WRITE_CONCERN_FSYNC_DEFAULT -1
#define MONGOC_WRITE_CONCERN_JOURNAL_DEFAULT -1
struct _mongoc_write_concern_t {
int8_t fsync_; /* deprecated */
int8_t journal;
int32_t w;
int32_t wtimeout;
char *wtag;
bool frozen;
bson_t compiled;
- bson_t compiled_gle;
bool is_default;
};
mongoc_write_concern_t *
-_mongoc_write_concern_new_from_iter (bson_iter_t *iter);
-bool
-_mongoc_write_concern_iter_is_valid (bson_iter_t *iter);
-const bson_t *
-_mongoc_write_concern_get_gle (mongoc_write_concern_t *write_concern);
+_mongoc_write_concern_new_from_iter (bson_iter_t *iter, bson_error_t *error);
const bson_t *
_mongoc_write_concern_get_bson (mongoc_write_concern_t *write_concern);
bool
-_mongoc_write_concern_validate (const mongoc_write_concern_t *write_concern,
- bson_error_t *error);
-bool
_mongoc_parse_wc_err (const bson_t *doc, bson_error_t *error);
BSON_END_DECLS
#endif /* MONGOC_WRITE_CONCERN_PRIVATE_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-concern.c b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-concern.c
similarity index 71%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-concern.c
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-concern.c
index afacb17e..52da1bad 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-concern.c
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-concern.c
@@ -1,663 +1,580 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mongoc-error.h"
#include "mongoc-log.h"
+#include "mongoc-util-private.h"
#include "mongoc-write-concern.h"
#include "mongoc-write-concern-private.h"
-static BSON_INLINE bool
-_mongoc_write_concern_warn_frozen (mongoc_write_concern_t *write_concern)
-{
- if (write_concern->frozen) {
- MONGOC_WARNING ("Cannot modify a frozen write-concern.");
- }
-
- return write_concern->frozen;
-}
-
static void
_mongoc_write_concern_freeze (mongoc_write_concern_t *write_concern);
/**
* mongoc_write_concern_new:
*
* Create a new mongoc_write_concern_t.
*
* Returns: A newly allocated mongoc_write_concern_t. This should be freed
* with mongoc_write_concern_destroy().
*/
mongoc_write_concern_t *
mongoc_write_concern_new (void)
{
mongoc_write_concern_t *write_concern;
write_concern =
(mongoc_write_concern_t *) bson_malloc0 (sizeof *write_concern);
write_concern->w = MONGOC_WRITE_CONCERN_W_DEFAULT;
write_concern->fsync_ = MONGOC_WRITE_CONCERN_FSYNC_DEFAULT;
write_concern->journal = MONGOC_WRITE_CONCERN_JOURNAL_DEFAULT;
write_concern->is_default = true;
+ bson_init (&write_concern->compiled);
+
return write_concern;
}
mongoc_write_concern_t *
mongoc_write_concern_copy (const mongoc_write_concern_t *write_concern)
{
mongoc_write_concern_t *ret = NULL;
if (write_concern) {
ret = mongoc_write_concern_new ();
ret->fsync_ = write_concern->fsync_;
ret->journal = write_concern->journal;
ret->w = write_concern->w;
ret->wtimeout = write_concern->wtimeout;
ret->frozen = false;
ret->wtag = bson_strdup (write_concern->wtag);
ret->is_default = write_concern->is_default;
}
return ret;
}
/**
* mongoc_write_concern_destroy:
* @write_concern: A mongoc_write_concern_t.
*
* Releases a mongoc_write_concern_t and all associated memory.
*/
void
mongoc_write_concern_destroy (mongoc_write_concern_t *write_concern)
{
if (write_concern) {
- if (write_concern->compiled.len) {
- bson_destroy (&write_concern->compiled);
- bson_destroy (&write_concern->compiled_gle);
- }
-
+ bson_destroy (&write_concern->compiled);
bson_free (write_concern->wtag);
bson_free (write_concern);
}
}
bool
mongoc_write_concern_get_fsync (const mongoc_write_concern_t *write_concern)
{
BSON_ASSERT (write_concern);
return (write_concern->fsync_ == true);
}
/**
* mongoc_write_concern_set_fsync:
* @write_concern: A mongoc_write_concern_t.
* @fsync_: If the write concern requires fsync() by the server.
*
* Set if fsync() should be called on the server before acknowledging a
* write request.
*/
void
mongoc_write_concern_set_fsync (mongoc_write_concern_t *write_concern,
bool fsync_)
{
BSON_ASSERT (write_concern);
- if (!_mongoc_write_concern_warn_frozen (write_concern)) {
- write_concern->fsync_ = !!fsync_;
- write_concern->is_default = false;
- }
+ write_concern->fsync_ = !!fsync_;
+ write_concern->is_default = false;
+ write_concern->frozen = false;
}
bool
mongoc_write_concern_get_journal (const mongoc_write_concern_t *write_concern)
{
BSON_ASSERT (write_concern);
return (write_concern->journal == true);
}
bool
mongoc_write_concern_journal_is_set (
const mongoc_write_concern_t *write_concern)
{
BSON_ASSERT (write_concern);
return (write_concern->journal != MONGOC_WRITE_CONCERN_JOURNAL_DEFAULT);
}
/**
* mongoc_write_concern_set_journal:
* @write_concern: A mongoc_write_concern_t.
* @journal: If the write should be journaled.
*
* Set if the write request should be journaled before acknowledging the
* write request.
*/
void
mongoc_write_concern_set_journal (mongoc_write_concern_t *write_concern,
bool journal)
{
BSON_ASSERT (write_concern);
- if (!_mongoc_write_concern_warn_frozen (write_concern)) {
- write_concern->journal = !!journal;
- write_concern->is_default = false;
- }
+ write_concern->journal = !!journal;
+ write_concern->is_default = false;
+ write_concern->frozen = false;
}
int32_t
mongoc_write_concern_get_w (const mongoc_write_concern_t *write_concern)
{
BSON_ASSERT (write_concern);
return write_concern->w;
}
/**
* mongoc_write_concern_set_w:
* @w: The number of nodes for write or MONGOC_WRITE_CONCERN_W_MAJORITY
* for "majority".
*
* Sets the number of nodes that must acknowledge the write request before
* acknowledging the write request to the client.
*
* You may specifiy @w as MONGOC_WRITE_CONCERN_W_MAJORITY to request that
* a "majority" of nodes acknowledge the request.
*/
void
mongoc_write_concern_set_w (mongoc_write_concern_t *write_concern, int32_t w)
{
BSON_ASSERT (write_concern);
BSON_ASSERT (w >= -3);
- if (!_mongoc_write_concern_warn_frozen (write_concern)) {
- write_concern->w = w;
- if (w != MONGOC_WRITE_CONCERN_W_DEFAULT) {
- write_concern->is_default = false;
- }
+ write_concern->w = w;
+ if (w != MONGOC_WRITE_CONCERN_W_DEFAULT) {
+ write_concern->is_default = false;
}
+ write_concern->frozen = false;
}
int32_t
mongoc_write_concern_get_wtimeout (const mongoc_write_concern_t *write_concern)
{
BSON_ASSERT (write_concern);
return write_concern->wtimeout;
}
/**
* mongoc_write_concern_set_wtimeout:
* @write_concern: A mongoc_write_concern_t.
* @wtimeout_msec: Number of milliseconds before timeout.
*
* Sets the number of milliseconds to wait before considering a write
* request as failed. A value of 0 indicates no write timeout.
*
* The @wtimeout_msec parameter must be positive or zero. Negative values will
* be ignored.
*/
void
mongoc_write_concern_set_wtimeout (mongoc_write_concern_t *write_concern,
int32_t wtimeout_msec)
{
BSON_ASSERT (write_concern);
if (wtimeout_msec < 0) {
return;
}
- if (!_mongoc_write_concern_warn_frozen (write_concern)) {
- write_concern->wtimeout = wtimeout_msec;
- write_concern->is_default = false;
- }
+ write_concern->wtimeout = wtimeout_msec;
+ write_concern->is_default = false;
+ write_concern->frozen = false;
}
bool
mongoc_write_concern_get_wmajority (const mongoc_write_concern_t *write_concern)
{
BSON_ASSERT (write_concern);
return (write_concern->w == MONGOC_WRITE_CONCERN_W_MAJORITY);
}
/**
* mongoc_write_concern_set_wmajority:
* @write_concern: A mongoc_write_concern_t.
* @wtimeout_msec: Number of milliseconds before timeout.
*
* Sets the "w" of a write concern to "majority". It is suggested that
* you provide a reasonable @wtimeout_msec to wait before considering the
* write request failed. A @wtimeout_msec value of 0 indicates no write timeout.
*
* The @wtimeout_msec parameter must be positive or zero. Negative values will
* be ignored.
*/
void
mongoc_write_concern_set_wmajority (mongoc_write_concern_t *write_concern,
int32_t wtimeout_msec)
{
BSON_ASSERT (write_concern);
- if (!_mongoc_write_concern_warn_frozen (write_concern)) {
- write_concern->w = MONGOC_WRITE_CONCERN_W_MAJORITY;
- write_concern->is_default = false;
+ write_concern->w = MONGOC_WRITE_CONCERN_W_MAJORITY;
+ write_concern->is_default = false;
+ write_concern->frozen = false;
- if (wtimeout_msec >= 0) {
- write_concern->wtimeout = wtimeout_msec;
- }
+ if (wtimeout_msec >= 0) {
+ write_concern->wtimeout = wtimeout_msec;
}
}
const char *
mongoc_write_concern_get_wtag (const mongoc_write_concern_t *write_concern)
{
BSON_ASSERT (write_concern);
if (write_concern->w == MONGOC_WRITE_CONCERN_W_TAG) {
return write_concern->wtag;
}
return NULL;
}
void
mongoc_write_concern_set_wtag (mongoc_write_concern_t *write_concern,
const char *wtag)
{
BSON_ASSERT (write_concern);
- if (!_mongoc_write_concern_warn_frozen (write_concern)) {
- bson_free (write_concern->wtag);
- write_concern->wtag = bson_strdup (wtag);
- write_concern->w = MONGOC_WRITE_CONCERN_W_TAG;
- write_concern->is_default = false;
- }
+ bson_free (write_concern->wtag);
+ write_concern->wtag = bson_strdup (wtag);
+ write_concern->w = MONGOC_WRITE_CONCERN_W_TAG;
+ write_concern->is_default = false;
+ write_concern->frozen = false;
}
/**
* mongoc_write_concern_get_bson:
* @write_concern: A mongoc_write_concern_t.
*
* This is an internal function.
*
- * Freeze the write concern if necessary and retrieve the encoded bson_t
- * representing the write concern.
- *
- * You may not modify the write concern further after calling this function.
- *
- * Returns: A bson_t that should not be modified or freed as it is owned by
- * the mongoc_write_concern_t instance.
+ * Returns: A bson_t representing the write concern, which is owned by the
+ * mongoc_write_concern_t instance and should not be modified or freed.
*/
const bson_t *
_mongoc_write_concern_get_bson (mongoc_write_concern_t *write_concern)
{
if (!write_concern->frozen) {
_mongoc_write_concern_freeze (write_concern);
}
return &write_concern->compiled;
}
-/**
- * mongoc_write_concern_get_gle:
- * @write_concern: A mongoc_write_concern_t.
- *
- * This is an internal function.
- *
- * Freeze the write concern if necessary and retrieve the encoded bson_t
- * representing the write concern as a get last error command.
- *
- * You may not modify the write concern further after calling this function.
- *
- * Returns: A bson_t that should not be modified or freed as it is owned by
- * the mongoc_write_concern_t instance.
- */
-const bson_t *
-_mongoc_write_concern_get_gle (mongoc_write_concern_t *write_concern)
-{
- if (!write_concern->frozen) {
- _mongoc_write_concern_freeze (write_concern);
- }
-
- return &write_concern->compiled_gle;
-}
-
-
/**
* mongoc_write_concern_is_default:
* @write_concern: A mongoc_write_concern_t.
*
* Returns is_default, which is true when write_concern has not been modified.
*
*/
bool
mongoc_write_concern_is_default (const mongoc_write_concern_t *write_concern)
{
return !write_concern || write_concern->is_default;
}
/**
* mongoc_write_concern_freeze:
* @write_concern: A mongoc_write_concern_t.
*
* This is an internal function.
*
- * Freeze the write concern if necessary and encode it into a bson_ts which
- * represent the raw bson form and the get last error command form.
- *
- * You may not modify the write concern further after calling this function.
+ * Encodes the write concern into a bson_t, which may then be returned by
+ * mongoc_read_concern_get_bson().
*/
static void
_mongoc_write_concern_freeze (mongoc_write_concern_t *write_concern)
{
bson_t *compiled;
- bson_t *compiled_gle;
BSON_ASSERT (write_concern);
compiled = &write_concern->compiled;
- compiled_gle = &write_concern->compiled_gle;
write_concern->frozen = true;
- bson_init (compiled);
- bson_init (compiled_gle);
+ bson_reinit (compiled);
if (write_concern->w == MONGOC_WRITE_CONCERN_W_TAG) {
BSON_ASSERT (write_concern->wtag);
BSON_APPEND_UTF8 (compiled, "w", write_concern->wtag);
} else if (write_concern->w == MONGOC_WRITE_CONCERN_W_MAJORITY) {
BSON_APPEND_UTF8 (compiled, "w", "majority");
} else if (write_concern->w == MONGOC_WRITE_CONCERN_W_DEFAULT) {
/* Do Nothing */
} else {
BSON_APPEND_INT32 (compiled, "w", write_concern->w);
}
if (write_concern->fsync_ != MONGOC_WRITE_CONCERN_FSYNC_DEFAULT) {
bson_append_bool (compiled, "fsync", 5, !!write_concern->fsync_);
}
if (write_concern->journal != MONGOC_WRITE_CONCERN_JOURNAL_DEFAULT) {
bson_append_bool (compiled, "j", 1, !!write_concern->journal);
}
if (write_concern->wtimeout) {
bson_append_int32 (compiled, "wtimeout", 8, write_concern->wtimeout);
}
-
- BSON_APPEND_INT32 (compiled_gle, "getlasterror", 1);
- bson_concat (compiled_gle, compiled);
}
/**
* mongoc_write_concern_is_acknowledged:
* @concern: (in): A mongoc_write_concern_t.
*
* Checks to see if @write_concern requests that a getlasterror command is to
* be delivered to the MongoDB server.
*
* Returns: true if a getlasterror command should be sent.
*/
bool
mongoc_write_concern_is_acknowledged (
const mongoc_write_concern_t *write_concern)
{
if (write_concern) {
return (((write_concern->w != MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED) &&
(write_concern->w != MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED)) ||
write_concern->fsync_ == true ||
mongoc_write_concern_get_journal (write_concern));
}
return true;
}
/**
* mongoc_write_concern_is_valid:
* @write_concern: (in): A mongoc_write_concern_t.
*
* Checks to see if @write_concern is valid and does not contain conflicting
* options.
*
* Returns: true if the write concern is valid; otherwise false.
*/
bool
mongoc_write_concern_is_valid (const mongoc_write_concern_t *write_concern)
{
if (!write_concern) {
return false;
}
/* Journal or fsync should require acknowledgement. */
if ((write_concern->fsync_ == true ||
mongoc_write_concern_get_journal (write_concern)) &&
(write_concern->w == MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED ||
write_concern->w == MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED)) {
return false;
}
if (write_concern->wtimeout < 0) {
return false;
}
return true;
}
-bool
+static bool
_mongoc_write_concern_validate (const mongoc_write_concern_t *write_concern,
bson_error_t *error)
{
if (write_concern && !mongoc_write_concern_is_valid (write_concern)) {
bson_set_error (error,
MONGOC_ERROR_COMMAND,
MONGOC_ERROR_COMMAND_INVALID_ARG,
- "Invalid mongoc_write_concern_t");
+ "Invalid writeConcern");
return false;
}
return true;
}
/**
* _mongoc_parse_wc_err:
* @doc: (in): A bson document.
* @error: (out): A bson_error_t.
*
* Parses a document, usually a server reply,
* looking for a writeConcernError. Returns true if
* there is a writeConcernError, false otherwise.
*/
bool
_mongoc_parse_wc_err (const bson_t *doc, bson_error_t *error)
{
bson_iter_t iter;
bson_iter_t inner;
if (bson_iter_init_find (&iter, doc, "writeConcernError") &&
BSON_ITER_HOLDS_DOCUMENT (&iter)) {
const char *errmsg = NULL;
int32_t code = 0;
bson_iter_recurse (&iter, &inner);
while (bson_iter_next (&inner)) {
if (BSON_ITER_IS_KEY (&inner, "code")) {
code = bson_iter_int32 (&inner);
} else if (BSON_ITER_IS_KEY (&inner, "errmsg")) {
errmsg = bson_iter_utf8 (&inner, NULL);
}
}
bson_set_error (error,
MONGOC_ERROR_WRITE_CONCERN,
code,
"Write Concern error: %s",
errmsg);
return true;
}
return false;
}
/**
* mongoc_write_concern_append:
* @write_concern: (in): A mongoc_write_concern_t.
* @command: (out): A pointer to a bson document.
*
* Appends a write_concern document to a command, to send to
* a server.
*
* Returns true on success, false on failure.
*
*/
bool
mongoc_write_concern_append (mongoc_write_concern_t *write_concern,
bson_t *command)
{
if (!mongoc_write_concern_is_valid (write_concern)) {
MONGOC_ERROR ("Invalid writeConcern passed into "
"mongoc_write_concern_append.");
return false;
}
if (!bson_append_document (command,
"writeConcern",
12,
_mongoc_write_concern_get_bson (write_concern))) {
MONGOC_ERROR ("Could not append writeConcern to command.");
return false;
}
return true;
}
/**
* _mongoc_write_concern_new_from_iter:
*
* Create a new mongoc_write_concern_t from an iterator positioned on
* a "writeConcern" document.
*
* Returns: A newly allocated mongoc_write_concern_t. This should be freed
* with mongoc_write_concern_destroy().
*/
mongoc_write_concern_t *
-_mongoc_write_concern_new_from_iter (bson_iter_t *iter)
+_mongoc_write_concern_new_from_iter (bson_iter_t *iter, bson_error_t *error)
{
bson_iter_t inner;
mongoc_write_concern_t *write_concern;
BSON_ASSERT (iter);
- write_concern =
- (mongoc_write_concern_t *) bson_malloc0 (sizeof *write_concern);
- write_concern->w = MONGOC_WRITE_CONCERN_W_DEFAULT;
- write_concern->fsync_ = MONGOC_WRITE_CONCERN_FSYNC_DEFAULT;
- write_concern->journal = MONGOC_WRITE_CONCERN_JOURNAL_DEFAULT;
+
+ write_concern = mongoc_write_concern_new ();
BSON_ASSERT (bson_iter_recurse (iter, &inner));
while (bson_iter_next (&inner)) {
if (BSON_ITER_IS_KEY (&inner, "w")) {
if (BSON_ITER_HOLDS_INT32 (&inner)) {
- write_concern->w = bson_iter_int32 (&inner);
+ mongoc_write_concern_set_w (write_concern,
+ bson_iter_int32 (&inner));
} else if (BSON_ITER_HOLDS_UTF8 (&inner)) {
if (!strcmp (bson_iter_utf8 (&inner, NULL), "majority")) {
- write_concern->w = MONGOC_WRITE_CONCERN_W_MAJORITY;
+ /* mongoc_write_concern_set_wmajority() only assigns wtimeout if
+ * it is >= 0. Since we set wtimeout below, pass -1 here. */
+ mongoc_write_concern_set_wmajority (write_concern, -1);
} else {
- write_concern->w = MONGOC_WRITE_CONCERN_W_TAG;
- write_concern->wtag = bson_iter_dup_utf8 (&inner, NULL);
+ mongoc_write_concern_set_wtag (write_concern,
+ bson_iter_utf8 (&inner, NULL));
}
+ } else {
+ /* wrong type for "w" */
+ goto fail;
}
- } else if (BSON_ITER_IS_KEY (&inner, "fsync") &&
- BSON_ITER_HOLDS_BOOL (&inner)) {
- write_concern->fsync_ = bson_iter_bool (&inner);
- } else if (BSON_ITER_IS_KEY (&inner, "j") &&
- BSON_ITER_HOLDS_BOOL (&inner)) {
- write_concern->journal = bson_iter_bool (&inner);
- } else if (BSON_ITER_IS_KEY (&inner, "wtimeout") &&
- BSON_ITER_HOLDS_INT32 (&inner)) {
- write_concern->wtimeout = bson_iter_bool (&inner);
- }
- }
-
- return write_concern;
-}
-
-/**
- * _mongoc_write_concern_iter_is_valid:
- * @iter: (in): A bson_iter_t positioned on a "writeConcern" BSON document.
- *
- * Checks to see if @write_concern is valid and does not contain conflicting
- * options.
- *
- * Returns: true if the write concern is valid; otherwise false.
- */
-bool
-_mongoc_write_concern_iter_is_valid (bson_iter_t *iter)
-{
- bson_iter_t inner;
- bool has_fsync = false;
- bool w0 = false;
- bool j = false;
-
- BSON_ASSERT (iter);
- BSON_ASSERT (bson_iter_recurse (iter, &inner));
- while (bson_iter_next (&inner)) {
- if (BSON_ITER_IS_KEY (&inner, "fsync")) {
+ } else if (BSON_ITER_IS_KEY (&inner, "fsync")) {
if (!BSON_ITER_HOLDS_BOOL (&inner)) {
- return false;
- }
- has_fsync = bson_iter_bool (&inner);
- } else if (BSON_ITER_IS_KEY (&inner, "w")) {
- if (BSON_ITER_HOLDS_INT32 (&inner)) {
- if (bson_iter_int32 (&inner) ==
- MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED ||
- bson_iter_int32 (&inner) ==
- MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED) {
- w0 = true;
- }
- } else if (!(BSON_ITER_HOLDS_UTF8 (&inner))) {
- return false;
+ goto fail;
}
+ BEGIN_IGNORE_DEPRECATIONS;
+ mongoc_write_concern_set_fsync (write_concern,
+ bson_iter_bool (&inner));
+ END_IGNORE_DEPRECATIONS;
} else if (BSON_ITER_IS_KEY (&inner, "j")) {
if (!BSON_ITER_HOLDS_BOOL (&inner)) {
- return false;
+ goto fail;
}
- j = bson_iter_bool (&inner);
+ mongoc_write_concern_set_journal (write_concern,
+ bson_iter_bool (&inner));
} else if (BSON_ITER_IS_KEY (&inner, "wtimeout")) {
if (!BSON_ITER_HOLDS_INT32 (&inner) || bson_iter_int32 (&inner) < 0) {
- return false;
+ goto fail;
}
+ mongoc_write_concern_set_wtimeout (write_concern,
+ bson_iter_int32 (&inner));
}
}
- if ((has_fsync || j) && w0) {
- return false;
+ if (!_mongoc_write_concern_validate (write_concern, error)) {
+ mongoc_write_concern_destroy (write_concern);
+ return NULL;
}
- return true;
+ return write_concern;
+
+fail:
+ bson_set_error (error,
+ MONGOC_ERROR_COMMAND,
+ MONGOC_ERROR_COMMAND_INVALID_ARG,
+ "Invalid writeConcern");
+ mongoc_write_concern_destroy (write_concern);
+ return NULL;
}
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-concern.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-concern.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc-write-concern.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc-write-concern.h
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc.h
similarity index 96%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc.h
index 28f2d23e..b61713af 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/mongoc.h
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/mongoc.h
@@ -1,65 +1,67 @@
/*
* Copyright 2013 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MONGOC_H
#define MONGOC_H
#include <bson.h>
#define MONGOC_INSIDE
#include "mongoc-macros.h"
#include "mongoc-apm.h"
#include "mongoc-bulk-operation.h"
+#include "mongoc-change-stream.h"
#include "mongoc-client.h"
#include "mongoc-client-pool.h"
#include "mongoc-collection.h"
#include "mongoc-config.h"
#include "mongoc-cursor.h"
#include "mongoc-database.h"
#include "mongoc-index.h"
#include "mongoc-error.h"
#include "mongoc-flags.h"
#include "mongoc-gridfs.h"
#include "mongoc-gridfs-file.h"
#include "mongoc-gridfs-file-list.h"
#include "mongoc-gridfs-file-page.h"
#include "mongoc-host-list.h"
#include "mongoc-init.h"
#include "mongoc-matcher.h"
#include "mongoc-handshake.h"
#include "mongoc-opcode.h"
#include "mongoc-log.h"
#include "mongoc-socket.h"
+#include "mongoc-client-session.h"
#include "mongoc-stream.h"
#include "mongoc-stream-buffered.h"
#include "mongoc-stream-file.h"
#include "mongoc-stream-gridfs.h"
#include "mongoc-stream-socket.h"
#include "mongoc-uri.h"
#include "mongoc-write-concern.h"
#include "mongoc-version.h"
#include "mongoc-version-functions.h"
#ifdef MONGOC_ENABLE_SSL
#include "mongoc-rand.h"
#include "mongoc-stream-tls.h"
#include "mongoc-ssl.h"
#endif
#undef MONGOC_INSIDE
#endif /* MONGOC_H */
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-compressed.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-compressed.def
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-compressed.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-compressed.def
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-delete.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-delete.def
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-delete.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-delete.def
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-get-more.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-get-more.def
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-get-more.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-get-more.def
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-header.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-header.def
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-header.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-header.def
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-insert.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-insert.def
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-insert.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-insert.def
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-kill-cursors.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-kill-cursors.def
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-kill-cursors.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-kill-cursors.def
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-msg.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-msg.def
similarity index 67%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-msg.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-msg.def
index 98ea64ad..92e0c6bc 100644
--- a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-msg.def
+++ b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-msg.def
@@ -1,8 +1,9 @@
RPC(
msg,
INT32_FIELD(msg_len)
INT32_FIELD(request_id)
INT32_FIELD(response_to)
INT32_FIELD(opcode)
- CSTRING_FIELD(msg)
+ UINT32_FIELD(flags)
+ SECTION_ARRAY_FIELD(sections)
)
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-query.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-query.def
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-query.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-query.def
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-reply-header.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-reply-header.def
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-reply-header.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-reply-header.def
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-reply.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-reply.def
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-reply.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-reply.def
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/op-update.def b/mongodb-1.4.2/src/libmongoc/src/mongoc/op-update.def
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/op-update.def
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/op-update.def
diff --git a/mongodb-1.3.4/src/libmongoc/src/mongoc/utlist.h b/mongodb-1.4.2/src/libmongoc/src/mongoc/utlist.h
similarity index 100%
rename from mongodb-1.3.4/src/libmongoc/src/mongoc/utlist.h
rename to mongodb-1.4.2/src/libmongoc/src/mongoc/utlist.h
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/adler32.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/adler32.c
new file mode 100644
index 00000000..d0be4380
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/adler32.c
@@ -0,0 +1,186 @@
+/* adler32.c -- compute the Adler-32 checksum of a data stream
+ * Copyright (C) 1995-2011, 2016 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#include "zutil.h"
+
+local uLong adler32_combine_ OF((uLong adler1, uLong adler2, z_off64_t len2));
+
+#define BASE 65521U /* largest prime smaller than 65536 */
+#define NMAX 5552
+/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
+
+#define DO1(buf,i) {adler += (buf)[i]; sum2 += adler;}
+#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
+#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
+#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
+#define DO16(buf) DO8(buf,0); DO8(buf,8);
+
+/* use NO_DIVIDE if your processor does not do division in hardware --
+ try it both ways to see which is faster */
+#ifdef NO_DIVIDE
+/* note that this assumes BASE is 65521, where 65536 % 65521 == 15
+ (thank you to John Reiser for pointing this out) */
+# define CHOP(a) \
+ do { \
+ unsigned long tmp = a >> 16; \
+ a &= 0xffffUL; \
+ a += (tmp << 4) - tmp; \
+ } while (0)
+# define MOD28(a) \
+ do { \
+ CHOP(a); \
+ if (a >= BASE) a -= BASE; \
+ } while (0)
+# define MOD(a) \
+ do { \
+ CHOP(a); \
+ MOD28(a); \
+ } while (0)
+# define MOD63(a) \
+ do { /* this assumes a is not negative */ \
+ z_off64_t tmp = a >> 32; \
+ a &= 0xffffffffL; \
+ a += (tmp << 8) - (tmp << 5) + tmp; \
+ tmp = a >> 16; \
+ a &= 0xffffL; \
+ a += (tmp << 4) - tmp; \
+ tmp = a >> 16; \
+ a &= 0xffffL; \
+ a += (tmp << 4) - tmp; \
+ if (a >= BASE) a -= BASE; \
+ } while (0)
+#else
+# define MOD(a) a %= BASE
+# define MOD28(a) a %= BASE
+# define MOD63(a) a %= BASE
+#endif
+
+/* ========================================================================= */
+uLong ZEXPORT adler32_z(adler, buf, len)
+ uLong adler;
+ const Bytef *buf;
+ z_size_t len;
+{
+ unsigned long sum2;
+ unsigned n;
+
+ /* split Adler-32 into component sums */
+ sum2 = (adler >> 16) & 0xffff;
+ adler &= 0xffff;
+
+ /* in case user likes doing a byte at a time, keep it fast */
+ if (len == 1) {
+ adler += buf[0];
+ if (adler >= BASE)
+ adler -= BASE;
+ sum2 += adler;
+ if (sum2 >= BASE)
+ sum2 -= BASE;
+ return adler | (sum2 << 16);
+ }
+
+ /* initial Adler-32 value (deferred check for len == 1 speed) */
+ if (buf == Z_NULL)
+ return 1L;
+
+ /* in case short lengths are provided, keep it somewhat fast */
+ if (len < 16) {
+ while (len--) {
+ adler += *buf++;
+ sum2 += adler;
+ }
+ if (adler >= BASE)
+ adler -= BASE;
+ MOD28(sum2); /* only added so many BASE's */
+ return adler | (sum2 << 16);
+ }
+
+ /* do length NMAX blocks -- requires just one modulo operation */
+ while (len >= NMAX) {
+ len -= NMAX;
+ n = NMAX / 16; /* NMAX is divisible by 16 */
+ do {
+ DO16(buf); /* 16 sums unrolled */
+ buf += 16;
+ } while (--n);
+ MOD(adler);
+ MOD(sum2);
+ }
+
+ /* do remaining bytes (less than NMAX, still just one modulo) */
+ if (len) { /* avoid modulos if none remaining */
+ while (len >= 16) {
+ len -= 16;
+ DO16(buf);
+ buf += 16;
+ }
+ while (len--) {
+ adler += *buf++;
+ sum2 += adler;
+ }
+ MOD(adler);
+ MOD(sum2);
+ }
+
+ /* return recombined sums */
+ return adler | (sum2 << 16);
+}
+
+/* ========================================================================= */
+uLong ZEXPORT adler32(adler, buf, len)
+ uLong adler;
+ const Bytef *buf;
+ uInt len;
+{
+ return adler32_z(adler, buf, len);
+}
+
+/* ========================================================================= */
+local uLong adler32_combine_(adler1, adler2, len2)
+ uLong adler1;
+ uLong adler2;
+ z_off64_t len2;
+{
+ unsigned long sum1;
+ unsigned long sum2;
+ unsigned rem;
+
+ /* for negative len, return invalid adler32 as a clue for debugging */
+ if (len2 < 0)
+ return 0xffffffffUL;
+
+ /* the derivation of this formula is left as an exercise for the reader */
+ MOD63(len2); /* assumes len2 >= 0 */
+ rem = (unsigned)len2;
+ sum1 = adler1 & 0xffff;
+ sum2 = rem * sum1;
+ MOD(sum2);
+ sum1 += (adler2 & 0xffff) + BASE - 1;
+ sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem;
+ if (sum1 >= BASE) sum1 -= BASE;
+ if (sum1 >= BASE) sum1 -= BASE;
+ if (sum2 >= ((unsigned long)BASE << 1)) sum2 -= ((unsigned long)BASE << 1);
+ if (sum2 >= BASE) sum2 -= BASE;
+ return sum1 | (sum2 << 16);
+}
+
+/* ========================================================================= */
+uLong ZEXPORT adler32_combine(adler1, adler2, len2)
+ uLong adler1;
+ uLong adler2;
+ z_off_t len2;
+{
+ return adler32_combine_(adler1, adler2, len2);
+}
+
+uLong ZEXPORT adler32_combine64(adler1, adler2, len2)
+ uLong adler1;
+ uLong adler2;
+ z_off64_t len2;
+{
+ return adler32_combine_(adler1, adler2, len2);
+}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/compress.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/compress.c
new file mode 100644
index 00000000..e2db404a
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/compress.c
@@ -0,0 +1,86 @@
+/* compress.c -- compress a memory buffer
+ * Copyright (C) 1995-2005, 2014, 2016 Jean-loup Gailly, Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#define ZLIB_INTERNAL
+#include "zlib.h"
+
+/* ===========================================================================
+ Compresses the source buffer into the destination buffer. The level
+ parameter has the same meaning as in deflateInit. sourceLen is the byte
+ length of the source buffer. Upon entry, destLen is the total size of the
+ destination buffer, which must be at least 0.1% larger than sourceLen plus
+ 12 bytes. Upon exit, destLen is the actual size of the compressed buffer.
+
+ compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_BUF_ERROR if there was not enough room in the output buffer,
+ Z_STREAM_ERROR if the level parameter is invalid.
+*/
+int ZEXPORT compress2 (dest, destLen, source, sourceLen, level)
+ Bytef *dest;
+ uLongf *destLen;
+ const Bytef *source;
+ uLong sourceLen;
+ int level;
+{
+ z_stream stream;
+ int err;
+ const uInt max = (uInt)-1;
+ uLong left;
+
+ left = *destLen;
+ *destLen = 0;
+
+ stream.zalloc = (alloc_func)0;
+ stream.zfree = (free_func)0;
+ stream.opaque = (voidpf)0;
+
+ err = deflateInit(&stream, level);
+ if (err != Z_OK) return err;
+
+ stream.next_out = dest;
+ stream.avail_out = 0;
+ stream.next_in = (z_const Bytef *)source;
+ stream.avail_in = 0;
+
+ do {
+ if (stream.avail_out == 0) {
+ stream.avail_out = left > (uLong)max ? max : (uInt)left;
+ left -= stream.avail_out;
+ }
+ if (stream.avail_in == 0) {
+ stream.avail_in = sourceLen > (uLong)max ? max : (uInt)sourceLen;
+ sourceLen -= stream.avail_in;
+ }
+ err = deflate(&stream, sourceLen ? Z_NO_FLUSH : Z_FINISH);
+ } while (err == Z_OK);
+
+ *destLen = stream.total_out;
+ deflateEnd(&stream);
+ return err == Z_STREAM_END ? Z_OK : err;
+}
+
+/* ===========================================================================
+ */
+int ZEXPORT compress (dest, destLen, source, sourceLen)
+ Bytef *dest;
+ uLongf *destLen;
+ const Bytef *source;
+ uLong sourceLen;
+{
+ return compress2(dest, destLen, source, sourceLen, Z_DEFAULT_COMPRESSION);
+}
+
+/* ===========================================================================
+ If the default memLevel or windowBits for deflateInit() is changed, then
+ this function needs to be updated.
+ */
+uLong ZEXPORT compressBound (sourceLen)
+ uLong sourceLen;
+{
+ return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
+ (sourceLen >> 25) + 13;
+}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/crc32.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/crc32.c
new file mode 100644
index 00000000..9580440c
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/crc32.c
@@ -0,0 +1,442 @@
+/* crc32.c -- compute the CRC-32 of a data stream
+ * Copyright (C) 1995-2006, 2010, 2011, 2012, 2016 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ *
+ * Thanks to Rodney Brown <rbrown64@csc.com.au> for his contribution of faster
+ * CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing
+ * tables for updating the shift register in one step with three exclusive-ors
+ * instead of four steps with four exclusive-ors. This results in about a
+ * factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3.
+ */
+
+/* @(#) $Id$ */
+
+/*
+ Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore
+ protection on the static variables used to control the first-use generation
+ of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should
+ first call get_crc_table() to initialize the tables before allowing more than
+ one thread to use crc32().
+
+ DYNAMIC_CRC_TABLE and MAKECRCH can be #defined to write out crc32.h.
+ */
+
+#ifdef MAKECRCH
+# include <stdio.h>
+# ifndef DYNAMIC_CRC_TABLE
+# define DYNAMIC_CRC_TABLE
+# endif /* !DYNAMIC_CRC_TABLE */
+#endif /* MAKECRCH */
+
+#include "zutil.h" /* for STDC and FAR definitions */
+
+/* Definitions for doing the crc four data bytes at a time. */
+#if !defined(NOBYFOUR) && defined(Z_U4)
+# define BYFOUR
+#endif
+#ifdef BYFOUR
+ local unsigned long crc32_little OF((unsigned long,
+ const unsigned char FAR *, z_size_t));
+ local unsigned long crc32_big OF((unsigned long,
+ const unsigned char FAR *, z_size_t));
+# define TBLS 8
+#else
+# define TBLS 1
+#endif /* BYFOUR */
+
+/* Local functions for crc concatenation */
+local unsigned long gf2_matrix_times OF((unsigned long *mat,
+ unsigned long vec));
+local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat));
+local uLong crc32_combine_ OF((uLong crc1, uLong crc2, z_off64_t len2));
+
+
+#ifdef DYNAMIC_CRC_TABLE
+
+local volatile int crc_table_empty = 1;
+local z_crc_t FAR crc_table[TBLS][256];
+local void make_crc_table OF((void));
+#ifdef MAKECRCH
+ local void write_table OF((FILE *, const z_crc_t FAR *));
+#endif /* MAKECRCH */
+/*
+ Generate tables for a byte-wise 32-bit CRC calculation on the polynomial:
+ x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1.
+
+ Polynomials over GF(2) are represented in binary, one bit per coefficient,
+ with the lowest powers in the most significant bit. Then adding polynomials
+ is just exclusive-or, and multiplying a polynomial by x is a right shift by
+ one. If we call the above polynomial p, and represent a byte as the
+ polynomial q, also with the lowest power in the most significant bit (so the
+ byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p,
+ where a mod b means the remainder after dividing a by b.
+
+ This calculation is done using the shift-register method of multiplying and
+ taking the remainder. The register is initialized to zero, and for each
+ incoming bit, x^32 is added mod p to the register if the bit is a one (where
+ x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by
+ x (which is shifting right by one and adding x^32 mod p if the bit shifted
+ out is a one). We start with the highest power (least significant bit) of
+ q and repeat for all eight bits of q.
+
+ The first table is simply the CRC of all possible eight bit values. This is
+ all the information needed to generate CRCs on data a byte at a time for all
+ combinations of CRC register values and incoming bytes. The remaining tables
+ allow for word-at-a-time CRC calculation for both big-endian and little-
+ endian machines, where a word is four bytes.
+*/
+local void make_crc_table()
+{
+ z_crc_t c;
+ int n, k;
+ z_crc_t poly; /* polynomial exclusive-or pattern */
+ /* terms of polynomial defining this crc (except x^32): */
+ static volatile int first = 1; /* flag to limit concurrent making */
+ static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
+
+ /* See if another task is already doing this (not thread-safe, but better
+ than nothing -- significantly reduces duration of vulnerability in
+ case the advice about DYNAMIC_CRC_TABLE is ignored) */
+ if (first) {
+ first = 0;
+
+ /* make exclusive-or pattern from polynomial (0xedb88320UL) */
+ poly = 0;
+ for (n = 0; n < (int)(sizeof(p)/sizeof(unsigned char)); n++)
+ poly |= (z_crc_t)1 << (31 - p[n]);
+
+ /* generate a crc for every 8-bit value */
+ for (n = 0; n < 256; n++) {
+ c = (z_crc_t)n;
+ for (k = 0; k < 8; k++)
+ c = c & 1 ? poly ^ (c >> 1) : c >> 1;
+ crc_table[0][n] = c;
+ }
+
+#ifdef BYFOUR
+ /* generate crc for each value followed by one, two, and three zeros,
+ and then the byte reversal of those as well as the first table */
+ for (n = 0; n < 256; n++) {
+ c = crc_table[0][n];
+ crc_table[4][n] = ZSWAP32(c);
+ for (k = 1; k < 4; k++) {
+ c = crc_table[0][c & 0xff] ^ (c >> 8);
+ crc_table[k][n] = c;
+ crc_table[k + 4][n] = ZSWAP32(c);
+ }
+ }
+#endif /* BYFOUR */
+
+ crc_table_empty = 0;
+ }
+ else { /* not first */
+ /* wait for the other guy to finish (not efficient, but rare) */
+ while (crc_table_empty)
+ ;
+ }
+
+#ifdef MAKECRCH
+ /* write out CRC tables to crc32.h */
+ {
+ FILE *out;
+
+ out = fopen("crc32.h", "w");
+ if (out == NULL) return;
+ fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n");
+ fprintf(out, " * Generated automatically by crc32.c\n */\n\n");
+ fprintf(out, "local const z_crc_t FAR ");
+ fprintf(out, "crc_table[TBLS][256] =\n{\n {\n");
+ write_table(out, crc_table[0]);
+# ifdef BYFOUR
+ fprintf(out, "#ifdef BYFOUR\n");
+ for (k = 1; k < 8; k++) {
+ fprintf(out, " },\n {\n");
+ write_table(out, crc_table[k]);
+ }
+ fprintf(out, "#endif\n");
+# endif /* BYFOUR */
+ fprintf(out, " }\n};\n");
+ fclose(out);
+ }
+#endif /* MAKECRCH */
+}
+
+#ifdef MAKECRCH
+local void write_table(out, table)
+ FILE *out;
+ const z_crc_t FAR *table;
+{
+ int n;
+
+ for (n = 0; n < 256; n++)
+ fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ",
+ (unsigned long)(table[n]),
+ n == 255 ? "\n" : (n % 5 == 4 ? ",\n" : ", "));
+}
+#endif /* MAKECRCH */
+
+#else /* !DYNAMIC_CRC_TABLE */
+/* ========================================================================
+ * Tables of CRC-32s of all single-byte values, made by make_crc_table().
+ */
+#include "crc32.h"
+#endif /* DYNAMIC_CRC_TABLE */
+
+/* =========================================================================
+ * This function can be used by asm versions of crc32()
+ */
+const z_crc_t FAR * ZEXPORT get_crc_table()
+{
+#ifdef DYNAMIC_CRC_TABLE
+ if (crc_table_empty)
+ make_crc_table();
+#endif /* DYNAMIC_CRC_TABLE */
+ return (const z_crc_t FAR *)crc_table;
+}
+
+/* ========================================================================= */
+#define DO1 crc = crc_table[0][((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8)
+#define DO8 DO1; DO1; DO1; DO1; DO1; DO1; DO1; DO1
+
+/* ========================================================================= */
+unsigned long ZEXPORT crc32_z(crc, buf, len)
+ unsigned long crc;
+ const unsigned char FAR *buf;
+ z_size_t len;
+{
+ if (buf == Z_NULL) return 0UL;
+
+#ifdef DYNAMIC_CRC_TABLE
+ if (crc_table_empty)
+ make_crc_table();
+#endif /* DYNAMIC_CRC_TABLE */
+
+#ifdef BYFOUR
+ if (sizeof(void *) == sizeof(ptrdiff_t)) {
+ z_crc_t endian;
+
+ endian = 1;
+ if (*((unsigned char *)(&endian)))
+ return crc32_little(crc, buf, len);
+ else
+ return crc32_big(crc, buf, len);
+ }
+#endif /* BYFOUR */
+ crc = crc ^ 0xffffffffUL;
+ while (len >= 8) {
+ DO8;
+ len -= 8;
+ }
+ if (len) do {
+ DO1;
+ } while (--len);
+ return crc ^ 0xffffffffUL;
+}
+
+/* ========================================================================= */
+unsigned long ZEXPORT crc32(crc, buf, len)
+ unsigned long crc;
+ const unsigned char FAR *buf;
+ uInt len;
+{
+ return crc32_z(crc, buf, len);
+}
+
+#ifdef BYFOUR
+
+/*
+ This BYFOUR code accesses the passed unsigned char * buffer with a 32-bit
+ integer pointer type. This violates the strict aliasing rule, where a
+ compiler can assume, for optimization purposes, that two pointers to
+ fundamentally different types won't ever point to the same memory. This can
+ manifest as a problem only if one of the pointers is written to. This code
+ only reads from those pointers. So long as this code remains isolated in
+ this compilation unit, there won't be a problem. For this reason, this code
+ should not be copied and pasted into a compilation unit in which other code
+ writes to the buffer that is passed to these routines.
+ */
+
+/* ========================================================================= */
+#define DOLIT4 c ^= *buf4++; \
+ c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \
+ crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24]
+#define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4
+
+/* ========================================================================= */
+local unsigned long crc32_little(crc, buf, len)
+ unsigned long crc;
+ const unsigned char FAR *buf;
+ z_size_t len;
+{
+ register z_crc_t c;
+ register const z_crc_t FAR *buf4;
+
+ c = (z_crc_t)crc;
+ c = ~c;
+ while (len && ((ptrdiff_t)buf & 3)) {
+ c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
+ len--;
+ }
+
+ buf4 = (const z_crc_t FAR *)(const void FAR *)buf;
+ while (len >= 32) {
+ DOLIT32;
+ len -= 32;
+ }
+ while (len >= 4) {
+ DOLIT4;
+ len -= 4;
+ }
+ buf = (const unsigned char FAR *)buf4;
+
+ if (len) do {
+ c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
+ } while (--len);
+ c = ~c;
+ return (unsigned long)c;
+}
+
+/* ========================================================================= */
+#define DOBIG4 c ^= *buf4++; \
+ c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
+ crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
+#define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
+
+/* ========================================================================= */
+local unsigned long crc32_big(crc, buf, len)
+ unsigned long crc;
+ const unsigned char FAR *buf;
+ z_size_t len;
+{
+ register z_crc_t c;
+ register const z_crc_t FAR *buf4;
+
+ c = ZSWAP32((z_crc_t)crc);
+ c = ~c;
+ while (len && ((ptrdiff_t)buf & 3)) {
+ c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
+ len--;
+ }
+
+ buf4 = (const z_crc_t FAR *)(const void FAR *)buf;
+ while (len >= 32) {
+ DOBIG32;
+ len -= 32;
+ }
+ while (len >= 4) {
+ DOBIG4;
+ len -= 4;
+ }
+ buf = (const unsigned char FAR *)buf4;
+
+ if (len) do {
+ c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
+ } while (--len);
+ c = ~c;
+ return (unsigned long)(ZSWAP32(c));
+}
+
+#endif /* BYFOUR */
+
+#define GF2_DIM 32 /* dimension of GF(2) vectors (length of CRC) */
+
+/* ========================================================================= */
+local unsigned long gf2_matrix_times(mat, vec)
+ unsigned long *mat;
+ unsigned long vec;
+{
+ unsigned long sum;
+
+ sum = 0;
+ while (vec) {
+ if (vec & 1)
+ sum ^= *mat;
+ vec >>= 1;
+ mat++;
+ }
+ return sum;
+}
+
+/* ========================================================================= */
+local void gf2_matrix_square(square, mat)
+ unsigned long *square;
+ unsigned long *mat;
+{
+ int n;
+
+ for (n = 0; n < GF2_DIM; n++)
+ square[n] = gf2_matrix_times(mat, mat[n]);
+}
+
+/* ========================================================================= */
+local uLong crc32_combine_(crc1, crc2, len2)
+ uLong crc1;
+ uLong crc2;
+ z_off64_t len2;
+{
+ int n;
+ unsigned long row;
+ unsigned long even[GF2_DIM]; /* even-power-of-two zeros operator */
+ unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */
+
+ /* degenerate case (also disallow negative lengths) */
+ if (len2 <= 0)
+ return crc1;
+
+ /* put operator for one zero bit in odd */
+ odd[0] = 0xedb88320UL; /* CRC-32 polynomial */
+ row = 1;
+ for (n = 1; n < GF2_DIM; n++) {
+ odd[n] = row;
+ row <<= 1;
+ }
+
+ /* put operator for two zero bits in even */
+ gf2_matrix_square(even, odd);
+
+ /* put operator for four zero bits in odd */
+ gf2_matrix_square(odd, even);
+
+ /* apply len2 zeros to crc1 (first square will put the operator for one
+ zero byte, eight zero bits, in even) */
+ do {
+ /* apply zeros operator for this bit of len2 */
+ gf2_matrix_square(even, odd);
+ if (len2 & 1)
+ crc1 = gf2_matrix_times(even, crc1);
+ len2 >>= 1;
+
+ /* if no more bits set, then done */
+ if (len2 == 0)
+ break;
+
+ /* another iteration of the loop with odd and even swapped */
+ gf2_matrix_square(odd, even);
+ if (len2 & 1)
+ crc1 = gf2_matrix_times(odd, crc1);
+ len2 >>= 1;
+
+ /* if no more bits set, then done */
+ } while (len2 != 0);
+
+ /* return combined crc */
+ crc1 ^= crc2;
+ return crc1;
+}
+
+/* ========================================================================= */
+uLong ZEXPORT crc32_combine(crc1, crc2, len2)
+ uLong crc1;
+ uLong crc2;
+ z_off_t len2;
+{
+ return crc32_combine_(crc1, crc2, len2);
+}
+
+uLong ZEXPORT crc32_combine64(crc1, crc2, len2)
+ uLong crc1;
+ uLong crc2;
+ z_off64_t len2;
+{
+ return crc32_combine_(crc1, crc2, len2);
+}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/crc32.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/crc32.h
new file mode 100644
index 00000000..9e0c7781
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/crc32.h
@@ -0,0 +1,441 @@
+/* crc32.h -- tables for rapid CRC calculation
+ * Generated automatically by crc32.c
+ */
+
+local const z_crc_t FAR crc_table[TBLS][256] =
+{
+ {
+ 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
+ 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
+ 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
+ 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
+ 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
+ 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
+ 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
+ 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
+ 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
+ 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
+ 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
+ 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
+ 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
+ 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
+ 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
+ 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
+ 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
+ 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
+ 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
+ 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
+ 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
+ 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
+ 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
+ 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
+ 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
+ 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
+ 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
+ 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
+ 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
+ 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
+ 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
+ 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
+ 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
+ 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
+ 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
+ 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
+ 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
+ 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
+ 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
+ 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
+ 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
+ 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
+ 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
+ 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
+ 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
+ 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
+ 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
+ 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
+ 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
+ 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
+ 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
+ 0x2d02ef8dUL
+#ifdef BYFOUR
+ },
+ {
+ 0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL,
+ 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL,
+ 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL,
+ 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL,
+ 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL,
+ 0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL,
+ 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL,
+ 0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL,
+ 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL,
+ 0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL,
+ 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL,
+ 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL,
+ 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL,
+ 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL,
+ 0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL,
+ 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL,
+ 0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL,
+ 0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL,
+ 0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL,
+ 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL,
+ 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL,
+ 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL,
+ 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL,
+ 0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL,
+ 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL,
+ 0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL,
+ 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL,
+ 0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL,
+ 0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL,
+ 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL,
+ 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL,
+ 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL,
+ 0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL,
+ 0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL,
+ 0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL,
+ 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL,
+ 0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL,
+ 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL,
+ 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL,
+ 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL,
+ 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL,
+ 0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL,
+ 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL,
+ 0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL,
+ 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL,
+ 0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL,
+ 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL,
+ 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL,
+ 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL,
+ 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL,
+ 0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL,
+ 0x9324fd72UL
+ },
+ {
+ 0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL,
+ 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL,
+ 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL,
+ 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL,
+ 0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL,
+ 0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL,
+ 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL,
+ 0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL,
+ 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL,
+ 0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL,
+ 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL,
+ 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL,
+ 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL,
+ 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL,
+ 0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL,
+ 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL,
+ 0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL,
+ 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL,
+ 0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL,
+ 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL,
+ 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL,
+ 0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL,
+ 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL,
+ 0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL,
+ 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL,
+ 0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL,
+ 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL,
+ 0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL,
+ 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL,
+ 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL,
+ 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL,
+ 0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL,
+ 0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL,
+ 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL,
+ 0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL,
+ 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL,
+ 0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL,
+ 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL,
+ 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL,
+ 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL,
+ 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL,
+ 0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL,
+ 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL,
+ 0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL,
+ 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL,
+ 0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL,
+ 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL,
+ 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL,
+ 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL,
+ 0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL,
+ 0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL,
+ 0xbe9834edUL
+ },
+ {
+ 0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL,
+ 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL,
+ 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL,
+ 0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL,
+ 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL,
+ 0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL,
+ 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL,
+ 0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL,
+ 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL,
+ 0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL,
+ 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL,
+ 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL,
+ 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL,
+ 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL,
+ 0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL,
+ 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL,
+ 0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL,
+ 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL,
+ 0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL,
+ 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL,
+ 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL,
+ 0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL,
+ 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL,
+ 0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL,
+ 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL,
+ 0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL,
+ 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL,
+ 0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL,
+ 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL,
+ 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL,
+ 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL,
+ 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL,
+ 0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL,
+ 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL,
+ 0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL,
+ 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL,
+ 0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL,
+ 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL,
+ 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL,
+ 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL,
+ 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL,
+ 0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL,
+ 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL,
+ 0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL,
+ 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL,
+ 0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL,
+ 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL,
+ 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL,
+ 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL,
+ 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL,
+ 0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL,
+ 0xde0506f1UL
+ },
+ {
+ 0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL,
+ 0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL,
+ 0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL,
+ 0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL,
+ 0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL,
+ 0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL,
+ 0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL,
+ 0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL,
+ 0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL,
+ 0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL,
+ 0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL,
+ 0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL,
+ 0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL,
+ 0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL,
+ 0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL,
+ 0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL,
+ 0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL,
+ 0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL,
+ 0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL,
+ 0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL,
+ 0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL,
+ 0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL,
+ 0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL,
+ 0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL,
+ 0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL,
+ 0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL,
+ 0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL,
+ 0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL,
+ 0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL,
+ 0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL,
+ 0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL,
+ 0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL,
+ 0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL,
+ 0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL,
+ 0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL,
+ 0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL,
+ 0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL,
+ 0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL,
+ 0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL,
+ 0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL,
+ 0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL,
+ 0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL,
+ 0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL,
+ 0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL,
+ 0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL,
+ 0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL,
+ 0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL,
+ 0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL,
+ 0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL,
+ 0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL,
+ 0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL,
+ 0x8def022dUL
+ },
+ {
+ 0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL,
+ 0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL,
+ 0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL,
+ 0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL,
+ 0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL,
+ 0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL,
+ 0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL,
+ 0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL,
+ 0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL,
+ 0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL,
+ 0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL,
+ 0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL,
+ 0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL,
+ 0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL,
+ 0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL,
+ 0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL,
+ 0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL,
+ 0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL,
+ 0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL,
+ 0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL,
+ 0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL,
+ 0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL,
+ 0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL,
+ 0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL,
+ 0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL,
+ 0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL,
+ 0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL,
+ 0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL,
+ 0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL,
+ 0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL,
+ 0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL,
+ 0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL,
+ 0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL,
+ 0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL,
+ 0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL,
+ 0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL,
+ 0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL,
+ 0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL,
+ 0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL,
+ 0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL,
+ 0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL,
+ 0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL,
+ 0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL,
+ 0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL,
+ 0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL,
+ 0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL,
+ 0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL,
+ 0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL,
+ 0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL,
+ 0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL,
+ 0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL,
+ 0x72fd2493UL
+ },
+ {
+ 0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL,
+ 0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL,
+ 0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL,
+ 0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL,
+ 0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL,
+ 0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL,
+ 0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL,
+ 0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL,
+ 0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL,
+ 0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL,
+ 0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL,
+ 0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL,
+ 0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL,
+ 0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL,
+ 0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL,
+ 0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL,
+ 0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL,
+ 0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL,
+ 0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL,
+ 0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL,
+ 0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL,
+ 0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL,
+ 0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL,
+ 0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL,
+ 0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL,
+ 0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL,
+ 0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL,
+ 0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL,
+ 0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL,
+ 0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL,
+ 0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL,
+ 0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL,
+ 0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL,
+ 0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL,
+ 0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL,
+ 0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL,
+ 0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL,
+ 0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL,
+ 0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL,
+ 0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL,
+ 0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL,
+ 0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL,
+ 0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL,
+ 0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL,
+ 0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL,
+ 0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL,
+ 0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL,
+ 0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL,
+ 0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL,
+ 0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL,
+ 0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL,
+ 0xed3498beUL
+ },
+ {
+ 0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL,
+ 0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL,
+ 0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL,
+ 0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL,
+ 0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL,
+ 0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL,
+ 0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL,
+ 0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL,
+ 0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL,
+ 0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL,
+ 0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL,
+ 0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL,
+ 0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL,
+ 0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL,
+ 0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL,
+ 0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL,
+ 0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL,
+ 0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL,
+ 0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL,
+ 0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL,
+ 0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL,
+ 0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL,
+ 0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL,
+ 0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL,
+ 0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL,
+ 0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL,
+ 0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL,
+ 0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL,
+ 0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL,
+ 0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL,
+ 0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL,
+ 0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL,
+ 0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL,
+ 0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL,
+ 0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL,
+ 0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL,
+ 0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL,
+ 0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL,
+ 0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL,
+ 0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL,
+ 0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL,
+ 0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL,
+ 0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL,
+ 0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL,
+ 0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL,
+ 0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL,
+ 0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL,
+ 0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL,
+ 0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL,
+ 0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL,
+ 0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL,
+ 0xf10605deUL
+#endif
+ }
+};
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/deflate.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/deflate.c
new file mode 100644
index 00000000..1ec76144
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/deflate.c
@@ -0,0 +1,2163 @@
+/* deflate.c -- compress data using the deflation algorithm
+ * Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process depends on being able to identify portions
+ * of the input text which are identical to earlier input (within a
+ * sliding window trailing behind the input currently being processed).
+ *
+ * The most straightforward technique turns out to be the fastest for
+ * most input files: try all possible matches and select the longest.
+ * The key feature of this algorithm is that insertions into the string
+ * dictionary are very simple and thus fast, and deletions are avoided
+ * completely. Insertions are performed at each input character, whereas
+ * string matches are performed only when the previous match ends. So it
+ * is preferable to spend more time in matches to allow very fast string
+ * insertions and avoid deletions. The matching algorithm for small
+ * strings is inspired from that of Rabin & Karp. A brute force approach
+ * is used to find longer strings when a small match has been found.
+ * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
+ * (by Leonid Broukhis).
+ * A previous version of this file used a more sophisticated algorithm
+ * (by Fiala and Greene) which is guaranteed to run in linear amortized
+ * time, but has a larger average cost, uses more memory and is patented.
+ * However the F&G algorithm may be faster for some highly redundant
+ * files if the parameter max_chain_length (described below) is too large.
+ *
+ * ACKNOWLEDGEMENTS
+ *
+ * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
+ * I found it in 'freeze' written by Leonid Broukhis.
+ * Thanks to many people for bug reports and testing.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
+ * Available in http://tools.ietf.org/html/rfc1951
+ *
+ * A description of the Rabin and Karp algorithm is given in the book
+ * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
+ *
+ * Fiala,E.R., and Greene,D.H.
+ * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595
+ *
+ */
+
+/* @(#) $Id$ */
+
+#include "deflate.h"
+
+const char deflate_copyright[] =
+ " deflate 1.2.11 Copyright 1995-2017 Jean-loup Gailly and Mark Adler ";
+/*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+
+/* ===========================================================================
+ * Function prototypes.
+ */
+typedef enum {
+ need_more, /* block not completed, need more input or more output */
+ block_done, /* block flush performed */
+ finish_started, /* finish started, need only more output at next deflate */
+ finish_done /* finish done, accept no more input or output */
+} block_state;
+
+typedef block_state (*compress_func) OF((deflate_state *s, int flush));
+/* Compression function. Returns the block state after the call. */
+
+local int deflateStateCheck OF((z_streamp strm));
+local void slide_hash OF((deflate_state *s));
+local void fill_window OF((deflate_state *s));
+local block_state deflate_stored OF((deflate_state *s, int flush));
+local block_state deflate_fast OF((deflate_state *s, int flush));
+#ifndef FASTEST
+local block_state deflate_slow OF((deflate_state *s, int flush));
+#endif
+local block_state deflate_rle OF((deflate_state *s, int flush));
+local block_state deflate_huff OF((deflate_state *s, int flush));
+local void lm_init OF((deflate_state *s));
+local void putShortMSB OF((deflate_state *s, uInt b));
+local void flush_pending OF((z_streamp strm));
+local unsigned read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
+#ifdef ASMV
+# pragma message("Assembler code may have bugs -- use at your own risk")
+ void match_init OF((void)); /* asm code initialization */
+ uInt longest_match OF((deflate_state *s, IPos cur_match));
+#else
+local uInt longest_match OF((deflate_state *s, IPos cur_match));
+#endif
+
+#ifdef ZLIB_DEBUG
+local void check_match OF((deflate_state *s, IPos start, IPos match,
+ int length));
+#endif
+
+/* ===========================================================================
+ * Local data
+ */
+
+#define NIL 0
+/* Tail of hash chains */
+
+#ifndef TOO_FAR
+# define TOO_FAR 4096
+#endif
+/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
+
+/* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+typedef struct config_s {
+ ush good_length; /* reduce lazy search above this match length */
+ ush max_lazy; /* do not perform lazy search above this match length */
+ ush nice_length; /* quit search above this match length */
+ ush max_chain;
+ compress_func func;
+} config;
+
+#ifdef FASTEST
+local const config configuration_table[2] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */
+#else
+local const config configuration_table[10] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */
+/* 2 */ {4, 5, 16, 8, deflate_fast},
+/* 3 */ {4, 6, 32, 32, deflate_fast},
+
+/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
+/* 5 */ {8, 16, 32, 32, deflate_slow},
+/* 6 */ {8, 16, 128, 128, deflate_slow},
+/* 7 */ {8, 32, 128, 256, deflate_slow},
+/* 8 */ {32, 128, 258, 1024, deflate_slow},
+/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */
+#endif
+
+/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
+ * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
+ * meaning.
+ */
+
+/* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */
+#define RANK(f) (((f) * 2) - ((f) > 4 ? 9 : 0))
+
+/* ===========================================================================
+ * Update a hash value with the given input byte
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive input
+ * characters, so that a running hash key can be computed from the previous
+ * key instead of complete recalculation each time.
+ */
+#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
+
+
+/* ===========================================================================
+ * Insert string str in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * If this file is compiled with -DFASTEST, the compression level is forced
+ * to 1, and no hash chains are maintained.
+ * IN assertion: all calls to INSERT_STRING are made with consecutive input
+ * characters and the first MIN_MATCH bytes of str are valid (except for
+ * the last MIN_MATCH-1 bytes of the input file).
+ */
+#ifdef FASTEST
+#define INSERT_STRING(s, str, match_head) \
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+ match_head = s->head[s->ins_h], \
+ s->head[s->ins_h] = (Pos)(str))
+#else
+#define INSERT_STRING(s, str, match_head) \
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+ match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \
+ s->head[s->ins_h] = (Pos)(str))
+#endif
+
+/* ===========================================================================
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
+ * prev[] will be initialized on the fly.
+ */
+#define CLEAR_HASH(s) \
+ s->head[s->hash_size-1] = NIL; \
+ zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+
+/* ===========================================================================
+ * Slide the hash table when sliding the window down (could be avoided with 32
+ * bit values at the expense of memory usage). We slide even when level == 0 to
+ * keep the hash table consistent if we switch back to level > 0 later.
+ */
+local void slide_hash(s)
+ deflate_state *s;
+{
+ unsigned n, m;
+ Posf *p;
+ uInt wsize = s->w_size;
+
+ n = s->hash_size;
+ p = &s->head[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m - wsize : NIL);
+ } while (--n);
+ n = wsize;
+#ifndef FASTEST
+ p = &s->prev[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m - wsize : NIL);
+ /* If n is not on any hash chain, prev[n] is garbage but
+ * its value will never be used.
+ */
+ } while (--n);
+#endif
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateInit_(strm, level, version, stream_size)
+ z_streamp strm;
+ int level;
+ const char *version;
+ int stream_size;
+{
+ return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
+ Z_DEFAULT_STRATEGY, version, stream_size);
+ /* To do: ignore strm->next_in if we use it as window */
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+ version, stream_size)
+ z_streamp strm;
+ int level;
+ int method;
+ int windowBits;
+ int memLevel;
+ int strategy;
+ const char *version;
+ int stream_size;
+{
+ deflate_state *s;
+ int wrap = 1;
+ static const char my_version[] = ZLIB_VERSION;
+
+ ushf *overlay;
+ /* We overlay pending_buf and d_buf+l_buf. This works since the average
+ * output size for (length,distance) codes is <= 24 bits.
+ */
+
+ if (version == Z_NULL || version[0] != my_version[0] ||
+ stream_size != sizeof(z_stream)) {
+ return Z_VERSION_ERROR;
+ }
+ if (strm == Z_NULL) return Z_STREAM_ERROR;
+
+ strm->msg = Z_NULL;
+ if (strm->zalloc == (alloc_func)0) {
+#ifdef Z_SOLO
+ return Z_STREAM_ERROR;
+#else
+ strm->zalloc = zcalloc;
+ strm->opaque = (voidpf)0;
+#endif
+ }
+ if (strm->zfree == (free_func)0)
+#ifdef Z_SOLO
+ return Z_STREAM_ERROR;
+#else
+ strm->zfree = zcfree;
+#endif
+
+#ifdef FASTEST
+ if (level != 0) level = 1;
+#else
+ if (level == Z_DEFAULT_COMPRESSION) level = 6;
+#endif
+
+ if (windowBits < 0) { /* suppress zlib wrapper */
+ wrap = 0;
+ windowBits = -windowBits;
+ }
+#ifdef GZIP
+ else if (windowBits > 15) {
+ wrap = 2; /* write gzip wrapper instead */
+ windowBits -= 16;
+ }
+#endif
+ if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
+ windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
+ strategy < 0 || strategy > Z_FIXED || (windowBits == 8 && wrap != 1)) {
+ return Z_STREAM_ERROR;
+ }
+ if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */
+ s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
+ if (s == Z_NULL) return Z_MEM_ERROR;
+ strm->state = (struct internal_state FAR *)s;
+ s->strm = strm;
+ s->status = INIT_STATE; /* to pass state test in deflateReset() */
+
+ s->wrap = wrap;
+ s->gzhead = Z_NULL;
+ s->w_bits = (uInt)windowBits;
+ s->w_size = 1 << s->w_bits;
+ s->w_mask = s->w_size - 1;
+
+ s->hash_bits = (uInt)memLevel + 7;
+ s->hash_size = 1 << s->hash_bits;
+ s->hash_mask = s->hash_size - 1;
+ s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+
+ s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
+ s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
+ s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
+
+ s->high_water = 0; /* nothing written to s->window yet */
+
+ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+ overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
+ s->pending_buf = (uchf *) overlay;
+ s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+
+ if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
+ s->pending_buf == Z_NULL) {
+ s->status = FINISH_STATE;
+ strm->msg = ERR_MSG(Z_MEM_ERROR);
+ deflateEnd (strm);
+ return Z_MEM_ERROR;
+ }
+ s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
+ s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+
+ s->level = level;
+ s->strategy = strategy;
+ s->method = (Byte)method;
+
+ return deflateReset(strm);
+}
+
+/* =========================================================================
+ * Check for a valid deflate stream state. Return 0 if ok, 1 if not.
+ */
+local int deflateStateCheck (strm)
+ z_streamp strm;
+{
+ deflate_state *s;
+ if (strm == Z_NULL ||
+ strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0)
+ return 1;
+ s = strm->state;
+ if (s == Z_NULL || s->strm != strm || (s->status != INIT_STATE &&
+#ifdef GZIP
+ s->status != GZIP_STATE &&
+#endif
+ s->status != EXTRA_STATE &&
+ s->status != NAME_STATE &&
+ s->status != COMMENT_STATE &&
+ s->status != HCRC_STATE &&
+ s->status != BUSY_STATE &&
+ s->status != FINISH_STATE))
+ return 1;
+ return 0;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
+ z_streamp strm;
+ const Bytef *dictionary;
+ uInt dictLength;
+{
+ deflate_state *s;
+ uInt str, n;
+ int wrap;
+ unsigned avail;
+ z_const unsigned char *next;
+
+ if (deflateStateCheck(strm) || dictionary == Z_NULL)
+ return Z_STREAM_ERROR;
+ s = strm->state;
+ wrap = s->wrap;
+ if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead)
+ return Z_STREAM_ERROR;
+
+ /* when using zlib wrappers, compute Adler-32 for provided dictionary */
+ if (wrap == 1)
+ strm->adler = adler32(strm->adler, dictionary, dictLength);
+ s->wrap = 0; /* avoid computing Adler-32 in read_buf */
+
+ /* if dictionary would fill window, just replace the history */
+ if (dictLength >= s->w_size) {
+ if (wrap == 0) { /* already empty otherwise */
+ CLEAR_HASH(s);
+ s->strstart = 0;
+ s->block_start = 0L;
+ s->insert = 0;
+ }
+ dictionary += dictLength - s->w_size; /* use the tail */
+ dictLength = s->w_size;
+ }
+
+ /* insert dictionary into window and hash */
+ avail = strm->avail_in;
+ next = strm->next_in;
+ strm->avail_in = dictLength;
+ strm->next_in = (z_const Bytef *)dictionary;
+ fill_window(s);
+ while (s->lookahead >= MIN_MATCH) {
+ str = s->strstart;
+ n = s->lookahead - (MIN_MATCH-1);
+ do {
+ UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
+#ifndef FASTEST
+ s->prev[str & s->w_mask] = s->head[s->ins_h];
+#endif
+ s->head[s->ins_h] = (Pos)str;
+ str++;
+ } while (--n);
+ s->strstart = str;
+ s->lookahead = MIN_MATCH-1;
+ fill_window(s);
+ }
+ s->strstart += s->lookahead;
+ s->block_start = (long)s->strstart;
+ s->insert = s->lookahead;
+ s->lookahead = 0;
+ s->match_length = s->prev_length = MIN_MATCH-1;
+ s->match_available = 0;
+ strm->next_in = next;
+ strm->avail_in = avail;
+ s->wrap = wrap;
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength)
+ z_streamp strm;
+ Bytef *dictionary;
+ uInt *dictLength;
+{
+ deflate_state *s;
+ uInt len;
+
+ if (deflateStateCheck(strm))
+ return Z_STREAM_ERROR;
+ s = strm->state;
+ len = s->strstart + s->lookahead;
+ if (len > s->w_size)
+ len = s->w_size;
+ if (dictionary != Z_NULL && len)
+ zmemcpy(dictionary, s->window + s->strstart + s->lookahead - len, len);
+ if (dictLength != Z_NULL)
+ *dictLength = len;
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateResetKeep (strm)
+ z_streamp strm;
+{
+ deflate_state *s;
+
+ if (deflateStateCheck(strm)) {
+ return Z_STREAM_ERROR;
+ }
+
+ strm->total_in = strm->total_out = 0;
+ strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
+ strm->data_type = Z_UNKNOWN;
+
+ s = (deflate_state *)strm->state;
+ s->pending = 0;
+ s->pending_out = s->pending_buf;
+
+ if (s->wrap < 0) {
+ s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
+ }
+ s->status =
+#ifdef GZIP
+ s->wrap == 2 ? GZIP_STATE :
+#endif
+ s->wrap ? INIT_STATE : BUSY_STATE;
+ strm->adler =
+#ifdef GZIP
+ s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
+#endif
+ adler32(0L, Z_NULL, 0);
+ s->last_flush = Z_NO_FLUSH;
+
+ _tr_init(s);
+
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateReset (strm)
+ z_streamp strm;
+{
+ int ret;
+
+ ret = deflateResetKeep(strm);
+ if (ret == Z_OK)
+ lm_init(strm->state);
+ return ret;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflateSetHeader (strm, head)
+ z_streamp strm;
+ gz_headerp head;
+{
+ if (deflateStateCheck(strm) || strm->state->wrap != 2)
+ return Z_STREAM_ERROR;
+ strm->state->gzhead = head;
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int ZEXPORT deflatePending (strm, pending, bits)
+ unsigned *pending;
+ int *bits;
+ z_streamp strm;
+{
+ if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
+ if (pending != Z_NULL)
+ *pending = strm->state->pending;
+ if (bits != Z_NULL)
+ *bits = strm->state->bi_valid;
+ return Z_OK;
+}
+
+/* ========================================================================= */
int ZEXPORT deflatePrime (strm, bits, value)
    z_streamp strm;
    int bits;
    int value;
{
    deflate_state *s;
    int put;            /* number of bits inserted in one iteration */

    if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
    s = strm->state;
    /* Refuse if flushing the primed bits could collide with the distance
     * buffer that shares storage with pending_buf.
     */
    if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3))
        return Z_BUF_ERROR;
    /* Feed `value` into the bit accumulator up to Buf_size bits at a
     * time, low bits first, flushing completed bytes to pending_buf
     * between iterations.
     */
    do {
        put = Buf_size - s->bi_valid;
        if (put > bits)
            put = bits;
        s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid);
        s->bi_valid += put;
        _tr_flush_bits(s);
        value >>= put;
        bits -= put;
    } while (bits);
    return Z_OK;
}
+
+/* ========================================================================= */
int ZEXPORT deflateParams(strm, level, strategy)
    z_streamp strm;
    int level;
    int strategy;
{
    deflate_state *s;
    compress_func func;

    if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
    s = strm->state;

#ifdef FASTEST
    if (level != 0) level = 1;
#else
    if (level == Z_DEFAULT_COMPRESSION) level = 6;
#endif
    if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
        return Z_STREAM_ERROR;
    }
    func = configuration_table[s->level].func;

    /* If the compression function or strategy changes mid-stream and the
     * window has already been written to (high_water != 0), flush what
     * has been buffered so far so the switch takes effect cleanly.
     */
    if ((strategy != s->strategy || func != configuration_table[level].func) &&
        s->high_water) {
        /* Flush the last buffer: */
        int err = deflate(strm, Z_BLOCK);
        if (err == Z_STREAM_ERROR)
            return err;
        if (strm->avail_out == 0)
            return Z_BUF_ERROR;
    }
    if (s->level != level) {
        /* Leaving level 0 (stored): bring the hash table up to date.
         * s->matches counts deferred window slides -- one slide is
         * replayed, two or more are equivalent to a full clear.
         */
        if (s->level == 0 && s->matches != 0) {
            if (s->matches == 1)
                slide_hash(s);
            else
                CLEAR_HASH(s);
            s->matches = 0;
        }
        s->level = level;
        s->max_lazy_match = configuration_table[level].max_lazy;
        s->good_match = configuration_table[level].good_length;
        s->nice_match = configuration_table[level].nice_length;
        s->max_chain_length = configuration_table[level].max_chain;
    }
    s->strategy = strategy;
    return Z_OK;
}
+
+/* ========================================================================= */
+int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain)
+ z_streamp strm;
+ int good_length;
+ int max_lazy;
+ int nice_length;
+ int max_chain;
+{
+ deflate_state *s;
+
+ if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
+ s = strm->state;
+ s->good_match = (uInt)good_length;
+ s->max_lazy_match = (uInt)max_lazy;
+ s->nice_match = nice_length;
+ s->max_chain_length = (uInt)max_chain;
+ return Z_OK;
+}
+
+/* =========================================================================
+ * For the default windowBits of 15 and memLevel of 8, this function returns
+ * a close to exact, as well as small, upper bound on the compressed size.
+ * They are coded as constants here for a reason--if the #define's are
+ * changed, then this function needs to be changed as well. The return
+ * value for 15 and 8 only works for those exact settings.
+ *
+ * For any setting other than those defaults for windowBits and memLevel,
+ * the value returned is a conservative worst case for the maximum expansion
+ * resulting from using fixed blocks instead of stored blocks, which deflate
+ * can emit on compressed data for some combinations of the parameters.
+ *
+ * This function could be more sophisticated to provide closer upper bounds for
+ * every combination of windowBits and memLevel. But even the conservative
+ * upper bound of about 14% expansion does not seem onerous for output buffer
+ * allocation.
+ */
uLong ZEXPORT deflateBound(strm, sourceLen)
    z_streamp strm;
    uLong sourceLen;
{
    deflate_state *s;
    uLong complen, wraplen;

    /* conservative upper bound for compressed data */
    complen = sourceLen +
              ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5;

    /* if can't get parameters, return conservative bound plus zlib wrapper */
    if (deflateStateCheck(strm))
        return complen + 6;

    /* compute wrapper length */
    s = strm->state;
    switch (s->wrap) {
    case 0:                                 /* raw deflate */
        wraplen = 0;
        break;
    case 1:                                 /* zlib wrapper */
        wraplen = 6 + (s->strstart ? 4 : 0);
        break;
#ifdef GZIP
    case 2:                                 /* gzip wrapper */
        wraplen = 18;
        if (s->gzhead != Z_NULL) {          /* user-supplied gzip header */
            Bytef *str;
            if (s->gzhead->extra != Z_NULL)
                wraplen += 2 + s->gzhead->extra_len;
            /* NUL-terminated name and comment each add their length + 1 */
            str = s->gzhead->name;
            if (str != Z_NULL)
                do {
                    wraplen++;
                } while (*str++);
            str = s->gzhead->comment;
            if (str != Z_NULL)
                do {
                    wraplen++;
                } while (*str++);
            if (s->gzhead->hcrc)
                wraplen += 2;
        }
        break;
#endif
    default:                                /* for compiler happiness */
        wraplen = 6;
    }

    /* if not default parameters, return conservative bound */
    /* NOTE(review): presumably hash_bits == memLevel + 7, so "8 + 7"
     * tests for the default memLevel of 8 -- confirm in deflateInit2_.
     */
    if (s->w_bits != 15 || s->hash_bits != 8 + 7)
        return complen + wraplen;

    /* default settings: return tight bound for that case */
    return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
           (sourceLen >> 25) + 13 - 6 + wraplen;
}
+
+/* =========================================================================
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
+ * IN assertion: the stream state is correct and there is enough room in
+ * pending_buf.
+ */
+local void putShortMSB (s, b)
+ deflate_state *s;
+ uInt b;
+{
+ put_byte(s, (Byte)(b >> 8));
+ put_byte(s, (Byte)(b & 0xff));
+}
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output, except for
+ * some deflate_stored() output, goes through this function so some
+ * applications may wish to modify it to avoid allocating a large
+ * strm->next_out buffer and copying into it. (See also read_buf()).
+ */
local void flush_pending(strm)
    z_streamp strm;
{
    unsigned len;
    deflate_state *s = strm->state;

    _tr_flush_bits(s);      /* move any buffered bits into pending_buf */
    len = s->pending;
    /* copy no more than the caller's output buffer can hold */
    if (len > strm->avail_out) len = strm->avail_out;
    if (len == 0) return;

    zmemcpy(strm->next_out, s->pending_out, len);
    strm->next_out += len;
    s->pending_out += len;
    strm->total_out += len;
    strm->avail_out -= len;
    s->pending -= len;
    if (s->pending == 0) {
        /* buffer fully drained: rewind the read pointer to the start */
        s->pending_out = s->pending_buf;
    }
}
+
/* ===========================================================================
 * Update the header CRC with the bytes s->pending_buf[beg..s->pending - 1].
 * Relies on `s` and `strm` being in scope at the expansion site; does
 * nothing unless the user requested a header CRC (gzhead->hcrc) and new
 * bytes were appended since `beg`.
 */
#define HCRC_UPDATE(beg) \
    do { \
        if (s->gzhead->hcrc && s->pending > (beg)) \
            strm->adler = crc32(strm->adler, s->pending_buf + (beg), \
                                s->pending - (beg)); \
    } while (0)
+
+/* ========================================================================= */
int ZEXPORT deflate (strm, flush)
    z_streamp strm;
    int flush;
{
    int old_flush; /* value of flush param for previous deflate call */
    deflate_state *s;

    if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) {
        return Z_STREAM_ERROR;
    }
    s = strm->state;

    if (strm->next_out == Z_NULL ||
        (strm->avail_in != 0 && strm->next_in == Z_NULL) ||
        (s->status == FINISH_STATE && flush != Z_FINISH)) {
        ERR_RETURN(strm, Z_STREAM_ERROR);
    }
    if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);

    old_flush = s->last_flush;
    s->last_flush = flush;

    /* Flush as much pending output as possible */
    if (s->pending != 0) {
        flush_pending(strm);
        if (strm->avail_out == 0) {
            /* Since avail_out is 0, deflate will be called again with
             * more output space, but possibly with both pending and
             * avail_in equal to zero. There won't be anything to do,
             * but this is not an error situation so make sure we
             * return OK instead of BUF_ERROR at next call of deflate:
             */
            s->last_flush = -1;
            return Z_OK;
        }

    /* Make sure there is something to do and avoid duplicate consecutive
     * flushes. For repeated and useless calls with Z_FINISH, we keep
     * returning Z_STREAM_END instead of Z_BUF_ERROR.
     */
    } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) &&
               flush != Z_FINISH) {
        ERR_RETURN(strm, Z_BUF_ERROR);
    }

    /* User must not provide more input after the first FINISH: */
    if (s->status == FINISH_STATE && strm->avail_in != 0) {
        ERR_RETURN(strm, Z_BUF_ERROR);
    }

    /* Write the header */
    if (s->status == INIT_STATE) {
        /* zlib header */
        uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
        uInt level_flags;

        if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2)
            level_flags = 0;
        else if (s->level < 6)
            level_flags = 1;
        else if (s->level == 6)
            level_flags = 2;
        else
            level_flags = 3;
        header |= (level_flags << 6);
        if (s->strstart != 0) header |= PRESET_DICT;
        /* the FCHECK bits make the CMF/FLG pair a multiple of 31 */
        header += 31 - (header % 31);

        putShortMSB(s, header);

        /* Save the adler32 of the preset dictionary: */
        if (s->strstart != 0) {
            putShortMSB(s, (uInt)(strm->adler >> 16));
            putShortMSB(s, (uInt)(strm->adler & 0xffff));
        }
        strm->adler = adler32(0L, Z_NULL, 0);
        s->status = BUSY_STATE;

        /* Compression must start with an empty pending buffer */
        flush_pending(strm);
        if (s->pending != 0) {
            s->last_flush = -1;
            return Z_OK;
        }
    }
#ifdef GZIP
    if (s->status == GZIP_STATE) {
        /* gzip header */
        strm->adler = crc32(0L, Z_NULL, 0);
        put_byte(s, 31);    /* gzip magic: 0x1f 0x8b */
        put_byte(s, 139);
        put_byte(s, 8);     /* compression method: deflate */
        if (s->gzhead == Z_NULL) {
            /* no user header: zero FLG and MTIME, then XFL and OS */
            put_byte(s, 0);
            put_byte(s, 0);
            put_byte(s, 0);
            put_byte(s, 0);
            put_byte(s, 0);
            put_byte(s, s->level == 9 ? 2 :
                     (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
                      4 : 0));
            put_byte(s, OS_CODE);
            s->status = BUSY_STATE;

            /* Compression must start with an empty pending buffer */
            flush_pending(strm);
            if (s->pending != 0) {
                s->last_flush = -1;
                return Z_OK;
            }
        }
        else {
            /* FLG byte assembled from the optional header fields */
            put_byte(s, (s->gzhead->text ? 1 : 0) +
                     (s->gzhead->hcrc ? 2 : 0) +
                     (s->gzhead->extra == Z_NULL ? 0 : 4) +
                     (s->gzhead->name == Z_NULL ? 0 : 8) +
                     (s->gzhead->comment == Z_NULL ? 0 : 16)
                     );
            put_byte(s, (Byte)(s->gzhead->time & 0xff));
            put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff));
            put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff));
            put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff));
            put_byte(s, s->level == 9 ? 2 :
                     (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ?
                      4 : 0));
            put_byte(s, s->gzhead->os & 0xff);
            if (s->gzhead->extra != Z_NULL) {
                put_byte(s, s->gzhead->extra_len & 0xff);
                put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
            }
            if (s->gzhead->hcrc)
                strm->adler = crc32(strm->adler, s->pending_buf,
                                    s->pending);
            s->gzindex = 0;
            s->status = EXTRA_STATE;
        }
    }
    if (s->status == EXTRA_STATE) {
        if (s->gzhead->extra != Z_NULL) {
            ulg beg = s->pending;   /* start of bytes to update crc */
            uInt left = (s->gzhead->extra_len & 0xffff) - s->gzindex;
            /* copy the extra field in pending_buf-sized chunks; may
             * return to the caller mid-field with gzindex as the resume
             * point
             */
            while (s->pending + left > s->pending_buf_size) {
                uInt copy = s->pending_buf_size - s->pending;
                zmemcpy(s->pending_buf + s->pending,
                        s->gzhead->extra + s->gzindex, copy);
                s->pending = s->pending_buf_size;
                HCRC_UPDATE(beg);
                s->gzindex += copy;
                flush_pending(strm);
                if (s->pending != 0) {
                    s->last_flush = -1;
                    return Z_OK;
                }
                beg = 0;
                left -= copy;
            }
            zmemcpy(s->pending_buf + s->pending,
                    s->gzhead->extra + s->gzindex, left);
            s->pending += left;
            HCRC_UPDATE(beg);
            s->gzindex = 0;
        }
        s->status = NAME_STATE;
    }
    if (s->status == NAME_STATE) {
        if (s->gzhead->name != Z_NULL) {
            ulg beg = s->pending;   /* start of bytes to update crc */
            int val;
            /* emit the NUL-terminated name, flushing whenever the
             * pending buffer fills
             */
            do {
                if (s->pending == s->pending_buf_size) {
                    HCRC_UPDATE(beg);
                    flush_pending(strm);
                    if (s->pending != 0) {
                        s->last_flush = -1;
                        return Z_OK;
                    }
                    beg = 0;
                }
                val = s->gzhead->name[s->gzindex++];
                put_byte(s, val);
            } while (val != 0);
            HCRC_UPDATE(beg);
            s->gzindex = 0;
        }
        s->status = COMMENT_STATE;
    }
    if (s->status == COMMENT_STATE) {
        if (s->gzhead->comment != Z_NULL) {
            ulg beg = s->pending;   /* start of bytes to update crc */
            int val;
            do {
                if (s->pending == s->pending_buf_size) {
                    HCRC_UPDATE(beg);
                    flush_pending(strm);
                    if (s->pending != 0) {
                        s->last_flush = -1;
                        return Z_OK;
                    }
                    beg = 0;
                }
                val = s->gzhead->comment[s->gzindex++];
                put_byte(s, val);
            } while (val != 0);
            HCRC_UPDATE(beg);
        }
        s->status = HCRC_STATE;
    }
    if (s->status == HCRC_STATE) {
        if (s->gzhead->hcrc) {
            if (s->pending + 2 > s->pending_buf_size) {
                flush_pending(strm);
                if (s->pending != 0) {
                    s->last_flush = -1;
                    return Z_OK;
                }
            }
            /* CRC16: low 16 bits of the header CRC, little-endian */
            put_byte(s, (Byte)(strm->adler & 0xff));
            put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
            strm->adler = crc32(0L, Z_NULL, 0);
        }
        s->status = BUSY_STATE;

        /* Compression must start with an empty pending buffer */
        flush_pending(strm);
        if (s->pending != 0) {
            s->last_flush = -1;
            return Z_OK;
        }
    }
#endif

    /* Start a new block or continue the current one.
     */
    if (strm->avail_in != 0 || s->lookahead != 0 ||
        (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
        block_state bstate;

        /* dispatch to the compressor for the current level/strategy */
        bstate = s->level == 0 ? deflate_stored(s, flush) :
                 s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
                 s->strategy == Z_RLE ? deflate_rle(s, flush) :
                 (*(configuration_table[s->level].func))(s, flush);

        if (bstate == finish_started || bstate == finish_done) {
            s->status = FINISH_STATE;
        }
        if (bstate == need_more || bstate == finish_started) {
            if (strm->avail_out == 0) {
                s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
            }
            return Z_OK;
            /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
             * of deflate should use the same flush parameter to make sure
             * that the flush is complete. So we don't have to output an
             * empty block here, this will be done at next call. This also
             * ensures that for a very small output buffer, we emit at most
             * one empty block.
             */
        }
        if (bstate == block_done) {
            if (flush == Z_PARTIAL_FLUSH) {
                _tr_align(s);
            } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
                _tr_stored_block(s, (char*)0, 0L, 0);
                /* For a full flush, this empty block will be recognized
                 * as a special marker by inflate_sync().
                 */
                if (flush == Z_FULL_FLUSH) {
                    CLEAR_HASH(s);             /* forget history */
                    if (s->lookahead == 0) {
                        s->strstart = 0;
                        s->block_start = 0L;
                        s->insert = 0;
                    }
                }
            }
            flush_pending(strm);
            if (strm->avail_out == 0) {
                s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
                return Z_OK;
            }
        }
    }

    if (flush != Z_FINISH) return Z_OK;
    if (s->wrap <= 0) return Z_STREAM_END;

    /* Write the trailer */
#ifdef GZIP
    if (s->wrap == 2) {
        /* gzip trailer: CRC-32 then total input size, both little-endian */
        put_byte(s, (Byte)(strm->adler & 0xff));
        put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
        put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
        put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
        put_byte(s, (Byte)(strm->total_in & 0xff));
        put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
        put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
        put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
    }
    else
#endif
    {
        /* zlib trailer: adler32 checksum, big-endian */
        putShortMSB(s, (uInt)(strm->adler >> 16));
        putShortMSB(s, (uInt)(strm->adler & 0xffff));
    }
    flush_pending(strm);
    /* If avail_out is zero, the application will call deflate again
     * to flush the rest.
     */
    if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
    return s->pending != 0 ? Z_OK : Z_STREAM_END;
}
+
+/* ========================================================================= */
int ZEXPORT deflateEnd (strm)
    z_streamp strm;
{
    int status;     /* stream status captured before the state is freed */

    if (deflateStateCheck(strm)) return Z_STREAM_ERROR;

    status = strm->state->status;

    /* Deallocate in reverse order of allocations: */
    TRY_FREE(strm, strm->state->pending_buf);
    TRY_FREE(strm, strm->state->head);
    TRY_FREE(strm, strm->state->prev);
    TRY_FREE(strm, strm->state->window);

    ZFREE(strm, strm->state);
    strm->state = Z_NULL;

    /* ending while compression was in progress is reported, not fatal */
    return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
}
+
+/* =========================================================================
+ * Copy the source state to the destination state.
+ * To simplify the source, this is not supported for 16-bit MSDOS (which
+ * doesn't have enough memory anyway to duplicate compression states).
+ */
int ZEXPORT deflateCopy (dest, source)
    z_streamp dest;
    z_streamp source;
{
#ifdef MAXSEG_64K
    return Z_STREAM_ERROR;
#else
    deflate_state *ds;
    deflate_state *ss;
    ushf *overlay;


    if (deflateStateCheck(source) || dest == Z_NULL) {
        return Z_STREAM_ERROR;
    }

    ss = source->state;

    /* shallow-copy the stream header, then deep-copy the state below */
    zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));

    ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
    if (ds == Z_NULL) return Z_MEM_ERROR;
    dest->state = (struct internal_state FAR *) ds;
    zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state));
    ds->strm = dest;

    /* allocate fresh buffers mirroring those made at init time */
    ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
    ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
    ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
    overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
    ds->pending_buf = (uchf *) overlay;

    if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
        ds->pending_buf == Z_NULL) {
        deflateEnd (dest);
        return Z_MEM_ERROR;
    }
    /* following zmemcpy do not work for 16-bit MSDOS */
    zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
    zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
    zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
    zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);

    /* rebase the pointers that pointed into the source's buffers */
    ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
    ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
    ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;

    ds->l_desc.dyn_tree = ds->dyn_ltree;
    ds->d_desc.dyn_tree = ds->dyn_dtree;
    ds->bl_desc.dyn_tree = ds->bl_tree;

    return Z_OK;
#endif /* MAXSEG_64K */
}
+
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read. All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->next_in buffer and copying from it.
+ * (See also flush_pending()).
+ */
local unsigned read_buf(strm, buf, size)
    z_streamp strm;
    Bytef *buf;
    unsigned size;
{
    unsigned len = strm->avail_in;

    if (len > size) len = size;
    if (len == 0) return 0;

    strm->avail_in  -= len;

    zmemcpy(buf, strm->next_in, len);
    /* keep the running checksum in step with the wrapper in use:
     * adler32 for zlib streams, crc32 for gzip, neither for raw
     */
    if (strm->state->wrap == 1) {
        strm->adler = adler32(strm->adler, buf, len);
    }
#ifdef GZIP
    else if (strm->state->wrap == 2) {
        strm->adler = crc32(strm->adler, buf, len);
    }
#endif
    strm->next_in  += len;
    strm->total_in += len;

    return len;
}
+
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
local void lm_init (s)
    deflate_state *s;
{
    /* Reset all longest-match state for a (re)started stream. */
    s->window_size = (ulg)2L*s->w_size;

    CLEAR_HASH(s);

    /* Set the default configuration parameters:
     */
    s->max_lazy_match = configuration_table[s->level].max_lazy;
    s->good_match = configuration_table[s->level].good_length;
    s->nice_match = configuration_table[s->level].nice_length;
    s->max_chain_length = configuration_table[s->level].max_chain;

    s->strstart = 0;
    s->block_start = 0L;
    s->lookahead = 0;
    s->insert = 0;
    s->match_length = s->prev_length = MIN_MATCH-1;
    s->match_available = 0;
    s->ins_h = 0;
#ifndef FASTEST
#ifdef ASMV
    match_init(); /* initialize the asm code */
#endif
#endif
}
+
+#ifndef FASTEST
+/* ===========================================================================
+ * Set match_start to the longest match starting at the given string and
+ * return its length. Matches shorter or equal to prev_length are discarded,
+ * in which case the result is equal to prev_length and match_start is
+ * garbage.
+ * IN assertions: cur_match is the head of the hash chain for the current
+ * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
+ * OUT assertion: the match length is not greater than s->lookahead.
+ */
+#ifndef ASMV
+/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
+ * match.S. The code will be functionally equivalent.
+ */
local uInt longest_match(s, cur_match)
    deflate_state *s;
    IPos cur_match;                             /* current match */
{
    unsigned chain_length = s->max_chain_length;/* max hash chain length */
    register Bytef *scan = s->window + s->strstart; /* current string */
    register Bytef *match;                       /* matched string */
    register int len;                           /* length of current match */
    int best_len = (int)s->prev_length;         /* best match length so far */
    int nice_match = s->nice_match;             /* stop if match long enough */
    IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
        s->strstart - (IPos)MAX_DIST(s) : NIL;
    /* Stop when cur_match becomes <= limit. To simplify the code,
     * we prevent matches with the string of window index 0.
     */
    Posf *prev = s->prev;
    uInt wmask = s->w_mask;

#ifdef UNALIGNED_OK
    /* Compare two bytes at a time. Note: this is not always beneficial.
     * Try with and without -DUNALIGNED_OK to check.
     */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
    register ush scan_start = *(ushf*)scan;
    register ush scan_end = *(ushf*)(scan+best_len-1);
#else
    register Bytef *strend = s->window + s->strstart + MAX_MATCH;
    register Byte scan_end1 = scan[best_len-1];
    register Byte scan_end = scan[best_len];
#endif

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    /* Do not waste too much time if we already have a good match: */
    if (s->prev_length >= s->good_match) {
        chain_length >>= 2;
    }
    /* Do not look for matches beyond the end of the input. This is necessary
     * to make deflate deterministic.
     */
    if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead;

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    /* Walk the hash chain for this position, remembering the longest
     * match found, until the chain is exhausted, the distance limit is
     * reached, or a match of at least nice_match bytes is found.
     */
    do {
        Assert(cur_match < s->strstart, "no future");
        match = s->window + cur_match;

        /* Skip to next match if the match length cannot increase
         * or if the match length is less than 2. Note that the checks below
         * for insufficient lookahead only occur occasionally for performance
         * reasons. Therefore uninitialized memory will be accessed, and
         * conditional jumps will be made that depend on those values.
         * However the length of the match is limited to the lookahead, so
         * the output of deflate is not affected by the uninitialized values.
         */
#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
        /* This code assumes sizeof(unsigned short) == 2. Do not use
         * UNALIGNED_OK if your compiler uses a different size.
         */
        if (*(ushf*)(match+best_len-1) != scan_end ||
            *(ushf*)match != scan_start) continue;

        /* It is not necessary to compare scan[2] and match[2] since they are
         * always equal when the other bytes match, given that the hash keys
         * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
         * strstart+3, +5, ... up to strstart+257. We check for insufficient
         * lookahead only every 4th comparison; the 128th check will be made
         * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
         * necessary to put more guard bytes at the end of the window, or
         * to check more often for insufficient lookahead.
         */
        Assert(scan[2] == match[2], "scan[2]?");
        scan++, match++;
        do {
        } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
                 scan < strend);
        /* The funny "do {}" generates better code on most compilers */

        /* Here, scan <= window+strstart+257 */
        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
        if (*scan == *match) scan++;

        len = (MAX_MATCH - 1) - (int)(strend-scan);
        scan = strend - (MAX_MATCH-1);

#else /* UNALIGNED_OK */

        if (match[best_len] != scan_end ||
            match[best_len-1] != scan_end1 ||
            *match != *scan ||
            *++match != scan[1]) continue;

        /* The check at best_len-1 can be removed because it will be made
         * again later. (This heuristic is not always a win.)
         * It is not necessary to compare scan[2] and match[2] since they
         * are always equal when the other bytes match, given that
         * the hash keys are equal and that HASH_BITS >= 8.
         */
        scan += 2, match++;
        Assert(*scan == *match, "match[2]?");

        /* We check for insufficient lookahead only every 8th comparison;
         * the 256th check will be made at strstart+258.
         */
        do {
        } while (*++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 *++scan == *++match && *++scan == *++match &&
                 scan < strend);

        Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

        len = MAX_MATCH - (int)(strend - scan);
        scan = strend - MAX_MATCH;

#endif /* UNALIGNED_OK */

        if (len > best_len) {
            s->match_start = cur_match;
            best_len = len;
            if (len >= nice_match) break;
#ifdef UNALIGNED_OK
            scan_end = *(ushf*)(scan+best_len-1);
#else
            scan_end1 = scan[best_len-1];
            scan_end = scan[best_len];
#endif
        }
    } while ((cur_match = prev[cur_match & wmask]) > limit
             && --chain_length != 0);

    /* never report a match longer than the remaining input */
    if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
    return s->lookahead;
}
+#endif /* ASMV */
+
+#else /* FASTEST */
+
+/* ---------------------------------------------------------------------------
+ * Optimized version for FASTEST only
+ */
local uInt longest_match(s, cur_match)
    deflate_state *s;
    IPos cur_match;                             /* current match */
{
    register Bytef *scan = s->window + s->strstart; /* current string */
    register Bytef *match;                       /* matched string */
    register int len;                           /* length of current match */
    register Bytef *strend = s->window + s->strstart + MAX_MATCH;

    /* FASTEST variant: examines only the single head-of-chain candidate
     * instead of walking the hash chain.
     */

    /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
     * It is easy to get rid of this optimization if necessary.
     */
    Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");

    Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");

    Assert(cur_match < s->strstart, "no future");

    match = s->window + cur_match;

    /* Return failure if the match length is less than 2:
     */
    if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;

    /* The check at best_len-1 can be removed because it will be made
     * again later. (This heuristic is not always a win.)
     * It is not necessary to compare scan[2] and match[2] since they
     * are always equal when the other bytes match, given that
     * the hash keys are equal and that HASH_BITS >= 8.
     */
    scan += 2, match += 2;
    Assert(*scan == *match, "match[2]?");

    /* We check for insufficient lookahead only every 8th comparison;
     * the 256th check will be made at strstart+258.
     */
    do {
    } while (*++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             *++scan == *++match && *++scan == *++match &&
             scan < strend);

    Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");

    len = MAX_MATCH - (int)(strend - scan);

    if (len < MIN_MATCH) return MIN_MATCH - 1;

    s->match_start = cur_match;
    /* clamp the reported length to the remaining input */
    return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead;
}
+
+#endif /* FASTEST */
+
+#ifdef ZLIB_DEBUG
+
+#define EQUAL 0
+/* result of memcmp for equal strings */
+
+/* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
local void check_match(s, start, match, length)
    deflate_state *s;
    IPos start, match;
    int length;
{
    /* Debug-only sanity check: abort via z_error() if the proposed match
     * does not byte-for-byte equal the string at `start`.
     */
    /* check that the match is indeed a match */
    if (zmemcmp(s->window + match,
                s->window + start, length) != EQUAL) {
        fprintf(stderr, " start %u, match %u, length %d\n",
                start, match, length);
        do {
            fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
        } while (--length != 0);
        z_error("invalid match");
    }
    if (z_verbose > 1) {
        fprintf(stderr,"\\[%d,%d]", start-match, length);
        do { putc(s->window[start++], stderr); } while (--length != 0);
    }
}
+#else
+# define check_match(s, start, match, length)
+#endif /* ZLIB_DEBUG */
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ * At least one byte has been read, or avail_in == 0; reads are
+ * performed for at least two bytes (required for the zip translate_eol
+ * option -- not supported here).
+ */
local void fill_window(s)
    deflate_state *s;
{
    unsigned n;
    unsigned more;    /* Amount of free space at the end of the window. */
    uInt wsize = s->w_size;

    Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");

    do {
        more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);

        /* Deal with !@#$% 64K limit: */
        if (sizeof(int) <= 2) {
            if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
                more = wsize;

            } else if (more == (unsigned)(-1)) {
                /* Very unlikely, but possible on 16 bit machine if
                 * strstart == 0 && lookahead == 1 (input done a byte at time)
                 */
                more--;
            }
        }

        /* If the window is almost full and there is insufficient lookahead,
         * move the upper half to the lower one to make room in the upper half.
         */
        if (s->strstart >= wsize+MAX_DIST(s)) {

            zmemcpy(s->window, s->window+wsize, (unsigned)wsize - more);
            s->match_start -= wsize;
            s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
            s->block_start -= (long) wsize;
            slide_hash(s);      /* rebase all hash-chain positions */
            more += wsize;
        }
        if (s->strm->avail_in == 0) break;

        /* If there was no sliding:
         *    strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
         *    more == window_size - lookahead - strstart
         * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
         * => more >= window_size - 2*WSIZE + 2
         * In the BIG_MEM or MMAP case (not yet supported),
         *   window_size == input_size + MIN_LOOKAHEAD  &&
         *   strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
         * Otherwise, window_size == 2*WSIZE so more >= 2.
         * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
         */
        Assert(more >= 2, "more < 2");

        n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
        s->lookahead += n;

        /* Initialize the hash value now that we have some input: */
        if (s->lookahead + s->insert >= MIN_MATCH) {
            uInt str = s->strstart - s->insert;
            s->ins_h = s->window[str];
            UPDATE_HASH(s, s->ins_h, s->window[str + 1]);
#if MIN_MATCH != 3
            Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
            /* insert the strings deferred from the previous call (one
             * per byte of s->insert) into the hash table
             */
            while (s->insert) {
                UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]);
#ifndef FASTEST
                s->prev[str & s->w_mask] = s->head[s->ins_h];
#endif
                s->head[s->ins_h] = (Pos)str;
                str++;
                s->insert--;
                if (s->lookahead + s->insert < MIN_MATCH)
                    break;
            }
        }
        /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
         * but this is not important since only literal bytes will be emitted.
         */

    } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);

    /* If the WIN_INIT bytes after the end of the current data have never been
     * written, then zero those bytes in order to avoid memory check reports of
     * the use of uninitialized (or uninitialised as Julian writes) bytes by
     * the longest match routines.  Update the high water mark for the next
     * time through here.  WIN_INIT is set to MAX_MATCH since the longest match
     * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
     */
    if (s->high_water < s->window_size) {
        ulg curr = s->strstart + (ulg)(s->lookahead);
        ulg init;

        if (s->high_water < curr) {
            /* Previous high water mark below current data -- zero WIN_INIT
             * bytes or up to end of window, whichever is less.
             */
            init = s->window_size - curr;
            if (init > WIN_INIT)
                init = WIN_INIT;
            zmemzero(s->window + curr, (unsigned)init);
            s->high_water = curr + init;
        }
        else if (s->high_water < (ulg)curr + WIN_INIT) {
            /* High water mark at or above current data, but below current data
             * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
             * to end of window, whichever is less.
             */
            init = (ulg)curr + WIN_INIT - s->high_water;
            if (init > s->window_size - s->high_water)
                init = s->window_size - s->high_water;
            zmemzero(s->window + s->high_water, (unsigned)init);
            s->high_water += init;
        }
    }

    Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
           "not enough room for search");
}
+
/* ===========================================================================
 * Flush the current block, with given end-of-file flag.
 * IN assertion: strstart is set to the end of the current match.
 * NOTE: brace-block macro (not do/while(0)) -- callers must not use it as
 * the sole unbraced body of an if/else.
 */
#define FLUSH_BLOCK_ONLY(s, last) { \
   _tr_flush_block(s, (s->block_start >= 0L ? \
                   (charf *)&s->window[(unsigned)s->block_start] : \
                   (charf *)Z_NULL), \
                (ulg)((long)s->strstart - s->block_start), \
                (last)); \
   s->block_start = s->strstart; \
   flush_pending(s->strm); \
   Tracev((stderr,"[FLUSH]")); \
}

/* Same but force premature exit if necessary.  CAUTION: contains a `return`
 * from the enclosing function when the output buffer fills up.
 */
#define FLUSH_BLOCK(s, last) { \
   FLUSH_BLOCK_ONLY(s, last); \
   if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \
}

/* Maximum stored block length in deflate format (not including header). */
#define MAX_STORED 65535

/* Minimum of a and b.  NOTE: evaluates the smaller argument twice -- do not
 * pass expressions with side effects.
 */
#define MIN(a, b) ((a) > (b) ? (b) : (a))
+
/* ===========================================================================
 * Copy without compression as much as possible from the input stream, return
 * the current block state.
 *
 * In case deflateParams() is used to later switch to a non-zero compression
 * level, s->matches (otherwise unused when storing) keeps track of the number
 * of hash table slides to perform. If s->matches is 1, then one hash table
 * slide will be done when switching. If s->matches is 2, the maximum value
 * allowed here, then the hash table will be cleared, since two or more slides
 * is the same as a clear.
 *
 * deflate_stored() is written to minimize the number of times an input byte is
 * copied. It is most efficient with large input and output buffers, which
 * maximizes the opportunities to have a single copy from next_in to next_out.
 */
local block_state deflate_stored(s, flush)
    deflate_state *s;
    int flush;
{
    /* Smallest worthy block size when not flushing or finishing. By default
     * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
     * large input and output buffers, the stored block size will be larger.
     */
    unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size);

    /* Copy as many min_block or larger stored blocks directly to next_out as
     * possible. If flushing, copy the remaining available input to next_out as
     * stored blocks, if there is enough space.
     */
    unsigned len, left, have, last = 0;
    unsigned used = s->strm->avail_in;
    do {
        /* Set len to the maximum size block that we can copy directly with the
         * available input data and output space. Set left to how much of that
         * would be copied from what's left in the window.
         */
        len = MAX_STORED;       /* maximum deflate stored block length */
        /* Header bytes = pending bits plus the 3-bit block header, rounded up
         * to a byte, plus 4 bytes for LEN/NLEN:
         * ((bi_valid + 3 + 7) >> 3) + 4 == (bi_valid + 42) >> 3.
         */
        have = (s->bi_valid + 42) >> 3;         /* number of header bytes */
        if (s->strm->avail_out < have)          /* need room for header */
            break;
            /* maximum stored block length that will fit in avail_out: */
        have = s->strm->avail_out - have;
        left = s->strstart - s->block_start;    /* bytes left in window */
        if (len > (ulg)left + s->strm->avail_in)
            len = left + s->strm->avail_in;     /* limit len to the input */
        if (len > have)
            len = have;                         /* limit len to the output */

        /* If the stored block would be less than min_block in length, or if
         * unable to copy all of the available input when flushing, then try
         * copying to the window and the pending buffer instead. Also don't
         * write an empty block when flushing -- deflate() does that.
         */
        if (len < min_block && ((len == 0 && flush != Z_FINISH) ||
                                flush == Z_NO_FLUSH ||
                                len != left + s->strm->avail_in))
            break;

        /* Make a dummy stored block in pending to get the header bytes,
         * including any pending bits. This also updates the debugging counts.
         */
        last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0;
        _tr_stored_block(s, (char *)0, 0L, last);

        /* Replace the lengths in the dummy stored block with len.
         * LEN is little-endian, NLEN is its one's complement (RFC 1951).
         */
        s->pending_buf[s->pending - 4] = len;
        s->pending_buf[s->pending - 3] = len >> 8;
        s->pending_buf[s->pending - 2] = ~len;
        s->pending_buf[s->pending - 1] = ~len >> 8;

        /* Write the stored block header bytes. */
        flush_pending(s->strm);

#ifdef ZLIB_DEBUG
        /* Update debugging counts for the data about to be copied. */
        s->compressed_len += len << 3;
        s->bits_sent += len << 3;
#endif

        /* Copy uncompressed bytes from the window to next_out. */
        if (left) {
            if (left > len)
                left = len;
            zmemcpy(s->strm->next_out, s->window + s->block_start, left);
            s->strm->next_out += left;
            s->strm->avail_out -= left;
            s->strm->total_out += left;
            s->block_start += left;
            len -= left;
        }

        /* Copy uncompressed bytes directly from next_in to next_out, updating
         * the check value.
         */
        if (len) {
            read_buf(s->strm, s->strm->next_out, len);
            s->strm->next_out += len;
            s->strm->avail_out -= len;
            s->strm->total_out += len;
        }
    } while (last == 0);

    /* Update the sliding window with the last s->w_size bytes of the copied
     * data, or append all of the copied data to the existing window if less
     * than s->w_size bytes were copied. Also update the number of bytes to
     * insert in the hash tables, in the event that deflateParams() switches to
     * a non-zero compression level.
     */
    used -= s->strm->avail_in;      /* number of input bytes directly copied */
    if (used) {
        /* If any input was used, then no unused input remains in the window,
         * therefore s->block_start == s->strstart.
         */
        if (used >= s->w_size) {    /* supplant the previous history */
            s->matches = 2;         /* clear hash */
            zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
            s->strstart = s->w_size;
        }
        else {
            if (s->window_size - s->strstart <= used) {
                /* Slide the window down. */
                s->strstart -= s->w_size;
                zmemcpy(s->window, s->window + s->w_size, s->strstart);
                if (s->matches < 2)
                    s->matches++;   /* add a pending slide_hash() */
            }
            zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
            s->strstart += used;
        }
        s->block_start = s->strstart;
        s->insert += MIN(used, s->w_size - s->insert);
    }
    if (s->high_water < s->strstart)
        s->high_water = s->strstart;

    /* If the last block was written to next_out, then done. */
    if (last)
        return finish_done;

    /* If flushing and all input has been consumed, then done. */
    if (flush != Z_NO_FLUSH && flush != Z_FINISH &&
        s->strm->avail_in == 0 && (long)s->strstart == s->block_start)
        return block_done;

    /* Fill the window with any remaining input. */
    have = s->window_size - s->strstart - 1;
    if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) {
        /* Slide the window down. */
        s->block_start -= s->w_size;
        s->strstart -= s->w_size;
        zmemcpy(s->window, s->window + s->w_size, s->strstart);
        if (s->matches < 2)
            s->matches++;           /* add a pending slide_hash() */
        have += s->w_size;          /* more space now */
    }
    if (have > s->strm->avail_in)
        have = s->strm->avail_in;
    if (have) {
        read_buf(s->strm, s->window + s->strstart, have);
        s->strstart += have;
    }
    if (s->high_water < s->strstart)
        s->high_water = s->strstart;

    /* There was not enough avail_out to write a complete worthy or flushed
     * stored block to next_out. Write a stored block to pending instead, if we
     * have enough input for a worthy block, or if flushing and there is enough
     * room for the remaining input as a stored block in the pending buffer.
     */
    have = (s->bi_valid + 42) >> 3;         /* number of header bytes */
        /* maximum stored block length that will fit in pending: */
    have = MIN(s->pending_buf_size - have, MAX_STORED);
    min_block = MIN(have, s->w_size);
    left = s->strstart - s->block_start;
    if (left >= min_block ||
        ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH &&
         s->strm->avail_in == 0 && left <= have)) {
        len = MIN(left, have);
        last = flush == Z_FINISH && s->strm->avail_in == 0 &&
               len == left ? 1 : 0;
        _tr_stored_block(s, (charf *)s->window + s->block_start, len, last);
        s->block_start += len;
        flush_pending(s->strm);
    }

    /* We've done all we can with the available input and output. */
    return last ? finish_started : need_more;
}
+
/* ===========================================================================
 * Compress as much as possible from the input stream, return the current
 * block state.
 * This function does not perform lazy evaluation of matches and inserts
 * new strings in the dictionary only for unmatched strings or for short
 * matches. It is used only for the fast compression options.
 */
local block_state deflate_fast(s, flush)
    deflate_state *s;
    int flush;
{
    IPos hash_head;       /* head of the hash chain */
    int bflush;           /* set if current block must be flushed */

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         * At this point we have always match_length < MIN_MATCH
         */
        if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match (s, hash_head);
            /* longest_match() sets match_start */
        }
        if (s->match_length >= MIN_MATCH) {
            check_match(s, s->strstart, s->match_start, s->match_length);

            /* bflush is assigned (not read) by the tally macro. */
            _tr_tally_dist(s, s->strstart - s->match_start,
                           s->match_length - MIN_MATCH, bflush);

            s->lookahead -= s->match_length;

            /* Insert new strings in the hash table only if the match length
             * is not too large. This saves time but degrades compression.
             */
#ifndef FASTEST
            if (s->match_length <= s->max_insert_length &&
                s->lookahead >= MIN_MATCH) {
                s->match_length--; /* string at strstart already in table */
                do {
                    s->strstart++;
                    INSERT_STRING(s, s->strstart, hash_head);
                    /* strstart never exceeds WSIZE-MAX_MATCH, so there are
                     * always MIN_MATCH bytes ahead.
                     */
                } while (--s->match_length != 0);
                s->strstart++;
            } else
#endif
            {
                s->strstart += s->match_length;
                s->match_length = 0;
                s->ins_h = s->window[s->strstart];
                UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
                Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
                /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
                 * matter since it will be recomputed at next deflate call.
                 */
            }
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit (s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    /* Up to MIN_MATCH-1 trailing bytes still need hash insertion when more
     * input arrives (consumed by fill_window() on the next call).
     */
    s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->last_lit)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
+
+#ifndef FASTEST
/* ===========================================================================
 * Same as above, but achieves better compression. We use a lazy
 * evaluation for matches: a match is finally adopted only if there is
 * no better match at the next window position.
 */
local block_state deflate_slow(s, flush)
    deflate_state *s;
    int flush;
{
    IPos hash_head;          /* head of hash chain */
    int bflush;              /* set if current block must be flushed */

    /* Process the input block. */
    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the next match, plus MIN_MATCH bytes to insert the
         * string following the next match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         */
        s->prev_length = s->match_length, s->prev_match = s->match_start;
        s->match_length = MIN_MATCH-1;

        if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
            s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match (s, hash_head);
            /* longest_match() sets match_start */

            /* Reject short matches when filtering, or a minimum-length
             * match that is too distant to be worth its distance bits.
             */
            if (s->match_length <= 5 && (s->strategy == Z_FILTERED
#if TOO_FAR <= 32767
                || (s->match_length == MIN_MATCH &&
                    s->strstart - s->match_start > TOO_FAR)
#endif
                )) {

                /* If prev_match is also MIN_MATCH, match_start is garbage
                 * but we will ignore the current match anyway.
                 */
                s->match_length = MIN_MATCH-1;
            }
        }
        /* If there was a match at the previous step and the current
         * match is not better, output the previous match:
         */
        if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
            uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
            /* Do not insert strings in hash table beyond this. */

            check_match(s, s->strstart-1, s->prev_match, s->prev_length);

            _tr_tally_dist(s, s->strstart -1 - s->prev_match,
                           s->prev_length - MIN_MATCH, bflush);

            /* Insert in hash table all strings up to the end of the match.
             * strstart-1 and strstart are already inserted. If there is not
             * enough lookahead, the last two strings are not inserted in
             * the hash table.
             */
            s->lookahead -= s->prev_length-1;
            s->prev_length -= 2;
            do {
                if (++s->strstart <= max_insert) {
                    INSERT_STRING(s, s->strstart, hash_head);
                }
            } while (--s->prev_length != 0);
            s->match_available = 0;
            s->match_length = MIN_MATCH-1;
            s->strstart++;

            if (bflush) FLUSH_BLOCK(s, 0);

        } else if (s->match_available) {
            /* If there was no match at the previous position, output a
             * single literal. If there was a match but the current match
             * is longer, truncate the previous match to a single literal.
             */
            Tracevv((stderr,"%c", s->window[s->strstart-1]));
            _tr_tally_lit(s, s->window[s->strstart-1], bflush);
            if (bflush) {
                FLUSH_BLOCK_ONLY(s, 0);
            }
            s->strstart++;
            s->lookahead--;
            if (s->strm->avail_out == 0) return need_more;
        } else {
            /* There is no previous match to compare with, wait for
             * the next step to decide.
             */
            s->match_available = 1;
            s->strstart++;
            s->lookahead--;
        }
    }
    Assert (flush != Z_NO_FLUSH, "no flush?");
    /* Emit the deferred literal, if any, before closing out the block. */
    if (s->match_available) {
        Tracevv((stderr,"%c", s->window[s->strstart-1]));
        _tr_tally_lit(s, s->window[s->strstart-1], bflush);
        s->match_available = 0;
    }
    s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->last_lit)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
+#endif /* FASTEST */
+
/* ===========================================================================
 * For Z_RLE, simply look for runs of bytes, generate matches only of distance
 * one.  Do not maintain a hash table.  (It will be regenerated if this run of
 * deflate switches away from Z_RLE.)
 */
local block_state deflate_rle(s, flush)
    deflate_state *s;
    int flush;
{
    int bflush;             /* set if current block must be flushed */
    uInt prev;              /* byte at distance one to match */
    Bytef *scan, *strend;   /* scan goes up to strend for length of run */

    for (;;) {
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need MAX_MATCH bytes
         * for the longest run, plus one for the unrolled loop.
         */
        if (s->lookahead <= MAX_MATCH) {
            fill_window(s);
            if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* See how many times the previous byte repeats */
        s->match_length = 0;
        if (s->lookahead >= MIN_MATCH && s->strstart > 0) {
            scan = s->window + s->strstart - 1;
            prev = *scan;
            if (prev == *++scan && prev == *++scan && prev == *++scan) {
                strend = s->window + s->strstart + MAX_MATCH;
                /* The eight unrolled comparisons per iteration cannot overrun:
                 * fill_window() zero-initializes WIN_INIT (== MAX_MATCH) bytes
                 * past the data for exactly this kind of scan.
                 */
                do {
                } while (prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         prev == *++scan && prev == *++scan &&
                         scan < strend);
                s->match_length = MAX_MATCH - (uInt)(strend - scan);
                if (s->match_length > s->lookahead)
                    s->match_length = s->lookahead;
            }
            Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
        }

        /* Emit match if have run of MIN_MATCH or longer, else emit literal */
        if (s->match_length >= MIN_MATCH) {
            check_match(s, s->strstart, s->strstart - 1, s->match_length);

            /* Distance is always 1 for a run. */
            _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);

            s->lookahead -= s->match_length;
            s->strstart += s->match_length;
            s->match_length = 0;
        } else {
            /* No match, output a literal byte */
            Tracevv((stderr,"%c", s->window[s->strstart]));
            _tr_tally_lit (s, s->window[s->strstart], bflush);
            s->lookahead--;
            s->strstart++;
        }
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    /* No hash table is maintained in RLE mode, so nothing to insert. */
    s->insert = 0;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->last_lit)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
+
/* ===========================================================================
 * For Z_HUFFMAN_ONLY, do not look for matches.  Do not maintain a hash table.
 * (It will be regenerated if this run of deflate switches away from Huffman.)
 */
local block_state deflate_huff(s, flush)
    deflate_state *s;
    int flush;
{
    int bflush;             /* set if current block must be flushed */

    for (;;) {
        /* Make sure that we have a literal to write. */
        if (s->lookahead == 0) {
            fill_window(s);
            if (s->lookahead == 0) {
                if (flush == Z_NO_FLUSH)
                    return need_more;
                break;      /* flush the current block */
            }
        }

        /* Output a literal byte; every input byte is coded as a literal. */
        s->match_length = 0;
        Tracevv((stderr,"%c", s->window[s->strstart]));
        _tr_tally_lit (s, s->window[s->strstart], bflush);
        s->lookahead--;
        s->strstart++;
        if (bflush) FLUSH_BLOCK(s, 0);
    }
    /* No hash table is maintained in Huffman-only mode. */
    s->insert = 0;
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (s->last_lit)
        FLUSH_BLOCK(s, 0);
    return block_done;
}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/deflate.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/deflate.h
new file mode 100644
index 00000000..23ecdd31
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/deflate.h
@@ -0,0 +1,349 @@
+/* deflate.h -- internal compression state
+ * Copyright (C) 1995-2016 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* @(#) $Id$ */
+
+#ifndef DEFLATE_H
+#define DEFLATE_H
+
+#include "zutil.h"
+
+/* define NO_GZIP when compiling if you want to disable gzip header and
+ trailer creation by deflate(). NO_GZIP would be used to avoid linking in
+ the crc code when it is not needed. For shared libraries, gzip encoding
+ should be left enabled. */
+#ifndef NO_GZIP
+# define GZIP
+#endif
+
+/* ===========================================================================
+ * Internal compression state.
+ */
+
+#define LENGTH_CODES 29
+/* number of length codes, not counting the special END_BLOCK code */
+
+#define LITERALS 256
+/* number of literal bytes 0..255 */
+
+#define L_CODES (LITERALS+1+LENGTH_CODES)
+/* number of Literal or Length codes, including the END_BLOCK code */
+
+#define D_CODES 30
+/* number of distance codes */
+
+#define BL_CODES 19
+/* number of codes used to transfer the bit lengths */
+
+#define HEAP_SIZE (2*L_CODES+1)
+/* maximum heap size */
+
+#define MAX_BITS 15
+/* All codes must not exceed MAX_BITS bits */
+
+#define Buf_size 16
+/* size of bit buffer in bi_buf */
+
+#define INIT_STATE 42 /* zlib header -> BUSY_STATE */
+#ifdef GZIP
+# define GZIP_STATE 57 /* gzip header -> BUSY_STATE | EXTRA_STATE */
+#endif
+#define EXTRA_STATE 69 /* gzip extra block -> NAME_STATE */
+#define NAME_STATE 73 /* gzip file name -> COMMENT_STATE */
+#define COMMENT_STATE 91 /* gzip comment -> HCRC_STATE */
+#define HCRC_STATE 103 /* gzip header CRC -> BUSY_STATE */
+#define BUSY_STATE 113 /* deflate -> FINISH_STATE */
+#define FINISH_STATE 666 /* stream complete */
+/* Stream status */
+
+
/* Data structure describing a single value and its code string.
 * Each union overlays two fields that are (by construction) never needed at
 * the same time, halving the size of the code tables.
 */
typedef struct ct_data_s {
    union {
        ush  freq;       /* frequency count */
        ush  code;       /* bit string */
    } fc;
    union {
        ush  dad;        /* father node in Huffman tree */
        ush  len;        /* length of bit string */
    } dl;
} FAR ct_data;

/* Convenience accessors for the union members above. */
#define Freq fc.freq
#define Code fc.code
#define Dad  dl.dad
#define Len  dl.len
+
+typedef struct static_tree_desc_s static_tree_desc;
+
+typedef struct tree_desc_s {
+ ct_data *dyn_tree; /* the dynamic tree */
+ int max_code; /* largest code with non zero frequency */
+ const static_tree_desc *stat_desc; /* the corresponding static tree */
+} FAR tree_desc;
+
+typedef ush Pos;
+typedef Pos FAR Posf;
+typedef unsigned IPos;
+
+/* A Pos is an index in the character window. We use short instead of int to
+ * save space in the various tables. IPos is used only for parameter passing.
+ */
+
/* Complete per-stream compression state.  Allocated by deflateInit*() and
 * reached through strm->state; opaque to applications (see the warning at
 * the top of this file).
 */
typedef struct internal_state {
    z_streamp strm;      /* pointer back to this zlib stream */
    int   status;        /* as the name implies */
    Bytef *pending_buf;  /* output still pending */
    ulg   pending_buf_size; /* size of pending_buf */
    Bytef *pending_out;  /* next pending byte to output to the stream */
    ulg   pending;       /* nb of bytes in the pending buffer */
    int   wrap;          /* bit 0 true for zlib, bit 1 true for gzip */
    gz_headerp  gzhead;  /* gzip header information to write */
    ulg   gzindex;       /* where in extra, name, or comment */
    Byte  method;        /* can only be DEFLATED */
    int   last_flush;    /* value of flush param for previous deflate call */

                /* used by deflate.c: */

    uInt  w_size;        /* LZ77 window size (32K by default) */
    uInt  w_bits;        /* log2(w_size)  (8..16) */
    uInt  w_mask;        /* w_size - 1 */

    Bytef *window;
    /* Sliding window. Input bytes are read into the second half of the window,
     * and move to the first half later to keep a dictionary of at least wSize
     * bytes. With this organization, matches are limited to a distance of
     * wSize-MAX_MATCH bytes, but this ensures that IO is always
     * performed with a length multiple of the block size. Also, it limits
     * the window size to 64K, which is quite useful on MSDOS.
     * To do: use the user input buffer as sliding window.
     */

    ulg window_size;
    /* Actual size of window: 2*wSize, except when the user input buffer
     * is directly used as sliding window.
     */

    Posf *prev;
    /* Link to older string with same hash index. To limit the size of this
     * array to 64K, this link is maintained only for the last 32K strings.
     * An index in this array is thus a window index modulo 32K.
     */

    Posf *head; /* Heads of the hash chains or NIL. */

    uInt  ins_h;          /* hash index of string to be inserted */
    uInt  hash_size;      /* number of elements in hash table */
    uInt  hash_bits;      /* log2(hash_size) */
    uInt  hash_mask;      /* hash_size-1 */

    uInt  hash_shift;
    /* Number of bits by which ins_h must be shifted at each input
     * step. It must be such that after MIN_MATCH steps, the oldest
     * byte no longer takes part in the hash key, that is:
     *   hash_shift * MIN_MATCH >= hash_bits
     */

    long block_start;
    /* Window position at the beginning of the current output block. Gets
     * negative when the window is moved backwards.
     */

    uInt match_length;           /* length of best match */
    IPos prev_match;             /* previous match */
    int match_available;         /* set if previous match exists */
    uInt strstart;               /* start of string to insert */
    uInt match_start;            /* start of matching string */
    uInt lookahead;              /* number of valid bytes ahead in window */

    uInt prev_length;
    /* Length of the best match at previous step. Matches not greater than this
     * are discarded. This is used in the lazy match evaluation.
     */

    uInt max_chain_length;
    /* To speed up deflation, hash chains are never searched beyond this
     * length.  A higher limit improves compression ratio but degrades the
     * speed.
     */

    uInt max_lazy_match;
    /* Attempt to find a better match only when the current match is strictly
     * smaller than this value. This mechanism is used only for compression
     * levels >= 4.
     */
#   define max_insert_length  max_lazy_match
    /* Insert new strings in the hash table only if the match length is not
     * greater than this length. This saves time but degrades compression.
     * max_insert_length is used only for compression levels <= 3.
     */

    int level;    /* compression level (1..9) */
    int strategy; /* favor or force Huffman coding*/

    uInt good_match;
    /* Use a faster search when the previous match is longer than this */

    int nice_match; /* Stop searching when current match exceeds this */

                /* used by trees.c: */
    /* Didn't use ct_data typedef below to suppress compiler warning */
    struct ct_data_s dyn_ltree[HEAP_SIZE];   /* literal and length tree */
    struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
    struct ct_data_s bl_tree[2*BL_CODES+1];  /* Huffman tree for bit lengths */

    struct tree_desc_s l_desc;               /* desc. for literal tree */
    struct tree_desc_s d_desc;               /* desc. for distance tree */
    struct tree_desc_s bl_desc;              /* desc. for bit length tree */

    ush bl_count[MAX_BITS+1];
    /* number of codes at each bit length for an optimal tree */

    int heap[2*L_CODES+1];      /* heap used to build the Huffman trees */
    int heap_len;               /* number of elements in the heap */
    int heap_max;               /* element of largest frequency */
    /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
     * The same heap array is used to build all trees.
     */

    uch depth[2*L_CODES+1];
    /* Depth of each subtree used as tie breaker for trees of equal frequency
     */

    uchf *l_buf;          /* buffer for literals or lengths */

    uInt  lit_bufsize;
    /* Size of match buffer for literals/lengths.  There are 4 reasons for
     * limiting lit_bufsize to 64K:
     *   - frequencies can be kept in 16 bit counters
     *   - if compression is not successful for the first block, all input
     *     data is still in the window so we can still emit a stored block even
     *     when input comes from standard input.  (This can also be done for
     *     all blocks if lit_bufsize is not greater than 32K.)
     *   - if compression is not successful for a file smaller than 64K, we can
     *     even emit a stored file instead of a stored block (saving 5 bytes).
     *     This is applicable only for zip (not gzip or zlib).
     *   - creating new Huffman trees less frequently may not provide fast
     *     adaptation to changes in the input data statistics. (Take for
     *     example a binary file with poorly compressible code followed by
     *     a highly compressible string table.) Smaller buffer sizes give
     *     fast adaptation but have of course the overhead of transmitting
     *     trees more frequently.
     *   - I can't count above 4
     */

    uInt last_lit;      /* running index in l_buf */

    ushf *d_buf;
    /* Buffer for distances. To simplify the code, d_buf and l_buf have
     * the same number of elements. To use different lengths, an extra flag
     * array would be necessary.
     */

    ulg opt_len;        /* bit length of current block with optimal trees */
    ulg static_len;     /* bit length of current block with static trees */
    uInt matches;       /* number of string matches in current block */
    uInt insert;        /* bytes at end of window left to insert */

#ifdef ZLIB_DEBUG
    ulg compressed_len; /* total bit length of compressed file mod 2^32 */
    ulg bits_sent;      /* bit length of compressed data sent mod 2^32 */
#endif

    ush bi_buf;
    /* Output buffer. bits are inserted starting at the bottom (least
     * significant bits).
     */
    int bi_valid;
    /* Number of valid bits in bi_buf.  All bits above the last valid bit
     * are always zero.
     */

    ulg high_water;
    /* High water mark offset in window for initialized bytes -- bytes above
     * this are set to zero in order to avoid memory check warnings when
     * longest match routines access bytes past the input.  This is then
     * updated to the new high water mark.
     */

} FAR deflate_state;
+
/* Output a byte on the stream.
 * IN assertion: there is enough room in pending_buf.
 * NOTE: brace-block macro (not do/while(0)); do not use unbraced in if/else.
 */
#define put_byte(s, c) {s->pending_buf[s->pending++] = (Bytef)(c);}


#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/* Minimum amount of lookahead, except at the end of the input file.
 * See deflate.c for comments about the MIN_MATCH+1.
 */

#define MAX_DIST(s)  ((s)->w_size-MIN_LOOKAHEAD)
/* In order to simplify the code, particularly on 16 bit machines, match
 * distances are limited to MAX_DIST instead of WSIZE.
 */

#define WIN_INIT MAX_MATCH
/* Number of bytes after end of data in window to initialize in order to avoid
   memory checker errors from longest match routines */
+
+ /* in trees.c */
+void ZLIB_INTERNAL _tr_init OF((deflate_state *s));
+int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc));
+void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf,
+ ulg stored_len, int last));
+void ZLIB_INTERNAL _tr_flush_bits OF((deflate_state *s));
+void ZLIB_INTERNAL _tr_align OF((deflate_state *s));
+void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf,
+ ulg stored_len, int last));
+
+#define d_code(dist) \
+ ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)])
+/* Mapping from a distance to a distance code. dist is the distance - 1 and
+ * must not have side effects. _dist_code[256] and _dist_code[257] are never
+ * used.
+ */
+
+#ifndef ZLIB_DEBUG
+/* Inline versions of _tr_tally for speed: */
+
+#if defined(GEN_TREES_H) || !defined(STDC)
+ extern uch ZLIB_INTERNAL _length_code[];
+ extern uch ZLIB_INTERNAL _dist_code[];
+#else
+ extern const uch ZLIB_INTERNAL _length_code[];
+ extern const uch ZLIB_INTERNAL _dist_code[];
+#endif
+
/* Record a literal byte in the symbol buffers and bump its frequency.
 * Sets `flush` when the literal buffer is one entry short of full, telling
 * the caller to flush the current block.  `c` is expanded into a local
 * (evaluated once), so argument side effects are safe.
 */
# define _tr_tally_lit(s, c, flush) \
  { uch cc = (c); \
    s->d_buf[s->last_lit] = 0; \
    s->l_buf[s->last_lit++] = cc; \
    s->dyn_ltree[cc].Freq++; \
    flush = (s->last_lit == s->lit_bufsize-1); \
  }
/* Record a (distance, length) match pair and update both trees' frequencies.
 * As above, `flush` signals that the block should be flushed; `distance` and
 * `length` are each evaluated once into locals.
 */
# define _tr_tally_dist(s, distance, length, flush) \
  { uch len = (uch)(length); \
    ush dist = (ush)(distance); \
    s->d_buf[s->last_lit] = dist; \
    s->l_buf[s->last_lit++] = len; \
    dist--; \
    s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \
    s->dyn_dtree[d_code(dist)].Freq++; \
    flush = (s->last_lit == s->lit_bufsize-1); \
  }
+#else
+# define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c)
+# define _tr_tally_dist(s, distance, length, flush) \
+ flush = _tr_tally(s, distance, length)
+#endif
+
+#endif /* DEFLATE_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzclose.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzclose.c
new file mode 100644
index 00000000..caeb99a3
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzclose.c
@@ -0,0 +1,25 @@
+/* gzclose.c -- zlib gzclose() function
+ * Copyright (C) 2004, 2010 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "gzguts.h"
+
+/* gzclose() is in a separate file so that it is linked in only if it is used.
+ That way the other gzclose functions can be used instead to avoid linking in
+ unneeded compression or decompression routines. */
+int ZEXPORT gzclose(file)
+ gzFile file;
+{
+#ifndef NO_GZCOMPRESS
+ gz_statep state;
+
+ if (file == NULL)
+ return Z_STREAM_ERROR;
+ state = (gz_statep)file;
+
+ return state->mode == GZ_READ ? gzclose_r(file) : gzclose_w(file);
+#else
+ return gzclose_r(file);
+#endif
+}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzguts.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzguts.h
new file mode 100644
index 00000000..990a4d25
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzguts.h
@@ -0,0 +1,218 @@
+/* gzguts.h -- zlib internal header definitions for gz* operations
+ * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#ifdef _LARGEFILE64_SOURCE
+# ifndef _LARGEFILE_SOURCE
+# define _LARGEFILE_SOURCE 1
+# endif
+# ifdef _FILE_OFFSET_BITS
+# undef _FILE_OFFSET_BITS
+# endif
+#endif
+
+#ifdef HAVE_HIDDEN
+# define ZLIB_INTERNAL __attribute__((visibility ("hidden")))
+#else
+# define ZLIB_INTERNAL
+#endif
+
+#include <stdio.h>
+#include "zlib.h"
+#ifdef STDC
+# include <string.h>
+# include <stdlib.h>
+# include <limits.h>
+#endif
+
+#ifndef _POSIX_SOURCE
+# define _POSIX_SOURCE
+#endif
+#include <fcntl.h>
+
+#ifdef _WIN32
+# include <stddef.h>
+#endif
+
+#if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32)
+# include <io.h>
+#endif
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+# define WIDECHAR
+#endif
+
+#ifdef WINAPI_FAMILY
+# define open _open
+# define read _read
+# define write _write
+# define close _close
+#endif
+
+#ifdef NO_DEFLATE /* for compatibility with old definition */
+# define NO_GZCOMPRESS
+#endif
+
+#if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550)
+# ifndef HAVE_VSNPRINTF
+# define HAVE_VSNPRINTF
+# endif
+#endif
+
+#if defined(__CYGWIN__)
+# ifndef HAVE_VSNPRINTF
+# define HAVE_VSNPRINTF
+# endif
+#endif
+
+#if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410)
+# ifndef HAVE_VSNPRINTF
+# define HAVE_VSNPRINTF
+# endif
+#endif
+
+#ifndef HAVE_VSNPRINTF
+# ifdef MSDOS
+/* vsnprintf may exist on some MS-DOS compilers (DJGPP?),
+ but for now we just assume it doesn't. */
+# define NO_vsnprintf
+# endif
+# ifdef __TURBOC__
+# define NO_vsnprintf
+# endif
+# ifdef WIN32
+/* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. */
+# if !defined(vsnprintf) && !defined(NO_vsnprintf)
+# if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 )
+# define vsnprintf _vsnprintf
+# endif
+# endif
+# endif
+# ifdef __SASC
+# define NO_vsnprintf
+# endif
+# ifdef VMS
+# define NO_vsnprintf
+# endif
+# ifdef __OS400__
+# define NO_vsnprintf
+# endif
+# ifdef __MVS__
+# define NO_vsnprintf
+# endif
+#endif
+
+/* unlike snprintf (which is required in C99), _snprintf does not guarantee
+ null termination of the result -- however this is only used in gzlib.c where
+ the result is assured to fit in the space provided */
+#if defined(_MSC_VER) && _MSC_VER < 1900
+# define snprintf _snprintf
+#endif
+
+#ifndef local
+# define local static
+#endif
+/* since "static" is used to mean two completely different things in C, we
+ define "local" for the non-static meaning of "static", for readability
+ (compile with -Dlocal if your debugger can't find static symbols) */
+
+/* gz* functions always use library allocation functions */
+#ifndef STDC
+ extern voidp malloc OF((uInt size));
+ extern void free OF((voidpf ptr));
+#endif
+
+/* get errno and strerror definition */
+#if defined UNDER_CE
+# include <windows.h>
+# define zstrerror() gz_strwinerror((DWORD)GetLastError())
+#else
+# ifndef NO_STRERROR
+# include <errno.h>
+# define zstrerror() strerror(errno)
+# else
+# define zstrerror() "stdio error (consult errno)"
+# endif
+#endif
+
+/* provide prototypes for these when building zlib without LFS */
+#if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0
+ ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *));
+ ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int));
+ ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile));
+ ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile));
+#endif
+
+/* default memLevel */
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+
+/* default i/o buffer size -- double this for output when reading (this and
+ twice this must be able to fit in an unsigned type) */
+#define GZBUFSIZE 8192
+
+/* gzip modes, also provide a little integrity check on the passed structure */
+#define GZ_NONE 0
+#define GZ_READ 7247
+#define GZ_WRITE 31153
+#define GZ_APPEND 1 /* mode set to GZ_WRITE after the file is opened */
+
+/* values for gz_state how */
+#define LOOK 0 /* look for a gzip header */
+#define COPY 1 /* copy input directly */
+#define GZIP 2 /* decompress a gzip stream */
+
+/* internal gzip file state data structure */
+typedef struct {
+ /* exposed contents for gzgetc() macro */
+ struct gzFile_s x; /* "x" for exposed */
+ /* x.have: number of bytes available at x.next */
+ /* x.next: next output data to deliver or write */
+ /* x.pos: current position in uncompressed data */
+ /* used for both reading and writing */
+ int mode; /* see gzip modes above */
+ int fd; /* file descriptor */
+ char *path; /* path or fd for error messages */
+ unsigned size; /* buffer size, zero if not allocated yet */
+ unsigned want; /* requested buffer size, default is GZBUFSIZE */
+ unsigned char *in; /* input buffer (double-sized when writing) */
+ unsigned char *out; /* output buffer (double-sized when reading) */
+ int direct; /* 0 if processing gzip, 1 if transparent */
+ /* just for reading */
+ int how; /* 0: get header, 1: copy, 2: decompress */
+ z_off64_t start; /* where the gzip data started, for rewinding */
+ int eof; /* true if end of input file reached */
+ int past; /* true if read requested past end */
+ /* just for writing */
+ int level; /* compression level */
+ int strategy; /* compression strategy */
+ /* seek request */
+ z_off64_t skip; /* amount to skip (already rewound if backwards) */
+ int seek; /* true if seek request pending */
+ /* error information */
+ int err; /* error code */
+ char *msg; /* error message */
+ /* zlib inflate or deflate stream */
+ z_stream strm; /* stream structure in-place (not a pointer) */
+} gz_state;
+typedef gz_state FAR *gz_statep;
+
+/* shared functions */
+void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *));
+#if defined UNDER_CE
+char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error));
+#endif
+
+/* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t
+ value -- needed when comparing unsigned to z_off64_t, which is signed
+ (possible z_off64_t types off_t, off64_t, and long are all signed) */
+#ifdef INT_MAX
+# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX)
+#else
+unsigned ZLIB_INTERNAL gz_intmax OF((void));
+# define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax())
+#endif
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzlib.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzlib.c
new file mode 100644
index 00000000..4105e6af
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzlib.c
@@ -0,0 +1,637 @@
+/* gzlib.c -- zlib functions common to reading and writing gzip files
+ * Copyright (C) 2004-2017 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "gzguts.h"
+
+#if defined(_WIN32) && !defined(__BORLANDC__) && !defined(__MINGW32__)
+# define LSEEK _lseeki64
+#else
+#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0
+# define LSEEK lseek64
+#else
+# define LSEEK lseek
+#endif
+#endif
+
+/* Local functions */
+local void gz_reset OF((gz_statep));
+local gzFile gz_open OF((const void *, int, const char *));
+
+#if defined UNDER_CE
+
+/* Map the Windows error number in ERROR to a locale-dependent error message
+ string and return a pointer to it. Typically, the values for ERROR come
+ from GetLastError.
+
+ The string pointed to shall not be modified by the application, but may be
+ overwritten by a subsequent call to gz_strwinerror
+
+ The gz_strwinerror function does not change the current setting of
+ GetLastError. */
+char ZLIB_INTERNAL *gz_strwinerror (error)
+ DWORD error;
+{
+ static char buf[1024];
+
+ wchar_t *msgbuf;
+ DWORD lasterr = GetLastError();
+ DWORD chars = FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM
+ | FORMAT_MESSAGE_ALLOCATE_BUFFER,
+ NULL,
+ error,
+ 0, /* Default language */
+ (LPVOID)&msgbuf,
+ 0,
+ NULL);
+ if (chars != 0) {
+ /* If there is an \r\n appended, zap it. */
+ if (chars >= 2
+ && msgbuf[chars - 2] == '\r' && msgbuf[chars - 1] == '\n') {
+ chars -= 2;
+ msgbuf[chars] = 0;
+ }
+
+ if (chars > sizeof (buf) - 1) {
+ chars = sizeof (buf) - 1;
+ msgbuf[chars] = 0;
+ }
+
+ wcstombs(buf, msgbuf, chars + 1);
+ LocalFree(msgbuf);
+ }
+ else {
+ sprintf(buf, "unknown win32 error (%ld)", error);
+ }
+
+ SetLastError(lasterr);
+ return buf;
+}
+
+#endif /* UNDER_CE */
+
+/* Reset gzip file state */
+local void gz_reset(state)
+ gz_statep state;
+{
+ state->x.have = 0; /* no output data available */
+ if (state->mode == GZ_READ) { /* for reading ... */
+ state->eof = 0; /* not at end of file */
+ state->past = 0; /* have not read past end yet */
+ state->how = LOOK; /* look for gzip header */
+ }
+ state->seek = 0; /* no seek request pending */
+ gz_error(state, Z_OK, NULL); /* clear error */
+ state->x.pos = 0; /* no uncompressed data yet */
+ state->strm.avail_in = 0; /* no input data yet */
+}
+
+/* Open a gzip file either by name or file descriptor. */
+local gzFile gz_open(path, fd, mode)
+ const void *path;
+ int fd;
+ const char *mode;
+{
+ gz_statep state;
+ z_size_t len;
+ int oflag;
+#ifdef O_CLOEXEC
+ int cloexec = 0;
+#endif
+#ifdef O_EXCL
+ int exclusive = 0;
+#endif
+
+ /* check input */
+ if (path == NULL)
+ return NULL;
+
+ /* allocate gzFile structure to return */
+ state = (gz_statep)malloc(sizeof(gz_state));
+ if (state == NULL)
+ return NULL;
+ state->size = 0; /* no buffers allocated yet */
+ state->want = GZBUFSIZE; /* requested buffer size */
+ state->msg = NULL; /* no error message yet */
+
+ /* interpret mode */
+ state->mode = GZ_NONE;
+ state->level = Z_DEFAULT_COMPRESSION;
+ state->strategy = Z_DEFAULT_STRATEGY;
+ state->direct = 0;
+ while (*mode) {
+ if (*mode >= '0' && *mode <= '9')
+ state->level = *mode - '0';
+ else
+ switch (*mode) {
+ case 'r':
+ state->mode = GZ_READ;
+ break;
+#ifndef NO_GZCOMPRESS
+ case 'w':
+ state->mode = GZ_WRITE;
+ break;
+ case 'a':
+ state->mode = GZ_APPEND;
+ break;
+#endif
+ case '+': /* can't read and write at the same time */
+ free(state);
+ return NULL;
+ case 'b': /* ignore -- will request binary anyway */
+ break;
+#ifdef O_CLOEXEC
+ case 'e':
+ cloexec = 1;
+ break;
+#endif
+#ifdef O_EXCL
+ case 'x':
+ exclusive = 1;
+ break;
+#endif
+ case 'f':
+ state->strategy = Z_FILTERED;
+ break;
+ case 'h':
+ state->strategy = Z_HUFFMAN_ONLY;
+ break;
+ case 'R':
+ state->strategy = Z_RLE;
+ break;
+ case 'F':
+ state->strategy = Z_FIXED;
+ break;
+ case 'T':
+ state->direct = 1;
+ break;
+ default: /* could consider as an error, but just ignore */
+ ;
+ }
+ mode++;
+ }
+
+ /* must provide an "r", "w", or "a" */
+ if (state->mode == GZ_NONE) {
+ free(state);
+ return NULL;
+ }
+
+ /* can't force transparent read */
+ if (state->mode == GZ_READ) {
+ if (state->direct) {
+ free(state);
+ return NULL;
+ }
+ state->direct = 1; /* for empty file */
+ }
+
+ /* save the path name for error messages */
+#ifdef WIDECHAR
+ if (fd == -2) {
+ len = wcstombs(NULL, path, 0);
+ if (len == (z_size_t)-1)
+ len = 0;
+ }
+ else
+#endif
+ len = strlen((const char *)path);
+ state->path = (char *)malloc(len + 1);
+ if (state->path == NULL) {
+ free(state);
+ return NULL;
+ }
+#ifdef WIDECHAR
+ if (fd == -2)
+ if (len)
+ wcstombs(state->path, path, len + 1);
+ else
+ *(state->path) = 0;
+ else
+#endif
+#if !defined(NO_snprintf) && !defined(NO_vsnprintf)
+ (void)snprintf(state->path, len + 1, "%s", (const char *)path);
+#else
+ strcpy(state->path, path);
+#endif
+
+ /* compute the flags for open() */
+ oflag =
+#ifdef O_LARGEFILE
+ O_LARGEFILE |
+#endif
+#ifdef O_BINARY
+ O_BINARY |
+#endif
+#ifdef O_CLOEXEC
+ (cloexec ? O_CLOEXEC : 0) |
+#endif
+ (state->mode == GZ_READ ?
+ O_RDONLY :
+ (O_WRONLY | O_CREAT |
+#ifdef O_EXCL
+ (exclusive ? O_EXCL : 0) |
+#endif
+ (state->mode == GZ_WRITE ?
+ O_TRUNC :
+ O_APPEND)));
+
+ /* open the file with the appropriate flags (or just use fd) */
+ state->fd = fd > -1 ? fd : (
+#ifdef WIDECHAR
+ fd == -2 ? _wopen(path, oflag, 0666) :
+#endif
+ open((const char *)path, oflag, 0666));
+ if (state->fd == -1) {
+ free(state->path);
+ free(state);
+ return NULL;
+ }
+ if (state->mode == GZ_APPEND) {
+ LSEEK(state->fd, 0, SEEK_END); /* so gzoffset() is correct */
+ state->mode = GZ_WRITE; /* simplify later checks */
+ }
+
+ /* save the current position for rewinding (only if reading) */
+ if (state->mode == GZ_READ) {
+ state->start = LSEEK(state->fd, 0, SEEK_CUR);
+ if (state->start == -1) state->start = 0;
+ }
+
+ /* initialize stream */
+ gz_reset(state);
+
+ /* return stream */
+ return (gzFile)state;
+}
+
+/* -- see zlib.h -- */
+gzFile ZEXPORT gzopen(path, mode)
+ const char *path;
+ const char *mode;
+{
+ return gz_open(path, -1, mode);
+}
+
+/* -- see zlib.h -- */
+gzFile ZEXPORT gzopen64(path, mode)
+ const char *path;
+ const char *mode;
+{
+ return gz_open(path, -1, mode);
+}
+
+/* -- see zlib.h -- */
+gzFile ZEXPORT gzdopen(fd, mode)
+ int fd;
+ const char *mode;
+{
+ char *path; /* identifier for error messages */
+ gzFile gz;
+
+ if (fd == -1 || (path = (char *)malloc(7 + 3 * sizeof(int))) == NULL)
+ return NULL;
+#if !defined(NO_snprintf) && !defined(NO_vsnprintf)
+ (void)snprintf(path, 7 + 3 * sizeof(int), "<fd:%d>", fd);
+#else
+ sprintf(path, "<fd:%d>", fd); /* for debugging */
+#endif
+ gz = gz_open(path, fd, mode);
+ free(path);
+ return gz;
+}
+
+/* -- see zlib.h -- */
+#ifdef WIDECHAR
+gzFile ZEXPORT gzopen_w(path, mode)
+ const wchar_t *path;
+ const char *mode;
+{
+ return gz_open(path, -2, mode);
+}
+#endif
+
+/* -- see zlib.h -- */
+int ZEXPORT gzbuffer(file, size)
+ gzFile file;
+ unsigned size;
+{
+ gz_statep state;
+
+ /* get internal structure and check integrity */
+ if (file == NULL)
+ return -1;
+ state = (gz_statep)file;
+ if (state->mode != GZ_READ && state->mode != GZ_WRITE)
+ return -1;
+
+ /* make sure we haven't already allocated memory */
+ if (state->size != 0)
+ return -1;
+
+ /* check and set requested size */
+ if ((size << 1) < size)
+ return -1; /* need to be able to double it */
+ if (size < 2)
+ size = 2; /* need two bytes to check magic header */
+ state->want = size;
+ return 0;
+}
+
+/* -- see zlib.h -- */
+int ZEXPORT gzrewind(file)
+ gzFile file;
+{
+ gz_statep state;
+
+ /* get internal structure */
+ if (file == NULL)
+ return -1;
+ state = (gz_statep)file;
+
+ /* check that we're reading and that there's no error */
+ if (state->mode != GZ_READ ||
+ (state->err != Z_OK && state->err != Z_BUF_ERROR))
+ return -1;
+
+ /* back up and start over */
+ if (LSEEK(state->fd, state->start, SEEK_SET) == -1)
+ return -1;
+ gz_reset(state);
+ return 0;
+}
+
+/* -- see zlib.h -- */
+z_off64_t ZEXPORT gzseek64(file, offset, whence)
+ gzFile file;
+ z_off64_t offset;
+ int whence;
+{
+ unsigned n;
+ z_off64_t ret;
+ gz_statep state;
+
+ /* get internal structure and check integrity */
+ if (file == NULL)
+ return -1;
+ state = (gz_statep)file;
+ if (state->mode != GZ_READ && state->mode != GZ_WRITE)
+ return -1;
+
+ /* check that there's no error */
+ if (state->err != Z_OK && state->err != Z_BUF_ERROR)
+ return -1;
+
+ /* can only seek from start or relative to current position */
+ if (whence != SEEK_SET && whence != SEEK_CUR)
+ return -1;
+
+ /* normalize offset to a SEEK_CUR specification */
+ if (whence == SEEK_SET)
+ offset -= state->x.pos;
+ else if (state->seek)
+ offset += state->skip;
+ state->seek = 0;
+
+ /* if within raw area while reading, just go there */
+ if (state->mode == GZ_READ && state->how == COPY &&
+ state->x.pos + offset >= 0) {
+ ret = LSEEK(state->fd, offset - state->x.have, SEEK_CUR);
+ if (ret == -1)
+ return -1;
+ state->x.have = 0;
+ state->eof = 0;
+ state->past = 0;
+ state->seek = 0;
+ gz_error(state, Z_OK, NULL);
+ state->strm.avail_in = 0;
+ state->x.pos += offset;
+ return state->x.pos;
+ }
+
+ /* calculate skip amount, rewinding if needed for back seek when reading */
+ if (offset < 0) {
+ if (state->mode != GZ_READ) /* writing -- can't go backwards */
+ return -1;
+ offset += state->x.pos;
+ if (offset < 0) /* before start of file! */
+ return -1;
+ if (gzrewind(file) == -1) /* rewind, then skip to offset */
+ return -1;
+ }
+
+ /* if reading, skip what's in output buffer (one less gzgetc() check) */
+ if (state->mode == GZ_READ) {
+ n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > offset ?
+ (unsigned)offset : state->x.have;
+ state->x.have -= n;
+ state->x.next += n;
+ state->x.pos += n;
+ offset -= n;
+ }
+
+ /* request skip (if not zero) */
+ if (offset) {
+ state->seek = 1;
+ state->skip = offset;
+ }
+ return state->x.pos + offset;
+}
+
+/* -- see zlib.h -- */
+z_off_t ZEXPORT gzseek(file, offset, whence)
+ gzFile file;
+ z_off_t offset;
+ int whence;
+{
+ z_off64_t ret;
+
+ ret = gzseek64(file, (z_off64_t)offset, whence);
+ return ret == (z_off_t)ret ? (z_off_t)ret : -1;
+}
+
+/* -- see zlib.h -- */
+z_off64_t ZEXPORT gztell64(file)
+ gzFile file;
+{
+ gz_statep state;
+
+ /* get internal structure and check integrity */
+ if (file == NULL)
+ return -1;
+ state = (gz_statep)file;
+ if (state->mode != GZ_READ && state->mode != GZ_WRITE)
+ return -1;
+
+ /* return position */
+ return state->x.pos + (state->seek ? state->skip : 0);
+}
+
+/* -- see zlib.h -- */
+z_off_t ZEXPORT gztell(file)
+ gzFile file;
+{
+ z_off64_t ret;
+
+ ret = gztell64(file);
+ return ret == (z_off_t)ret ? (z_off_t)ret : -1;
+}
+
+/* -- see zlib.h -- */
+z_off64_t ZEXPORT gzoffset64(file)
+ gzFile file;
+{
+ z_off64_t offset;
+ gz_statep state;
+
+ /* get internal structure and check integrity */
+ if (file == NULL)
+ return -1;
+ state = (gz_statep)file;
+ if (state->mode != GZ_READ && state->mode != GZ_WRITE)
+ return -1;
+
+ /* compute and return effective offset in file */
+ offset = LSEEK(state->fd, 0, SEEK_CUR);
+ if (offset == -1)
+ return -1;
+ if (state->mode == GZ_READ) /* reading */
+ offset -= state->strm.avail_in; /* don't count buffered input */
+ return offset;
+}
+
+/* -- see zlib.h -- */
+z_off_t ZEXPORT gzoffset(file)
+ gzFile file;
+{
+ z_off64_t ret;
+
+ ret = gzoffset64(file);
+ return ret == (z_off_t)ret ? (z_off_t)ret : -1;
+}
+
+/* -- see zlib.h -- */
+int ZEXPORT gzeof(file)
+ gzFile file;
+{
+ gz_statep state;
+
+ /* get internal structure and check integrity */
+ if (file == NULL)
+ return 0;
+ state = (gz_statep)file;
+ if (state->mode != GZ_READ && state->mode != GZ_WRITE)
+ return 0;
+
+ /* return end-of-file state */
+ return state->mode == GZ_READ ? state->past : 0;
+}
+
+/* -- see zlib.h -- */
+const char * ZEXPORT gzerror(file, errnum)
+ gzFile file;
+ int *errnum;
+{
+ gz_statep state;
+
+ /* get internal structure and check integrity */
+ if (file == NULL)
+ return NULL;
+ state = (gz_statep)file;
+ if (state->mode != GZ_READ && state->mode != GZ_WRITE)
+ return NULL;
+
+ /* return error information */
+ if (errnum != NULL)
+ *errnum = state->err;
+ return state->err == Z_MEM_ERROR ? "out of memory" :
+ (state->msg == NULL ? "" : state->msg);
+}
+
+/* -- see zlib.h -- */
+void ZEXPORT gzclearerr(file)
+ gzFile file;
+{
+ gz_statep state;
+
+ /* get internal structure and check integrity */
+ if (file == NULL)
+ return;
+ state = (gz_statep)file;
+ if (state->mode != GZ_READ && state->mode != GZ_WRITE)
+ return;
+
+ /* clear error and end-of-file */
+ if (state->mode == GZ_READ) {
+ state->eof = 0;
+ state->past = 0;
+ }
+ gz_error(state, Z_OK, NULL);
+}
+
+/* Create an error message in allocated memory and set state->err and
+ state->msg accordingly. Free any previous error message already there. Do
+ not try to free or allocate space if the error is Z_MEM_ERROR (out of
+ memory). Simply save the error message as a static string. If there is an
+ allocation failure constructing the error message, then convert the error to
+ out of memory. */
+void ZLIB_INTERNAL gz_error(state, err, msg)
+ gz_statep state;
+ int err;
+ const char *msg;
+{
+ /* free previously allocated message and clear */
+ if (state->msg != NULL) {
+ if (state->err != Z_MEM_ERROR)
+ free(state->msg);
+ state->msg = NULL;
+ }
+
+ /* if fatal, set state->x.have to 0 so that the gzgetc() macro fails */
+ if (err != Z_OK && err != Z_BUF_ERROR)
+ state->x.have = 0;
+
+ /* set error code, and if no message, then done */
+ state->err = err;
+ if (msg == NULL)
+ return;
+
+ /* for an out of memory error, return literal string when requested */
+ if (err == Z_MEM_ERROR)
+ return;
+
+ /* construct error message with path */
+ if ((state->msg = (char *)malloc(strlen(state->path) + strlen(msg) + 3)) ==
+ NULL) {
+ state->err = Z_MEM_ERROR;
+ return;
+ }
+#if !defined(NO_snprintf) && !defined(NO_vsnprintf)
+ (void)snprintf(state->msg, strlen(state->path) + strlen(msg) + 3,
+ "%s%s%s", state->path, ": ", msg);
+#else
+ strcpy(state->msg, state->path);
+ strcat(state->msg, ": ");
+ strcat(state->msg, msg);
+#endif
+}
+
+#ifndef INT_MAX
+/* portably return maximum value for an int (when limits.h presumed not
+ available) -- we need to do this to cover cases where 2's complement not
+ used, since C standard permits 1's complement and sign-bit representations,
+ otherwise we could just use ((unsigned)-1) >> 1 */
+unsigned ZLIB_INTERNAL gz_intmax()
+{
+ unsigned p, q;
+
+ p = 1;
+ do {
+ q = p;
+ p <<= 1;
+ p++;
+ } while (p > q);
+ return q >> 1;
+}
+#endif
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzread.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzread.c
new file mode 100644
index 00000000..08aa0de5
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzread.c
@@ -0,0 +1,656 @@
+/* gzread.c -- zlib functions for reading gzip files
+ * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "gzguts.h"
+#ifndef __clang_analyzer__
+
+/* Local functions */
+local int gz_load OF((gz_statep, unsigned char *, unsigned, unsigned *));
+local int gz_avail OF((gz_statep));
+local int gz_look OF((gz_statep));
+local int gz_decomp OF((gz_statep));
+local int gz_fetch OF((gz_statep));
+local int gz_skip OF((gz_statep, z_off64_t));
+local z_size_t gz_read OF((gz_statep, voidp, z_size_t));
+
+/* Use read() to load a buffer -- return -1 on error, otherwise 0. Read from
+ state->fd, and update state->eof, state->err, and state->msg as appropriate.
+ This function needs to loop on read(), since read() is not guaranteed to
+ read the number of bytes requested, depending on the type of descriptor. */
+local int gz_load(state, buf, len, have)
+ gz_statep state;
+ unsigned char *buf;
+ unsigned len;
+ unsigned *have;
+{
+ int ret;
+ unsigned get, max = ((unsigned)-1 >> 2) + 1;
+
+ *have = 0;
+ do {
+ get = len - *have;
+ if (get > max)
+ get = max;
+ ret = read(state->fd, buf + *have, get);
+ if (ret <= 0)
+ break;
+ *have += (unsigned)ret;
+ } while (*have < len);
+ if (ret < 0) {
+ gz_error(state, Z_ERRNO, zstrerror());
+ return -1;
+ }
+ if (ret == 0)
+ state->eof = 1;
+ return 0;
+}
+
+/* Load up input buffer and set eof flag if last data loaded -- return -1 on
+ error, 0 otherwise. Note that the eof flag is set when the end of the input
+ file is reached, even though there may be unused data in the buffer. Once
+ that data has been used, no more attempts will be made to read the file.
+ If strm->avail_in != 0, then the current data is moved to the beginning of
+ the input buffer, and then the remainder of the buffer is loaded with the
+ available data from the input file. */
+local int gz_avail(state)
+ gz_statep state;
+{
+ unsigned got;
+ z_streamp strm = &(state->strm);
+
+ if (state->err != Z_OK && state->err != Z_BUF_ERROR)
+ return -1;
+ if (state->eof == 0) {
+ if (strm->avail_in) { /* copy what's there to the start */
+ unsigned char *p = state->in;
+ unsigned const char *q = strm->next_in;
+ unsigned n = strm->avail_in;
+ do {
+ *p++ = *q++;
+ } while (--n);
+ }
+ if (gz_load(state, state->in + strm->avail_in,
+ state->size - strm->avail_in, &got) == -1)
+ return -1;
+ strm->avail_in += got;
+ strm->next_in = state->in;
+ }
+ return 0;
+}
+
+/* Look for gzip header, set up for inflate or copy. state->x.have must be 0.
+ If this is the first time in, allocate required memory. state->how will be
+ left unchanged if there is no more input data available, will be set to COPY
+ if there is no gzip header and direct copying will be performed, or it will
+ be set to GZIP for decompression. If direct copying, then leftover input
+ data from the input buffer will be copied to the output buffer. In that
+ case, all further file reads will be directly to either the output buffer or
+ a user buffer. If decompressing, the inflate state will be initialized.
+ gz_look() will return 0 on success or -1 on failure. */
+local int gz_look(state)
+ gz_statep state;
+{
+ z_streamp strm = &(state->strm);
+
+ /* allocate read buffers and inflate memory */
+ if (state->size == 0) {
+ /* allocate buffers */
+ state->in = (unsigned char *)malloc(state->want);
+ state->out = (unsigned char *)malloc(state->want << 1);
+ if (state->in == NULL || state->out == NULL) {
+ free(state->out);
+ free(state->in);
+ gz_error(state, Z_MEM_ERROR, "out of memory");
+ return -1;
+ }
+ state->size = state->want;
+
+ /* allocate inflate memory */
+ state->strm.zalloc = Z_NULL;
+ state->strm.zfree = Z_NULL;
+ state->strm.opaque = Z_NULL;
+ state->strm.avail_in = 0;
+ state->strm.next_in = Z_NULL;
+ if (inflateInit2(&(state->strm), 15 + 16) != Z_OK) { /* gunzip */
+ free(state->out);
+ free(state->in);
+ state->size = 0;
+ gz_error(state, Z_MEM_ERROR, "out of memory");
+ return -1;
+ }
+ }
+
+ /* get at least the magic bytes in the input buffer */
+ if (strm->avail_in < 2) {
+ if (gz_avail(state) == -1)
+ return -1;
+ if (strm->avail_in == 0)
+ return 0;
+ }
+
+ /* look for gzip magic bytes -- if there, do gzip decoding (note: there is
+ a logical dilemma here when considering the case of a partially written
+ gzip file, to wit, if a single 31 byte is written, then we cannot tell
+ whether this is a single-byte file, or just a partially written gzip
+ file -- for here we assume that if a gzip file is being written, then
+ the header will be written in a single operation, so that reading a
+ single byte is sufficient indication that it is not a gzip file) */
+ if (strm->avail_in > 1 &&
+ strm->next_in[0] == 31 && strm->next_in[1] == 139) {
+ inflateReset(strm);
+ state->how = GZIP;
+ state->direct = 0;
+ return 0;
+ }
+
+ /* no gzip header -- if we were decoding gzip before, then this is trailing
+ garbage. Ignore the trailing garbage and finish. */
+ if (state->direct == 0) {
+ strm->avail_in = 0;
+ state->eof = 1;
+ state->x.have = 0;
+ return 0;
+ }
+
+ /* doing raw i/o, copy any leftover input to output -- this assumes that
+ the output buffer is larger than the input buffer, which also assures
+ space for gzungetc() */
+ state->x.next = state->out;
+ if (strm->avail_in) {
+ memcpy(state->x.next, strm->next_in, strm->avail_in);
+ state->x.have = strm->avail_in;
+ strm->avail_in = 0;
+ }
+ state->how = COPY;
+ state->direct = 1;
+ return 0;
+}
+
+/* Decompress from input to the provided next_out and avail_out in the state.
+ On return, state->x.have and state->x.next point to the just decompressed
+ data. If the gzip stream completes, state->how is reset to LOOK to look for
+ the next gzip stream or raw data, once state->x.have is depleted. Returns 0
+ on success, -1 on failure. */
+local int gz_decomp(state)
+ gz_statep state;
+{
+ int ret = Z_OK;
+ unsigned had;
+ z_streamp strm = &(state->strm);
+
+ /* fill output buffer up to end of deflate stream */
+ had = strm->avail_out;
+ do {
+ /* get more input for inflate() */
+ if (strm->avail_in == 0 && gz_avail(state) == -1)
+ return -1;
+ if (strm->avail_in == 0) {
+ gz_error(state, Z_BUF_ERROR, "unexpected end of file");
+ break;
+ }
+
+ /* decompress and handle errors */
+ ret = inflate(strm, Z_NO_FLUSH);
+ if (ret == Z_STREAM_ERROR || ret == Z_NEED_DICT) {
+ gz_error(state, Z_STREAM_ERROR,
+ "internal error: inflate stream corrupt");
+ return -1;
+ }
+ if (ret == Z_MEM_ERROR) {
+ gz_error(state, Z_MEM_ERROR, "out of memory");
+ return -1;
+ }
+ if (ret == Z_DATA_ERROR) { /* deflate stream invalid */
+ gz_error(state, Z_DATA_ERROR,
+ strm->msg == NULL ? "compressed data error" : strm->msg);
+ return -1;
+ }
+ } while (strm->avail_out && ret != Z_STREAM_END);
+
+ /* update available output */
+ state->x.have = had - strm->avail_out;
+ state->x.next = strm->next_out - state->x.have;
+
+ /* if the gzip stream completed successfully, look for another */
+ if (ret == Z_STREAM_END)
+ state->how = LOOK;
+
+ /* good decompression */
+ return 0;
+}
+
+/* Fetch data and put it in the output buffer. Assumes state->x.have is 0.
+ Data is either copied from the input file or decompressed from the input
+ file depending on state->how. If state->how is LOOK, then a gzip header is
+ looked for to determine whether to copy or decompress. Returns -1 on error,
+ otherwise 0. gz_fetch() will leave state->how as COPY or GZIP unless the
+ end of the input file has been reached and all data has been processed. */
+local int gz_fetch(state)
+ gz_statep state;
+{
+ z_streamp strm = &(state->strm);
+
+ do {
+ switch(state->how) {
+ case LOOK: /* -> LOOK, COPY (only if never GZIP), or GZIP */
+ if (gz_look(state) == -1)
+ return -1;
+ if (state->how == LOOK)
+ return 0;
+ break;
+ case COPY: /* -> COPY */
+ if (gz_load(state, state->out, state->size << 1, &(state->x.have))
+ == -1)
+ return -1;
+ state->x.next = state->out;
+ return 0;
+ case GZIP: /* -> GZIP or LOOK (if end of gzip stream) */
+ strm->avail_out = state->size << 1;
+ strm->next_out = state->out;
+ if (gz_decomp(state) == -1)
+ return -1;
+ }
+ } while (state->x.have == 0 && (!state->eof || strm->avail_in));
+ return 0;
+}
+
+/* Skip len uncompressed bytes of output. Return -1 on error, 0 on success. */
+local int gz_skip(state, len)
+ gz_statep state;
+ z_off64_t len;
+{
+ unsigned n;
+
+ /* skip over len bytes or reach end-of-file, whichever comes first */
+ while (len)
+ /* skip over whatever is in output buffer */
+ if (state->x.have) {
+ n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > len ?
+ (unsigned)len : state->x.have;
+ state->x.have -= n;
+ state->x.next += n;
+ state->x.pos += n;
+ len -= n;
+ }
+
+ /* output buffer empty -- return if we're at the end of the input */
+ else if (state->eof && state->strm.avail_in == 0)
+ break;
+
+ /* need more data to skip -- load up output buffer */
+ else {
+ /* get more output, looking for header if required */
+ if (gz_fetch(state) == -1)
+ return -1;
+ }
+ return 0;
+}
+
/* Read len bytes into buf from file, or less than len up to the end of the
   input.  Return the number of bytes read.  If zero is returned, either the
   end of file was reached, or there was an error.  state->err must be
   consulted in that case to determine which. */
local z_size_t gz_read(state, buf, len)
    gz_statep state;
    voidp buf;
    z_size_t len;
{
    z_size_t got;
    unsigned n;

    /* if len is zero, avoid unnecessary operations */
    if (len == 0)
        return 0;

    /* process a skip request */
    if (state->seek) {
        state->seek = 0;
        if (gz_skip(state, state->skip) == -1)
            return 0;
    }

    /* get len bytes to buf, or less than len if at the end */
    got = 0;
    do {
        /* set n to the maximum amount of len that fits in an unsigned int */
        n = -1;             /* (unsigned)-1 is the largest unsigned value */
        if (n > len)
            n = len;

        /* first just try copying data from the output buffer */
        if (state->x.have) {
            if (state->x.have < n)
                n = state->x.have;
            memcpy(buf, state->x.next, n);
            state->x.next += n;
            state->x.have -= n;
        }

        /* output buffer empty -- return if we're at the end of the input */
        else if (state->eof && state->strm.avail_in == 0) {
            state->past = 1;        /* tried to read past end */
            break;
        }

        /* need output data -- for small len or new stream load up our output
           buffer */
        else if (state->how == LOOK || n < (state->size << 1)) {
            /* get more output, looking for header if required */
            if (gz_fetch(state) == -1)
                return 0;
            continue;       /* no progress yet -- go back to copy above */
            /* the copy above assures that we will leave with space in the
               output buffer, allowing at least one gzungetc() to succeed */
        }

        /* large len -- read directly into user buffer */
        else if (state->how == COPY) {      /* read directly */
            if (gz_load(state, (unsigned char *)buf, n, &n) == -1)
                return 0;
        }

        /* large len -- decompress directly into user buffer */
        else {  /* state->how == GZIP */
            state->strm.avail_out = n;
            state->strm.next_out = (unsigned char *)buf;
            if (gz_decomp(state) == -1)
                return 0;
            n = state->x.have;
            state->x.have = 0;
        }

        /* update progress */
        len -= n;
        buf = (char *)buf + n;
        got += n;
        state->x.pos += n;
    } while (len);

    /* return number of bytes read into user buffer */
    return got;
}
+
/* -- see zlib.h for the public contract of gzread() -- */
int ZEXPORT gzread(file, buf, len)
    gzFile file;
    voidp buf;
    unsigned len;
{
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return -1;
    state = (gz_statep)file;

    /* check that we're reading and that there's no (serious) error */
    if (state->mode != GZ_READ ||
            (state->err != Z_OK && state->err != Z_BUF_ERROR))
        return -1;

    /* since an int is returned, make sure len fits in one, otherwise return
       with an error (this avoids a flaw in the interface) */
    if ((int)len < 0) {
        gz_error(state, Z_STREAM_ERROR, "request does not fit in an int");
        return -1;
    }

    /* read len or fewer bytes to buf */
    len = gz_read(state, buf, len);

    /* check for an error */
    if (len == 0 && state->err != Z_OK && state->err != Z_BUF_ERROR)
        return -1;

    /* return the number of bytes read (this is assured to fit in an int) */
    return (int)len;
}
+
/* -- see zlib.h for the public contract of gzfread() -- */
z_size_t ZEXPORT gzfread(buf, size, nitems, file)
    voidp buf;
    z_size_t size;
    z_size_t nitems;
    gzFile file;
{
    z_size_t len;
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return 0;
    state = (gz_statep)file;

    /* check that we're reading and that there's no (serious) error */
    if (state->mode != GZ_READ ||
            (state->err != Z_OK && state->err != Z_BUF_ERROR))
        return 0;

    /* compute bytes to read -- error on overflow (len / size != nitems
       detects that nitems * size wrapped around z_size_t) */
    len = nitems * size;
    if (size && len / size != nitems) {
        gz_error(state, Z_STREAM_ERROR, "request does not fit in a size_t");
        return 0;
    }

    /* read len or fewer bytes to buf, return the number of full items read */
    return len ? gz_read(state, buf, len) / size : 0;
}
+
/* -- see zlib.h -- */
/* gzgetc is normally a macro; undo that here so the real function below can
   be defined and exported under the same name */
#ifdef Z_PREFIX_SET
#  undef z_gzgetc
#else
#  undef gzgetc
#endif
int ZEXPORT gzgetc(file)
    gzFile file;
{
    int ret;
    unsigned char buf[1];
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return -1;
    state = (gz_statep)file;

    /* check that we're reading and that there's no (serious) error */
    if (state->mode != GZ_READ ||
            (state->err != Z_OK && state->err != Z_BUF_ERROR))
        return -1;

    /* try output buffer (no need to check for skip request) */
    if (state->x.have) {
        state->x.have--;
        state->x.pos++;
        return *(state->x.next)++;
    }

    /* nothing there -- try gz_read() */
    ret = gz_read(state, buf, 1);
    return ret < 1 ? -1 : buf[0];
}
+
/* Plain-function entry point that forwards to gzgetc().
   NOTE(review): presumably kept so applications can take the address of a
   real function while gzgetc itself is a macro -- confirm against zlib.h. */
int ZEXPORT gzgetc_(file)
gzFile file;
{
    return gzgetc(file);
}
+
/* -- see zlib.h for the public contract of gzungetc() -- */
int ZEXPORT gzungetc(c, file)
    int c;
    gzFile file;
{
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return -1;
    state = (gz_statep)file;

    /* check that we're reading and that there's no (serious) error */
    if (state->mode != GZ_READ ||
            (state->err != Z_OK && state->err != Z_BUF_ERROR))
        return -1;

    /* process a skip request */
    if (state->seek) {
        state->seek = 0;
        if (gz_skip(state, state->skip) == -1)
            return -1;
    }

    /* can't push EOF */
    if (c < 0)
        return -1;

    /* if output buffer empty, put byte at end (allows more pushing) */
    if (state->x.have == 0) {
        state->x.have = 1;
        state->x.next = state->out + (state->size << 1) - 1;
        state->x.next[0] = (unsigned char)c;
        state->x.pos--;
        state->past = 0;
        return c;
    }

    /* if no room, give up (must have already done a gzungetc()) */
    if (state->x.have == (state->size << 1)) {
        gz_error(state, Z_DATA_ERROR, "out of room to push characters");
        return -1;
    }

    /* slide output data if needed and insert byte before existing data --
       the copy runs back-to-front so overlapping source/destination is safe */
    if (state->x.next == state->out) {
        unsigned char *src = state->out + state->x.have;
        unsigned char *dest = state->out + (state->size << 1);
        while (src > state->out)
            *--dest = *--src;
        state->x.next = dest;
    }
    state->x.have++;
    state->x.next--;
    state->x.next[0] = (unsigned char)c;
    state->x.pos--;
    state->past = 0;
    return c;
}
+
/* -- see zlib.h for the public contract of gzgets() -- */
char * ZEXPORT gzgets(file, buf, len)
    gzFile file;
    char *buf;
    int len;
{
    unsigned left, n;
    char *str;
    unsigned char *eol;
    gz_statep state;

    /* check parameters and get internal structure */
    if (file == NULL || buf == NULL || len < 1)
        return NULL;
    state = (gz_statep)file;

    /* check that we're reading and that there's no (serious) error */
    if (state->mode != GZ_READ ||
            (state->err != Z_OK && state->err != Z_BUF_ERROR))
        return NULL;

    /* process a skip request */
    if (state->seek) {
        state->seek = 0;
        if (gz_skip(state, state->skip) == -1)
            return NULL;
    }

    /* copy output bytes up to new line or len - 1, whichever comes first --
       append a terminating zero to the string (we don't check for a zero in
       the contents, let the user worry about that) */
    str = buf;
    left = (unsigned)len - 1;
    if (left) do {
        /* assure that something is in the output buffer */
        if (state->x.have == 0 && gz_fetch(state) == -1)
            return NULL;                /* error */
        if (state->x.have == 0) {       /* end of file */
            state->past = 1;            /* read past end */
            break;                      /* return what we have */
        }

        /* look for end-of-line in current output buffer */
        n = state->x.have > left ? left : state->x.have;
        eol = (unsigned char *)memchr(state->x.next, '\n', n);
        if (eol != NULL)
            n = (unsigned)(eol - state->x.next) + 1;

        /* copy through end-of-line, or remainder if not found */
        memcpy(buf, state->x.next, n);
        state->x.have -= n;
        state->x.next += n;
        state->x.pos += n;
        left -= n;
        buf += n;
    } while (left && eol == NULL);

    /* return terminated string, or if nothing, end of file */
    if (buf == str)
        return NULL;
    buf[0] = 0;
    return str;
}
+
/* -- see zlib.h for the public contract of gzdirect() -- */
int ZEXPORT gzdirect(file)
    gzFile file;
{
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return 0;
    state = (gz_statep)file;

    /* if the state is not known, but we can find out, then do so (this is
       mainly for right after a gzopen() or gzdopen()) -- the gz_look()
       result is deliberately ignored; only state->direct matters here */
    if (state->mode == GZ_READ && state->how == LOOK && state->x.have == 0)
        (void)gz_look(state);

    /* return 1 if transparent, 0 if processing a gzip stream */
    return state->direct;
}
+
/* -- see zlib.h for the public contract of gzclose_r() -- */
int ZEXPORT gzclose_r(file)
    gzFile file;
{
    int ret, err;
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return Z_STREAM_ERROR;
    state = (gz_statep)file;

    /* check that we're reading */
    if (state->mode != GZ_READ)
        return Z_STREAM_ERROR;

    /* free memory and close file (state->size non-zero means the buffers
       and inflate state were allocated) */
    if (state->size) {
        inflateEnd(&(state->strm));
        free(state->out);
        free(state->in);
    }
    /* report a pending Z_BUF_ERROR to the caller even though the state is
       about to be freed */
    err = state->err == Z_BUF_ERROR ? Z_BUF_ERROR : Z_OK;
    gz_error(state, Z_OK, NULL);
    free(state->path);
    ret = close(state->fd);
    free(state);
    return ret ? Z_ERRNO : err;
}
+#endif
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzwrite.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzwrite.c
new file mode 100644
index 00000000..3be0dd0c
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/gzwrite.c
@@ -0,0 +1,667 @@
+/* gzwrite.c -- zlib functions for writing gzip files
+ * Copyright (C) 2004-2017 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "gzguts.h"
+#ifndef __clang_analyzer__
+
+/* Local functions */
+local int gz_init OF((gz_statep));
+local int gz_comp OF((gz_statep, int));
+local int gz_zero OF((gz_statep, z_off64_t));
+local z_size_t gz_write OF((gz_statep, voidpc, z_size_t));
+
/* Initialize state for writing a gzip file.  Mark initialization by setting
   state->size to non-zero.  Return -1 on a memory allocation failure, or 0 on
   success. */
local int gz_init(state)
    gz_statep state;
{
    int ret;
    z_streamp strm = &(state->strm);

    /* allocate input buffer (double size for gzprintf) */
    state->in = (unsigned char *)malloc(state->want << 1);
    if (state->in == NULL) {
        gz_error(state, Z_MEM_ERROR, "out of memory");
        return -1;
    }

    /* only need output buffer and deflate state if compressing */
    if (!state->direct) {
        /* allocate output buffer */
        state->out = (unsigned char *)malloc(state->want);
        if (state->out == NULL) {
            free(state->in);
            gz_error(state, Z_MEM_ERROR, "out of memory");
            return -1;
        }

        /* allocate deflate memory, set up for gzip compression --
           MAX_WBITS + 16 selects the gzip wrapper (see deflateInit2 in
           zlib.h) */
        strm->zalloc = Z_NULL;
        strm->zfree = Z_NULL;
        strm->opaque = Z_NULL;
        ret = deflateInit2(strm, state->level, Z_DEFLATED,
                           MAX_WBITS + 16, DEF_MEM_LEVEL, state->strategy);
        if (ret != Z_OK) {
            free(state->out);
            free(state->in);
            gz_error(state, Z_MEM_ERROR, "out of memory");
            return -1;
        }
        strm->next_in = NULL;
    }

    /* mark state as initialized */
    state->size = state->want;

    /* initialize write buffer if compressing */
    if (!state->direct) {
        strm->avail_out = state->size;
        strm->next_out = state->out;
        state->x.next = strm->next_out;
    }
    return 0;
}
+
/* Compress whatever is at avail_in and next_in and write to the output file.
   Return -1 if there is an error writing to the output file or if gz_init()
   fails to allocate memory, otherwise 0.  flush is assumed to be a valid
   deflate() flush value.  If flush is Z_FINISH, then the deflate() state is
   reset to start a new gzip stream.  If gz->direct is true, then simply write
   to the output file without compressing, and ignore flush. */
local int gz_comp(state, flush)
    gz_statep state;
    int flush;
{
    int ret, writ;
    /* max caps each write() at a quarter of the unsigned range plus one, so
       the chunk always fits in the int that write() returns */
    unsigned have, put, max = ((unsigned)-1 >> 2) + 1;
    z_streamp strm = &(state->strm);

    /* allocate memory if this is the first time through */
    if (state->size == 0 && gz_init(state) == -1)
        return -1;

    /* write directly if requested */
    if (state->direct) {
        while (strm->avail_in) {
            put = strm->avail_in > max ? max : strm->avail_in;
            writ = write(state->fd, strm->next_in, put);
            if (writ < 0) {
                gz_error(state, Z_ERRNO, zstrerror());
                return -1;
            }
            strm->avail_in -= (unsigned)writ;
            strm->next_in += writ;
        }
        return 0;
    }

    /* run deflate() on provided input until it produces no more output */
    ret = Z_OK;
    do {
        /* write out current buffer contents if full, or if flushing, but if
           doing Z_FINISH then don't write until we get to Z_STREAM_END */
        if (strm->avail_out == 0 || (flush != Z_NO_FLUSH &&
            (flush != Z_FINISH || ret == Z_STREAM_END))) {
            while (strm->next_out > state->x.next) {
                put = strm->next_out - state->x.next > (int)max ? max :
                      (unsigned)(strm->next_out - state->x.next);
                writ = write(state->fd, state->x.next, put);
                if (writ < 0) {
                    gz_error(state, Z_ERRNO, zstrerror());
                    return -1;
                }
                state->x.next += writ;
            }
            if (strm->avail_out == 0) {
                strm->avail_out = state->size;
                strm->next_out = state->out;
                state->x.next = state->out;
            }
        }

        /* compress */
        have = strm->avail_out;
        ret = deflate(strm, flush);
        if (ret == Z_STREAM_ERROR) {
            gz_error(state, Z_STREAM_ERROR,
                     "internal error: deflate stream corrupt");
            return -1;
        }
        have -= strm->avail_out;
    } while (have);

    /* if that completed a deflate stream, allow another to start */
    if (flush == Z_FINISH)
        deflateReset(strm);

    /* all done, no errors */
    return 0;
}
+
/* Compress len zeros to output.  Return -1 on a write error or memory
   allocation failure by gz_comp(), or 0 on success. */
local int gz_zero(state, len)
    gz_statep state;
    z_off64_t len;
{
    int first;
    unsigned n;
    z_streamp strm = &(state->strm);

    /* consume whatever's left in the input buffer */
    if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1)
        return -1;

    /* compress len zeros (len guaranteed > 0) */
    first = 1;
    while (len) {
        n = GT_OFF(state->size) || (z_off64_t)state->size > len ?
            (unsigned)len : state->size;
        if (first) {
            /* only zero the buffer once; later iterations reuse it */
            memset(state->in, 0, n);
            first = 0;
        }
        strm->avail_in = n;
        strm->next_in = state->in;
        state->x.pos += n;
        if (gz_comp(state, Z_NO_FLUSH) == -1)
            return -1;
        len -= n;
    }
    return 0;
}
+
/* Write len bytes from buf to file.  Return the number of bytes written.  If
   the returned value is less than len, then there was an error. */
local z_size_t gz_write(state, buf, len)
    gz_statep state;
    voidpc buf;
    z_size_t len;
{
    z_size_t put = len;

    /* if len is zero, avoid unnecessary operations */
    if (len == 0)
        return 0;

    /* allocate memory if this is the first time through */
    if (state->size == 0 && gz_init(state) == -1)
        return 0;

    /* check for seek request */
    if (state->seek) {
        state->seek = 0;
        if (gz_zero(state, state->skip) == -1)
            return 0;
    }

    /* for small len, copy to input buffer, otherwise compress directly */
    if (len < state->size) {
        /* copy to input buffer, compress when full */
        do {
            unsigned have, copy;

            if (state->strm.avail_in == 0)
                state->strm.next_in = state->in;
            /* have = bytes already pending in the input buffer */
            have = (unsigned)((state->strm.next_in + state->strm.avail_in) -
                              state->in);
            copy = state->size - have;
            if (copy > len)
                copy = len;
            memcpy(state->in + have, buf, copy);
            state->strm.avail_in += copy;
            state->x.pos += copy;
            buf = (const char *)buf + copy;
            len -= copy;
            if (len && gz_comp(state, Z_NO_FLUSH) == -1)
                return 0;
        } while (len);
    }
    else {
        /* consume whatever's left in the input buffer */
        if (state->strm.avail_in && gz_comp(state, Z_NO_FLUSH) == -1)
            return 0;

        /* directly compress user buffer to file */
        state->strm.next_in = (z_const Bytef *)buf;
        do {
            unsigned n = (unsigned)-1;      /* largest unsigned value */
            if (n > len)
                n = len;
            state->strm.avail_in = n;
            state->x.pos += n;
            if (gz_comp(state, Z_NO_FLUSH) == -1)
                return 0;
            len -= n;
        } while (len);
    }

    /* input was all buffered or compressed */
    return put;
}
+
/* -- see zlib.h for the public contract of gzwrite() -- */
int ZEXPORT gzwrite(file, buf, len)
    gzFile file;
    voidpc buf;
    unsigned len;
{
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return 0;
    state = (gz_statep)file;

    /* check that we're writing and that there's no error */
    if (state->mode != GZ_WRITE || state->err != Z_OK)
        return 0;

    /* since an int is returned, make sure len fits in one, otherwise return
       with an error (this avoids a flaw in the interface) */
    if ((int)len < 0) {
        gz_error(state, Z_DATA_ERROR, "requested length does not fit in int");
        return 0;
    }

    /* write len bytes from buf (the return value will fit in an int) */
    return (int)gz_write(state, buf, len);
}
+
/* -- see zlib.h for the public contract of gzfwrite() -- */
z_size_t ZEXPORT gzfwrite(buf, size, nitems, file)
    voidpc buf;
    z_size_t size;
    z_size_t nitems;
    gzFile file;
{
    z_size_t len;
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return 0;
    state = (gz_statep)file;

    /* check that we're writing and that there's no error */
    if (state->mode != GZ_WRITE || state->err != Z_OK)
        return 0;

    /* compute bytes to write -- error on overflow (len / size != nitems
       detects that nitems * size wrapped around z_size_t) */
    len = nitems * size;
    if (size && len / size != nitems) {
        gz_error(state, Z_STREAM_ERROR, "request does not fit in a size_t");
        return 0;
    }

    /* write len bytes to buf, return the number of full items written */
    return len ? gz_write(state, buf, len) / size : 0;
}
+
/* -- see zlib.h for the public contract of gzputc() -- */
int ZEXPORT gzputc(file, c)
    gzFile file;
    int c;
{
    unsigned have;
    unsigned char buf[1];
    gz_statep state;
    z_streamp strm;

    /* get internal structure */
    if (file == NULL)
        return -1;
    state = (gz_statep)file;
    strm = &(state->strm);

    /* check that we're writing and that there's no error */
    if (state->mode != GZ_WRITE || state->err != Z_OK)
        return -1;

    /* check for seek request */
    if (state->seek) {
        state->seek = 0;
        if (gz_zero(state, state->skip) == -1)
            return -1;
    }

    /* try writing to input buffer for speed (state->size == 0 if buffer not
       initialized) */
    if (state->size) {
        if (strm->avail_in == 0)
            strm->next_in = state->in;
        have = (unsigned)((strm->next_in + strm->avail_in) - state->in);
        if (have < state->size) {
            state->in[have] = (unsigned char)c;
            strm->avail_in++;
            state->x.pos++;
            return c & 0xff;
        }
    }

    /* no room in buffer or not initialized, use gz_write() */
    buf[0] = (unsigned char)c;
    if (gz_write(state, buf, 1) != 1)
        return -1;
    return c & 0xff;
}
+
/* -- see zlib.h for the public contract of gzputs() -- */
int ZEXPORT gzputs(file, str)
    gzFile file;
    const char *str;
{
    int ret;
    z_size_t len;
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return -1;
    state = (gz_statep)file;

    /* check that we're writing and that there's no error */
    if (state->mode != GZ_WRITE || state->err != Z_OK)
        return -1;

    /* write string -- a zero return is only an error when the string was
       non-empty */
    len = strlen(str);
    ret = gz_write(state, str, len);
    return ret == 0 && len != 0 ? -1 : ret;
}
+
+#if defined(STDC) || defined(Z_HAVE_STDARG_H)
+#include <stdarg.h>
+
/* -- see zlib.h for the public contract of gzvprintf() -- */
int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va)
{
    int len;
    unsigned left;
    char *next;
    gz_statep state;
    z_streamp strm;

    /* get internal structure */
    if (file == NULL)
        return Z_STREAM_ERROR;
    state = (gz_statep)file;
    strm = &(state->strm);

    /* check that we're writing and that there's no error */
    if (state->mode != GZ_WRITE || state->err != Z_OK)
        return Z_STREAM_ERROR;

    /* make sure we have some buffer space */
    if (state->size == 0 && gz_init(state) == -1)
        return state->err;

    /* check for seek request */
    if (state->seek) {
        state->seek = 0;
        if (gz_zero(state, state->skip) == -1)
            return state->err;
    }

    /* do the printf() into the input buffer, put length in len -- the input
       buffer is double-sized just for this function, so there is guaranteed to
       be state->size bytes available after the current contents */
    if (strm->avail_in == 0)
        strm->next_in = state->in;
    next = (char *)(state->in + (strm->next_in - state->in) + strm->avail_in);
    next[state->size - 1] = 0;      /* sentinel to detect truncation below */
#ifdef NO_vsnprintf
#  ifdef HAS_vsprintf_void
    (void)vsprintf(next, format, va);
    for (len = 0; len < state->size; len++)
        if (next[len] == 0) break;
#  else
    len = vsprintf(next, format, va);
#  endif
#else
#  ifdef HAS_vsnprintf_void
    (void)vsnprintf(next, state->size, format, va);
    len = strlen(next);
#  else
    len = vsnprintf(next, state->size, format, va);
#  endif
#endif

    /* check that printf() results fit in buffer */
    if (len == 0 || (unsigned)len >= state->size || next[state->size - 1] != 0)
        return 0;

    /* update buffer and position, compress first half if past that */
    strm->avail_in += (unsigned)len;
    state->x.pos += len;
    if (strm->avail_in >= state->size) {
        left = strm->avail_in - state->size;
        strm->avail_in = state->size;
        if (gz_comp(state, Z_NO_FLUSH) == -1)
            return state->err;
        memcpy(state->in, state->in + state->size, left);
        strm->next_in = state->in;
        strm->avail_in = left;
    }
    return len;
}
+
/* Variadic front end for gzvprintf() -- see zlib.h. */
int ZEXPORTVA gzprintf(gzFile file, const char *format, ...)
{
    va_list va;
    int ret;

    va_start(va, format);
    ret = gzvprintf(file, format, va);
    va_end(va);
    return ret;
}
+
+#else /* !STDC && !Z_HAVE_STDARG_H */
+
+/* -- see zlib.h -- */
+int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
+ a11, a12, a13, a14, a15, a16, a17, a18, a19, a20)
+ gzFile file;
+ const char *format;
+ int a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
+ a11, a12, a13, a14, a15, a16, a17, a18, a19, a20;
+{
+ unsigned len, left;
+ char *next;
+ gz_statep state;
+ z_streamp strm;
+
+ /* get internal structure */
+ if (file == NULL)
+ return Z_STREAM_ERROR;
+ state = (gz_statep)file;
+ strm = &(state->strm);
+
+ /* check that can really pass pointer in ints */
+ if (sizeof(int) != sizeof(void *))
+ return Z_STREAM_ERROR;
+
+ /* check that we're writing and that there's no error */
+ if (state->mode != GZ_WRITE || state->err != Z_OK)
+ return Z_STREAM_ERROR;
+
+ /* make sure we have some buffer space */
+ if (state->size == 0 && gz_init(state) == -1)
+ return state->error;
+
+ /* check for seek request */
+ if (state->seek) {
+ state->seek = 0;
+ if (gz_zero(state, state->skip) == -1)
+ return state->error;
+ }
+
+ /* do the printf() into the input buffer, put length in len -- the input
+ buffer is double-sized just for this function, so there is guaranteed to
+ be state->size bytes available after the current contents */
+ if (strm->avail_in == 0)
+ strm->next_in = state->in;
+ next = (char *)(strm->next_in + strm->avail_in);
+ next[state->size - 1] = 0;
+#ifdef NO_snprintf
+# ifdef HAS_sprintf_void
+ sprintf(next, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12,
+ a13, a14, a15, a16, a17, a18, a19, a20);
+ for (len = 0; len < size; len++)
+ if (next[len] == 0)
+ break;
+# else
+ len = sprintf(next, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11,
+ a12, a13, a14, a15, a16, a17, a18, a19, a20);
+# endif
+#else
+# ifdef HAS_snprintf_void
+ snprintf(next, state->size, format, a1, a2, a3, a4, a5, a6, a7, a8, a9,
+ a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20);
+ len = strlen(next);
+# else
+ len = snprintf(next, state->size, format, a1, a2, a3, a4, a5, a6, a7, a8,
+ a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20);
+# endif
+#endif
+
+ /* check that printf() results fit in buffer */
+ if (len == 0 || len >= state->size || next[state->size - 1] != 0)
+ return 0;
+
+ /* update buffer and position, compress first half if past that */
+ strm->avail_in += len;
+ state->x.pos += len;
+ if (strm->avail_in >= state->size) {
+ left = strm->avail_in - state->size;
+ strm->avail_in = state->size;
+ if (gz_comp(state, Z_NO_FLUSH) == -1)
+ return state->err;
+ memcpy(state->in, state->in + state->size, left);
+ strm->next_in = state->in;
+ strm->avail_in = left;
+ }
+ return (int)len;
+}
+
+#endif
+
/* -- see zlib.h for the public contract of gzflush() -- */
int ZEXPORT gzflush(file, flush)
    gzFile file;
    int flush;
{
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return Z_STREAM_ERROR;
    state = (gz_statep)file;

    /* check that we're writing and that there's no error */
    if (state->mode != GZ_WRITE || state->err != Z_OK)
        return Z_STREAM_ERROR;

    /* check flush parameter */
    if (flush < 0 || flush > Z_FINISH)
        return Z_STREAM_ERROR;

    /* check for seek request */
    if (state->seek) {
        state->seek = 0;
        if (gz_zero(state, state->skip) == -1)
            return state->err;
    }

    /* compress remaining data with requested flush -- any failure is
       reported through state->err below */
    (void)gz_comp(state, flush);
    return state->err;
}
+
/* -- see zlib.h for the public contract of gzsetparams() -- */
int ZEXPORT gzsetparams(file, level, strategy)
    gzFile file;
    int level;
    int strategy;
{
    gz_statep state;
    z_streamp strm;

    /* get internal structure */
    if (file == NULL)
        return Z_STREAM_ERROR;
    state = (gz_statep)file;
    strm = &(state->strm);

    /* check that we're writing and that there's no error */
    if (state->mode != GZ_WRITE || state->err != Z_OK)
        return Z_STREAM_ERROR;

    /* if no change is requested, then do nothing */
    if (level == state->level && strategy == state->strategy)
        return Z_OK;

    /* check for seek request */
    if (state->seek) {
        state->seek = 0;
        if (gz_zero(state, state->skip) == -1)
            return state->err;
    }

    /* change compression parameters for subsequent input */
    if (state->size) {
        /* flush previous input with previous parameters before changing */
        if (strm->avail_in && gz_comp(state, Z_BLOCK) == -1)
            return state->err;
        deflateParams(strm, level, strategy);
    }
    state->level = level;
    state->strategy = strategy;
    return Z_OK;
}
+
/* -- see zlib.h for the public contract of gzclose_w() -- */
int ZEXPORT gzclose_w(file)
    gzFile file;
{
    int ret = Z_OK;
    gz_statep state;

    /* get internal structure */
    if (file == NULL)
        return Z_STREAM_ERROR;
    state = (gz_statep)file;

    /* check that we're writing */
    if (state->mode != GZ_WRITE)
        return Z_STREAM_ERROR;

    /* check for seek request */
    if (state->seek) {
        state->seek = 0;
        if (gz_zero(state, state->skip) == -1)
            ret = state->err;
    }

    /* flush, free memory, and close file -- the first failure wins, but
       cleanup still runs to completion */
    if (gz_comp(state, Z_FINISH) == -1)
        ret = state->err;
    if (state->size) {
        if (!state->direct) {
            (void)deflateEnd(&(state->strm));
            free(state->out);
        }
        free(state->in);
    }
    gz_error(state, Z_OK, NULL);
    free(state->path);
    if (close(state->fd) == -1)
        ret = Z_ERRNO;
    free(state);
    return ret;
}
+#endif
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/infback.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/infback.c
new file mode 100644
index 00000000..59679ecb
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/infback.c
@@ -0,0 +1,640 @@
+/* infback.c -- inflate using a call-back interface
+ * Copyright (C) 1995-2016 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ This code is largely copied from inflate.c. Normally either infback.o or
+ inflate.o would be linked into an application--not both. The interface
+ with inffast.c is retained so that optimized assembler-coded versions of
+ inflate_fast() can be used with either inflate.c or infback.c.
+ */
+
+#include "zutil.h"
+#include "inftrees.h"
+#include "inflate.h"
+#include "inffast.h"
+
+/* function prototypes */
+local void fixedtables OF((struct inflate_state FAR *state));
+
/*
   strm provides memory allocation functions in zalloc and zfree, or
   Z_NULL to use the library memory allocation functions.

   windowBits is in the range 8..15, and window is a user-supplied
   window and output buffer that is 2**windowBits bytes.
 */
int ZEXPORT inflateBackInit_(strm, windowBits, window, version, stream_size)
z_streamp strm;
int windowBits;
unsigned char FAR *window;
const char *version;
int stream_size;
{
    struct inflate_state FAR *state;

    /* reject a mismatched zlib.h (version/stream size baked in by the
       INFLATEBACKINIT macro at the call site) */
    if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
        stream_size != (int)(sizeof(z_stream)))
        return Z_VERSION_ERROR;
    if (strm == Z_NULL || window == Z_NULL ||
        windowBits < 8 || windowBits > 15)
        return Z_STREAM_ERROR;
    strm->msg = Z_NULL;                 /* in case we return an error */
    if (strm->zalloc == (alloc_func)0) {
#ifdef Z_SOLO
        return Z_STREAM_ERROR;
#else
        strm->zalloc = zcalloc;
        strm->opaque = (voidpf)0;
#endif
    }
    /* note: this if has no braces -- its body is the whole #ifdef/#else
       region below */
    if (strm->zfree == (free_func)0)
#ifdef Z_SOLO
        return Z_STREAM_ERROR;
#else
        strm->zfree = zcfree;
#endif
    state = (struct inflate_state FAR *)ZALLOC(strm, 1,
                                               sizeof(struct inflate_state));
    if (state == Z_NULL) return Z_MEM_ERROR;
    Tracev((stderr, "inflate: allocated\n"));
    strm->state = (struct internal_state FAR *)state;
    state->dmax = 32768U;
    state->wbits = (uInt)windowBits;
    state->wsize = 1U << windowBits;
    state->window = window;
    state->wnext = 0;
    state->whave = 0;
    return Z_OK;
}
+
/*
   Return state with length and distance decoding tables and index sizes set to
   fixed code decoding.  Normally this returns fixed tables from inffixed.h.
   If BUILDFIXED is defined, then instead this routine builds the tables the
   first time it's called, and returns those tables the first time and
   thereafter.  This reduces the size of the code by about 2K bytes, in
   exchange for a little execution time.  However, BUILDFIXED should not be
   used for threaded applications, since the rewriting of the tables and virgin
   may not be thread-safe.
 */
local void fixedtables(state)
struct inflate_state FAR *state;
{
#ifdef BUILDFIXED
    static int virgin = 1;
    static code *lenfix, *distfix;
    static code fixed[544];

    /* build fixed huffman tables if first call (may not be thread safe) */
    if (virgin) {
        unsigned sym, bits;
        static code *next;

        /* literal/length table -- code lengths per RFC 1951 section 3.2.6 */
        sym = 0;
        while (sym < 144) state->lens[sym++] = 8;
        while (sym < 256) state->lens[sym++] = 9;
        while (sym < 280) state->lens[sym++] = 7;
        while (sym < 288) state->lens[sym++] = 8;
        next = fixed;
        lenfix = next;
        bits = 9;
        inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work);

        /* distance table */
        sym = 0;
        while (sym < 32) state->lens[sym++] = 5;
        distfix = next;
        bits = 5;
        inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work);

        /* do this just once */
        virgin = 0;
    }
#else /* !BUILDFIXED */
#   include "inffixed.h"
#endif /* BUILDFIXED */
    state->lencode = lenfix;
    state->lenbits = 9;
    state->distcode = distfix;
    state->distbits = 5;
}
+
/* Macros for inflateBack(): each expects the local registers put, left,
   next, have, hold, bits (and ret / in_desc / out_desc where used) to be in
   scope in inflateBack().  Multi-statement macros use do { } while (0) so
   they behave as single statements. */

/* Load returned state from inflate_fast() */
#define LOAD() \
    do { \
        put = strm->next_out; \
        left = strm->avail_out; \
        next = strm->next_in; \
        have = strm->avail_in; \
        hold = state->hold; \
        bits = state->bits; \
    } while (0)

/* Set state from registers for inflate_fast() */
#define RESTORE() \
    do { \
        strm->next_out = put; \
        strm->avail_out = left; \
        strm->next_in = next; \
        strm->avail_in = have; \
        state->hold = hold; \
        state->bits = bits; \
    } while (0)

/* Clear the input bit accumulator */
#define INITBITS() \
    do { \
        hold = 0; \
        bits = 0; \
    } while (0)

/* Assure that some input is available.  If input is requested, but denied,
   then return a Z_BUF_ERROR from inflateBack(). */
#define PULL() \
    do { \
        if (have == 0) { \
            have = in(in_desc, &next); \
            if (have == 0) { \
                next = Z_NULL; \
                ret = Z_BUF_ERROR; \
                goto inf_leave; \
            } \
        } \
    } while (0)

/* Get a byte of input into the bit accumulator, or return from inflateBack()
   with an error if there is no input available. */
#define PULLBYTE() \
    do { \
        PULL(); \
        have--; \
        hold += (unsigned long)(*next++) << bits; \
        bits += 8; \
    } while (0)

/* Assure that there are at least n bits in the bit accumulator.  If there is
   not enough available input to do that, then return from inflateBack() with
   an error. */
#define NEEDBITS(n) \
    do { \
        while (bits < (unsigned)(n)) \
            PULLBYTE(); \
    } while (0)

/* Return the low n bits of the bit accumulator (n < 16) */
#define BITS(n) \
    ((unsigned)hold & ((1U << (n)) - 1))

/* Remove n bits from the bit accumulator */
#define DROPBITS(n) \
    do { \
        hold >>= (n); \
        bits -= (unsigned)(n); \
    } while (0)

/* Remove zero to seven bits as needed to go to a byte boundary */
#define BYTEBITS() \
    do { \
        hold >>= bits & 7; \
        bits -= bits & 7; \
    } while (0)

/* Assure that some output space is available, by writing out the window
   if it's full.  If the write fails, return from inflateBack() with a
   Z_BUF_ERROR. */
#define ROOM() \
    do { \
        if (left == 0) { \
            put = state->window; \
            left = state->wsize; \
            state->whave = left; \
            if (out(out_desc, put, left)) { \
                ret = Z_BUF_ERROR; \
                goto inf_leave; \
            } \
        } \
    } while (0)
+
+/*
+ strm provides the memory allocation functions and window buffer on input,
+ and provides information on the unused input on return. For Z_DATA_ERROR
+ returns, strm will also provide an error message.
+
+ in() and out() are the call-back input and output functions. When
+ inflateBack() needs more input, it calls in(). When inflateBack() has
+ filled the window with output, or when it completes with data in the
+ window, it calls out() to write out the data. The application must not
+ change the provided input until in() is called again or inflateBack()
+ returns. The application must not change the window/output buffer until
+ inflateBack() returns.
+
+ in() and out() are called with a descriptor parameter provided in the
+ inflateBack() call. This parameter can be a structure that provides the
+ information required to do the read or write, as well as accumulated
+ information on the input and output such as totals and check values.
+
+ in() should return zero on failure. out() should return non-zero on
+ failure. If either in() or out() fails, than inflateBack() returns a
+ Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it
+ was in() or out() that caused in the error. Otherwise, inflateBack()
+ returns Z_STREAM_END on success, Z_DATA_ERROR for an deflate format
+ error, or Z_MEM_ERROR if it could not allocate memory for the state.
+ inflateBack() can also return Z_STREAM_ERROR if the input parameters
+ are not correct, i.e. strm is Z_NULL or the state was not initialized.
+ */
+int ZEXPORT inflateBack(strm, in, in_desc, out, out_desc)
+z_streamp strm;
+in_func in;
+void FAR *in_desc;
+out_func out;
+void FAR *out_desc;
+{
+ struct inflate_state FAR *state;
+ z_const unsigned char FAR *next; /* next input */
+ unsigned char FAR *put; /* next output */
+ unsigned have, left; /* available input and output */
+ unsigned long hold; /* bit buffer */
+ unsigned bits; /* bits in bit buffer */
+ unsigned copy; /* number of stored or match bytes to copy */
+ unsigned char FAR *from; /* where to copy match bytes from */
+ code here; /* current decoding table entry */
+ code last; /* parent table entry */
+ unsigned len; /* length to copy for repeats, bits to drop */
+ int ret; /* return code */
+ static const unsigned short order[19] = /* permutation of code lengths */
+ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+ /* Check that the strm exists and that the state was initialized */
+ if (strm == Z_NULL || strm->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+
+ /* Reset the state */
+ strm->msg = Z_NULL;
+ state->mode = TYPE;
+ state->last = 0;
+ state->whave = 0;
+ next = strm->next_in;
+ have = next != Z_NULL ? strm->avail_in : 0;
+ hold = 0;
+ bits = 0;
+ put = state->window;
+ left = state->wsize;
+
+ /* Inflate until end of block marked as last */
+ for (;;)
+ switch (state->mode) {
+ case TYPE:
+ /* determine and dispatch block type */
+ if (state->last) {
+ BYTEBITS();
+ state->mode = DONE;
+ break;
+ }
+ NEEDBITS(3);
+ state->last = BITS(1);
+ DROPBITS(1);
+ switch (BITS(2)) {
+ case 0: /* stored block */
+ Tracev((stderr, "inflate: stored block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = STORED;
+ break;
+ case 1: /* fixed block */
+ fixedtables(state);
+ Tracev((stderr, "inflate: fixed codes block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = LEN; /* decode codes */
+ break;
+ case 2: /* dynamic block */
+ Tracev((stderr, "inflate: dynamic codes block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = TABLE;
+ break;
+ case 3:
+ strm->msg = (char *)"invalid block type";
+ state->mode = BAD;
+ }
+ DROPBITS(2);
+ break;
+
+ case STORED:
+ /* get and verify stored block length */
+ BYTEBITS(); /* go to byte boundary */
+ NEEDBITS(32);
+ if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
+ strm->msg = (char *)"invalid stored block lengths";
+ state->mode = BAD;
+ break;
+ }
+ state->length = (unsigned)hold & 0xffff;
+ Tracev((stderr, "inflate: stored length %u\n",
+ state->length));
+ INITBITS();
+
+ /* copy stored block from input to output */
+ while (state->length != 0) {
+ copy = state->length;
+ PULL();
+ ROOM();
+ if (copy > have) copy = have;
+ if (copy > left) copy = left;
+ zmemcpy(put, next, copy);
+ have -= copy;
+ next += copy;
+ left -= copy;
+ put += copy;
+ state->length -= copy;
+ }
+ Tracev((stderr, "inflate: stored end\n"));
+ state->mode = TYPE;
+ break;
+
+ case TABLE:
+ /* get dynamic table entries descriptor */
+ NEEDBITS(14);
+ state->nlen = BITS(5) + 257;
+ DROPBITS(5);
+ state->ndist = BITS(5) + 1;
+ DROPBITS(5);
+ state->ncode = BITS(4) + 4;
+ DROPBITS(4);
+#ifndef PKZIP_BUG_WORKAROUND
+ if (state->nlen > 286 || state->ndist > 30) {
+ strm->msg = (char *)"too many length or distance symbols";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ Tracev((stderr, "inflate: table sizes ok\n"));
+
+ /* get code length code lengths (not a typo) */
+ state->have = 0;
+ while (state->have < state->ncode) {
+ NEEDBITS(3);
+ state->lens[order[state->have++]] = (unsigned short)BITS(3);
+ DROPBITS(3);
+ }
+ while (state->have < 19)
+ state->lens[order[state->have++]] = 0;
+ state->next = state->codes;
+ state->lencode = (code const FAR *)(state->next);
+ state->lenbits = 7;
+ ret = inflate_table(CODES, state->lens, 19, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid code lengths set";
+ state->mode = BAD;
+ break;
+ }
+ Tracev((stderr, "inflate: code lengths ok\n"));
+
+ /* get length and distance code code lengths */
+ state->have = 0;
+ while (state->have < state->nlen + state->ndist) {
+ for (;;) {
+ here = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (here.val < 16) {
+ DROPBITS(here.bits);
+ state->lens[state->have++] = here.val;
+ }
+ else {
+ if (here.val == 16) {
+ NEEDBITS(here.bits + 2);
+ DROPBITS(here.bits);
+ if (state->have == 0) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ len = (unsigned)(state->lens[state->have - 1]);
+ copy = 3 + BITS(2);
+ DROPBITS(2);
+ }
+ else if (here.val == 17) {
+ NEEDBITS(here.bits + 3);
+ DROPBITS(here.bits);
+ len = 0;
+ copy = 3 + BITS(3);
+ DROPBITS(3);
+ }
+ else {
+ NEEDBITS(here.bits + 7);
+ DROPBITS(here.bits);
+ len = 0;
+ copy = 11 + BITS(7);
+ DROPBITS(7);
+ }
+ if (state->have + copy > state->nlen + state->ndist) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ while (copy--)
+ state->lens[state->have++] = (unsigned short)len;
+ }
+ }
+
+ /* handle error breaks in while */
+ if (state->mode == BAD) break;
+
+ /* check for end-of-block code (better have one) */
+ if (state->lens[256] == 0) {
+ strm->msg = (char *)"invalid code -- missing end-of-block";
+ state->mode = BAD;
+ break;
+ }
+
+ /* build code tables -- note: do not change the lenbits or distbits
+ values here (9 and 6) without reading the comments in inftrees.h
+ concerning the ENOUGH constants, which depend on those values */
+ state->next = state->codes;
+ state->lencode = (code const FAR *)(state->next);
+ state->lenbits = 9;
+ ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid literal/lengths set";
+ state->mode = BAD;
+ break;
+ }
+ state->distcode = (code const FAR *)(state->next);
+ state->distbits = 6;
+ ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
+ &(state->next), &(state->distbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid distances set";
+ state->mode = BAD;
+ break;
+ }
+ Tracev((stderr, "inflate: codes ok\n"));
+ state->mode = LEN;
+
+ case LEN:
+ /* use inflate_fast() if we have enough input and output */
+ if (have >= 6 && left >= 258) {
+ RESTORE();
+ if (state->whave < state->wsize)
+ state->whave = state->wsize - left;
+ inflate_fast(strm, state->wsize);
+ LOAD();
+ break;
+ }
+
+ /* get a literal, length, or end-of-block code */
+ for (;;) {
+ here = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (here.op && (here.op & 0xf0) == 0) {
+ last = here;
+ for (;;) {
+ here = state->lencode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ }
+ DROPBITS(here.bits);
+ state->length = (unsigned)here.val;
+
+ /* process literal */
+ if (here.op == 0) {
+ Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", here.val));
+ ROOM();
+ *put++ = (unsigned char)(state->length);
+ left--;
+ state->mode = LEN;
+ break;
+ }
+
+ /* process end of block */
+ if (here.op & 32) {
+ Tracevv((stderr, "inflate: end of block\n"));
+ state->mode = TYPE;
+ break;
+ }
+
+ /* invalid code */
+ if (here.op & 64) {
+ strm->msg = (char *)"invalid literal/length code";
+ state->mode = BAD;
+ break;
+ }
+
+ /* length code -- get extra bits, if any */
+ state->extra = (unsigned)(here.op) & 15;
+ if (state->extra != 0) {
+ NEEDBITS(state->extra);
+ state->length += BITS(state->extra);
+ DROPBITS(state->extra);
+ }
+ Tracevv((stderr, "inflate: length %u\n", state->length));
+
+ /* get distance code */
+ for (;;) {
+ here = state->distcode[BITS(state->distbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if ((here.op & 0xf0) == 0) {
+ last = here;
+ for (;;) {
+ here = state->distcode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ }
+ DROPBITS(here.bits);
+ if (here.op & 64) {
+ strm->msg = (char *)"invalid distance code";
+ state->mode = BAD;
+ break;
+ }
+ state->offset = (unsigned)here.val;
+
+ /* get distance extra bits, if any */
+ state->extra = (unsigned)(here.op) & 15;
+ if (state->extra != 0) {
+ NEEDBITS(state->extra);
+ state->offset += BITS(state->extra);
+ DROPBITS(state->extra);
+ }
+ if (state->offset > state->wsize - (state->whave < state->wsize ?
+ left : 0)) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+ Tracevv((stderr, "inflate: distance %u\n", state->offset));
+
+ /* copy match from window to output */
+ do {
+ ROOM();
+ copy = state->wsize - state->offset;
+ if (copy < left) {
+ from = put + copy;
+ copy = left - copy;
+ }
+ else {
+ from = put - state->offset;
+ copy = left;
+ }
+ if (copy > state->length) copy = state->length;
+ state->length -= copy;
+ left -= copy;
+ do {
+ *put++ = *from++;
+ } while (--copy);
+ } while (state->length != 0);
+ break;
+
+ case DONE:
+ /* inflate stream terminated properly -- write leftover output */
+ ret = Z_STREAM_END;
+ if (left < state->wsize) {
+ if (out(out_desc, state->window, state->wsize - left))
+ ret = Z_BUF_ERROR;
+ }
+ goto inf_leave;
+
+ case BAD:
+ ret = Z_DATA_ERROR;
+ goto inf_leave;
+
+ default: /* can't happen, but makes compilers happy */
+ ret = Z_STREAM_ERROR;
+ goto inf_leave;
+ }
+
+ /* Return unused input */
+ inf_leave:
+ strm->next_in = next;
+ strm->avail_in = have;
+ return ret;
+}
+
+int ZEXPORT inflateBackEnd(strm)
+z_streamp strm;
+{
+ if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0)
+ return Z_STREAM_ERROR;
+ ZFREE(strm, strm->state);
+ strm->state = Z_NULL;
+ Tracev((stderr, "inflate: end\n"));
+ return Z_OK;
+}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inffast.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inffast.c
new file mode 100644
index 00000000..0dbd1dbc
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inffast.c
@@ -0,0 +1,323 @@
+/* inffast.c -- fast decoding
+ * Copyright (C) 1995-2017 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zutil.h"
+#include "inftrees.h"
+#include "inflate.h"
+#include "inffast.h"
+
+#ifdef ASMINF
+# pragma message("Assembler code may have bugs -- use at your own risk")
+#else
+
+/*
+ Decode literal, length, and distance codes and write out the resulting
+ literal and match bytes until either not enough input or output is
+ available, an end-of-block is encountered, or a data error is encountered.
+ When large enough input and output buffers are supplied to inflate(), for
+ example, a 16K input buffer and a 64K output buffer, more than 95% of the
+ inflate execution time is spent in this routine.
+
+ Entry assumptions:
+
+ state->mode == LEN
+ strm->avail_in >= 6
+ strm->avail_out >= 258
+ start >= strm->avail_out
+ state->bits < 8
+
+ On return, state->mode is one of:
+
+ LEN -- ran out of enough output space or enough available input
+ TYPE -- reached end of block code, inflate() to interpret next block
+ BAD -- error in block data
+
+ Notes:
+
+ - The maximum input bits used by a length/distance pair is 15 bits for the
+ length code, 5 bits for the length extra, 15 bits for the distance code,
+ and 13 bits for the distance extra. This totals 48 bits, or six bytes.
+ Therefore if strm->avail_in >= 6, then there is enough input to avoid
+ checking for available input while decoding.
+
+ - The maximum bytes that a single length/distance pair can output is 258
+ bytes, which is the maximum length that can be coded. inflate_fast()
+ requires strm->avail_out >= 258 for each loop to avoid checking for
+ output space.
+ */
+void ZLIB_INTERNAL inflate_fast(strm, start)
+z_streamp strm;
+unsigned start; /* inflate()'s starting value for strm->avail_out */
+{
+ struct inflate_state FAR *state;
+ z_const unsigned char FAR *in; /* local strm->next_in */
+ z_const unsigned char FAR *last; /* have enough input while in < last */
+ unsigned char FAR *out; /* local strm->next_out */
+ unsigned char FAR *beg; /* inflate()'s initial strm->next_out */
+ unsigned char FAR *end; /* while out < end, enough space available */
+#ifdef INFLATE_STRICT
+ unsigned dmax; /* maximum distance from zlib header */
+#endif
+ unsigned wsize; /* window size or zero if not using window */
+ unsigned whave; /* valid bytes in the window */
+ unsigned wnext; /* window write index */
+ unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */
+ unsigned long hold; /* local strm->hold */
+ unsigned bits; /* local strm->bits */
+ code const FAR *lcode; /* local strm->lencode */
+ code const FAR *dcode; /* local strm->distcode */
+ unsigned lmask; /* mask for first level of length codes */
+ unsigned dmask; /* mask for first level of distance codes */
+ code here; /* retrieved table entry */
+ unsigned op; /* code bits, operation, extra bits, or */
+ /* window position, window bytes to copy */
+ unsigned len; /* match length, unused bytes */
+ unsigned dist; /* match distance */
+ unsigned char FAR *from; /* where to copy match from */
+
+ /* copy state to local variables */
+ state = (struct inflate_state FAR *)strm->state;
+ in = strm->next_in;
+ last = in + (strm->avail_in - 5);
+ out = strm->next_out;
+ beg = out - (start - strm->avail_out);
+ end = out + (strm->avail_out - 257);
+#ifdef INFLATE_STRICT
+ dmax = state->dmax;
+#endif
+ wsize = state->wsize;
+ whave = state->whave;
+ wnext = state->wnext;
+ window = state->window;
+ hold = state->hold;
+ bits = state->bits;
+ lcode = state->lencode;
+ dcode = state->distcode;
+ lmask = (1U << state->lenbits) - 1;
+ dmask = (1U << state->distbits) - 1;
+
+ /* decode literals and length/distances until end-of-block or not enough
+ input data or output space */
+ do {
+ if (bits < 15) {
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ }
+ here = lcode[hold & lmask];
+ dolen:
+ op = (unsigned)(here.bits);
+ hold >>= op;
+ bits -= op;
+ op = (unsigned)(here.op);
+ if (op == 0) { /* literal */
+ Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", here.val));
+ *out++ = (unsigned char)(here.val);
+ }
+ else if (op & 16) { /* length base */
+ len = (unsigned)(here.val);
+ op &= 15; /* number of extra bits */
+ if (op) {
+ if (bits < op) {
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ }
+ len += (unsigned)hold & ((1U << op) - 1);
+ hold >>= op;
+ bits -= op;
+ }
+ Tracevv((stderr, "inflate: length %u\n", len));
+ if (bits < 15) {
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ }
+ here = dcode[hold & dmask];
+ dodist:
+ op = (unsigned)(here.bits);
+ hold >>= op;
+ bits -= op;
+ op = (unsigned)(here.op);
+ if (op & 16) { /* distance base */
+ dist = (unsigned)(here.val);
+ op &= 15; /* number of extra bits */
+ if (bits < op) {
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ if (bits < op) {
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ }
+ }
+ dist += (unsigned)hold & ((1U << op) - 1);
+#ifdef INFLATE_STRICT
+ if (dist > dmax) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ hold >>= op;
+ bits -= op;
+ Tracevv((stderr, "inflate: distance %u\n", dist));
+ op = (unsigned)(out - beg); /* max distance in output */
+ if (dist > op) { /* see if copy from window */
+ op = dist - op; /* distance back in window */
+ if (op > whave) {
+ if (state->sane) {
+ strm->msg =
+ (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
+ if (len <= op - whave) {
+ do {
+ *out++ = 0;
+ } while (--len);
+ continue;
+ }
+ len -= op - whave;
+ do {
+ *out++ = 0;
+ } while (--op > whave);
+ if (op == 0) {
+ from = out - dist;
+ do {
+ *out++ = *from++;
+ } while (--len);
+ continue;
+ }
+#endif
+ }
+ from = window;
+ if (wnext == 0) { /* very common case */
+ from += wsize - op;
+ if (op < len) { /* some from window */
+ len -= op;
+ do {
+ *out++ = *from++;
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ else if (wnext < op) { /* wrap around window */
+ from += wsize + wnext - op;
+ op -= wnext;
+ if (op < len) { /* some from end of window */
+ len -= op;
+ do {
+ *out++ = *from++;
+ } while (--op);
+ from = window;
+ if (wnext < len) { /* some from start of window */
+ op = wnext;
+ len -= op;
+ do {
+ *out++ = *from++;
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ }
+ else { /* contiguous in window */
+ from += wnext - op;
+ if (op < len) { /* some from window */
+ len -= op;
+ do {
+ *out++ = *from++;
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ while (len > 2) {
+ *out++ = *from++;
+ *out++ = *from++;
+ *out++ = *from++;
+ len -= 3;
+ }
+ if (len) {
+ *out++ = *from++;
+ if (len > 1)
+ *out++ = *from++;
+ }
+ }
+ else {
+ from = out - dist; /* copy direct from output */
+ do { /* minimum length is three */
+ *out++ = *from++;
+ *out++ = *from++;
+ *out++ = *from++;
+ len -= 3;
+ } while (len > 2);
+ if (len) {
+ *out++ = *from++;
+ if (len > 1)
+ *out++ = *from++;
+ }
+ }
+ }
+ else if ((op & 64) == 0) { /* 2nd level distance code */
+ here = dcode[here.val + (hold & ((1U << op) - 1))];
+ goto dodist;
+ }
+ else {
+ strm->msg = (char *)"invalid distance code";
+ state->mode = BAD;
+ break;
+ }
+ }
+ else if ((op & 64) == 0) { /* 2nd level length code */
+ here = lcode[here.val + (hold & ((1U << op) - 1))];
+ goto dolen;
+ }
+ else if (op & 32) { /* end-of-block */
+ Tracevv((stderr, "inflate: end of block\n"));
+ state->mode = TYPE;
+ break;
+ }
+ else {
+ strm->msg = (char *)"invalid literal/length code";
+ state->mode = BAD;
+ break;
+ }
+ } while (in < last && out < end);
+
+ /* return unused bytes (on entry, bits < 8, so in won't go too far back) */
+ len = bits >> 3;
+ in -= len;
+ bits -= len << 3;
+ hold &= (1U << bits) - 1;
+
+ /* update state and return */
+ strm->next_in = in;
+ strm->next_out = out;
+ strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
+ strm->avail_out = (unsigned)(out < end ?
+ 257 + (end - out) : 257 - (out - end));
+ state->hold = hold;
+ state->bits = bits;
+ return;
+}
+
+/*
+ inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe):
+ - Using bit fields for code structure
+ - Different op definition to avoid & for extra bits (do & for table bits)
+ - Three separate decoding do-loops for direct, window, and wnext == 0
+ - Special case for distance > 1 copies to do overlapped load and store copy
+ - Explicit branch predictions (based on measured branch probabilities)
+ - Deferring match copy and interspersed it with decoding subsequent codes
+ - Swapping literal/length else
+ - Swapping window/direct else
+ - Larger unrolled copy loops (three is about right)
+ - Moving len -= 3 statement into middle of loop
+ */
+
+#endif /* !ASMINF */
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inffast.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inffast.h
new file mode 100644
index 00000000..e5c1aa4c
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inffast.h
@@ -0,0 +1,11 @@
+/* inffast.h -- header to use inffast.c
+ * Copyright (C) 1995-2003, 2010 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+void ZLIB_INTERNAL inflate_fast OF((z_streamp strm, unsigned start));
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inffixed.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inffixed.h
new file mode 100644
index 00000000..d6283277
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inffixed.h
@@ -0,0 +1,94 @@
+ /* inffixed.h -- table for decoding fixed codes
+ * Generated automatically by makefixed().
+ */
+
+ /* WARNING: this file should *not* be used by applications.
+ It is part of the implementation of this library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+ static const code lenfix[512] = {
+ {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48},
+ {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128},
+ {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59},
+ {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176},
+ {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20},
+ {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100},
+ {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8},
+ {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216},
+ {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76},
+ {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114},
+ {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2},
+ {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148},
+ {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42},
+ {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86},
+ {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15},
+ {0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236},
+ {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62},
+ {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142},
+ {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31},
+ {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162},
+ {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25},
+ {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105},
+ {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4},
+ {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202},
+ {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69},
+ {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125},
+ {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13},
+ {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195},
+ {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35},
+ {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91},
+ {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19},
+ {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246},
+ {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55},
+ {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135},
+ {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99},
+ {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190},
+ {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16},
+ {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96},
+ {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6},
+ {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209},
+ {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72},
+ {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116},
+ {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4},
+ {0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153},
+ {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44},
+ {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82},
+ {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11},
+ {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229},
+ {16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58},
+ {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138},
+ {0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51},
+ {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173},
+ {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30},
+ {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110},
+ {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0},
+ {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195},
+ {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65},
+ {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121},
+ {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9},
+ {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258},
+ {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37},
+ {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93},
+ {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23},
+ {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251},
+ {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51},
+ {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131},
+ {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67},
+ {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183},
+ {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23},
+ {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103},
+ {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9},
+ {0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223},
+ {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79},
+ {0,9,255}
+ };
+
+ static const code distfix[32] = {
+ {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025},
+ {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193},
+ {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385},
+ {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577},
+ {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073},
+ {22,5,193},{64,5,0}
+ };
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inflate.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inflate.c
new file mode 100644
index 00000000..ac333e8c
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inflate.c
@@ -0,0 +1,1561 @@
+/* inflate.c -- zlib decompression
+ * Copyright (C) 1995-2016 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * Change history:
+ *
+ * 1.2.beta0 24 Nov 2002
+ * - First version -- complete rewrite of inflate to simplify code, avoid
+ * creation of window when not needed, minimize use of window when it is
+ * needed, make inffast.c even faster, implement gzip decoding, and to
+ * improve code readability and style over the previous zlib inflate code
+ *
+ * 1.2.beta1 25 Nov 2002
+ * - Use pointers for available input and output checking in inffast.c
+ * - Remove input and output counters in inffast.c
+ * - Change inffast.c entry and loop from avail_in >= 7 to >= 6
+ * - Remove unnecessary second byte pull from length extra in inffast.c
+ * - Unroll direct copy to three copies per loop in inffast.c
+ *
+ * 1.2.beta2 4 Dec 2002
+ * - Change external routine names to reduce potential conflicts
+ * - Correct filename to inffixed.h for fixed tables in inflate.c
+ * - Make hbuf[] unsigned char to match parameter type in inflate.c
+ * - Change strm->next_out[-state->offset] to *(strm->next_out - state->offset)
+ * to avoid negation problem on Alphas (64 bit) in inflate.c
+ *
+ * 1.2.beta3 22 Dec 2002
+ * - Add comments on state->bits assertion in inffast.c
+ * - Add comments on op field in inftrees.h
+ * - Fix bug in reuse of allocated window after inflateReset()
+ * - Remove bit fields--back to byte structure for speed
+ * - Remove distance extra == 0 check in inflate_fast()--only helps for lengths
+ * - Change post-increments to pre-increments in inflate_fast(), PPC biased?
+ * - Add compile time option, POSTINC, to use post-increments instead (Intel?)
+ * - Make MATCH copy in inflate() much faster for when inflate_fast() not used
+ * - Use local copies of stream next and avail values, as well as local bit
+ * buffer and bit count in inflate()--for speed when inflate_fast() not used
+ *
+ * 1.2.beta4 1 Jan 2003
+ * - Split ptr - 257 statements in inflate_table() to avoid compiler warnings
+ * - Move a comment on output buffer sizes from inffast.c to inflate.c
+ * - Add comments in inffast.c to introduce the inflate_fast() routine
+ * - Rearrange window copies in inflate_fast() for speed and simplification
+ * - Unroll last copy for window match in inflate_fast()
+ * - Use local copies of window variables in inflate_fast() for speed
+ * - Pull out common wnext == 0 case for speed in inflate_fast()
+ * - Make op and len in inflate_fast() unsigned for consistency
+ * - Add FAR to lcode and dcode declarations in inflate_fast()
+ * - Simplified bad distance check in inflate_fast()
+ * - Added inflateBackInit(), inflateBack(), and inflateBackEnd() in new
+ * source file infback.c to provide a call-back interface to inflate for
+ * programs like gzip and unzip -- uses window as output buffer to avoid
+ * window copying
+ *
+ * 1.2.beta5 1 Jan 2003
+ * - Improved inflateBack() interface to allow the caller to provide initial
+ * input in strm.
+ * - Fixed stored blocks bug in inflateBack()
+ *
+ * 1.2.beta6 4 Jan 2003
+ * - Added comments in inffast.c on effectiveness of POSTINC
+ * - Typecasting all around to reduce compiler warnings
+ * - Changed loops from while (1) or do {} while (1) to for (;;), again to
+ * make compilers happy
+ * - Changed type of window in inflateBackInit() to unsigned char *
+ *
+ * 1.2.beta7 27 Jan 2003
+ * - Changed many types to unsigned or unsigned short to avoid warnings
+ * - Added inflateCopy() function
+ *
+ * 1.2.0 9 Mar 2003
+ * - Changed inflateBack() interface to provide separate opaque descriptors
+ * for the in() and out() functions
+ * - Changed inflateBack() argument and in_func typedef to swap the length
+ * and buffer address return values for the input function
+ * - Check next_in and next_out for Z_NULL on entry to inflate()
+ *
+ * The history for versions after 1.2.0 are in ChangeLog in zlib distribution.
+ */
+
+#include "zutil.h"
+#include "inftrees.h"
+#include "inflate.h"
+#include "inffast.h"
+
+#ifdef MAKEFIXED
+# ifndef BUILDFIXED
+# define BUILDFIXED
+# endif
+#endif
+
+/* function prototypes */
+local int inflateStateCheck OF((z_streamp strm));
+local void fixedtables OF((struct inflate_state FAR *state));
+local int updatewindow OF((z_streamp strm, const unsigned char FAR *end,
+ unsigned copy));
+#ifdef BUILDFIXED
+ void makefixed OF((void));
+#endif
+local unsigned syncsearch OF((unsigned FAR *have, const unsigned char FAR *buf,
+ unsigned len));
+
+local int inflateStateCheck(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+ if (strm == Z_NULL ||
+ strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0)
+ return 1;
+ state = (struct inflate_state FAR *)strm->state;
+ if (state == Z_NULL || state->strm != strm ||
+ state->mode < HEAD || state->mode > SYNC)
+ return 1;
+ return 0;
+}
+
+int ZEXPORT inflateResetKeep(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ strm->total_in = strm->total_out = state->total = 0;
+ strm->msg = Z_NULL;
+ if (state->wrap) /* to support ill-conceived Java test suite */
+ strm->adler = state->wrap & 1;
+ state->mode = HEAD;
+ state->last = 0;
+ state->havedict = 0;
+ state->dmax = 32768U;
+ state->head = Z_NULL;
+ state->hold = 0;
+ state->bits = 0;
+ state->lencode = state->distcode = state->next = state->codes;
+ state->sane = 1;
+ state->back = -1;
+ Tracev((stderr, "inflate: reset\n"));
+ return Z_OK;
+}
+
+int ZEXPORT inflateReset(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ state->wsize = 0;
+ state->whave = 0;
+ state->wnext = 0;
+ return inflateResetKeep(strm);
+}
+
+int ZEXPORT inflateReset2(strm, windowBits)
+z_streamp strm;
+int windowBits;
+{
+ int wrap;
+ struct inflate_state FAR *state;
+
+ /* get the state */
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+
+ /* extract wrap request from windowBits parameter */
+ if (windowBits < 0) {
+ wrap = 0;
+ windowBits = -windowBits;
+ }
+ else {
+ wrap = (windowBits >> 4) + 5;
+#ifdef GUNZIP
+ if (windowBits < 48)
+ windowBits &= 15;
+#endif
+ }
+
+ /* set number of window bits, free window if different */
+ if (windowBits && (windowBits < 8 || windowBits > 15))
+ return Z_STREAM_ERROR;
+ if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) {
+ ZFREE(strm, state->window);
+ state->window = Z_NULL;
+ }
+
+ /* update state and reset the rest of it */
+ state->wrap = wrap;
+ state->wbits = (unsigned)windowBits;
+ return inflateReset(strm);
+}
+
+int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size)
+z_streamp strm;
+int windowBits;
+const char *version;
+int stream_size;
+{
+ int ret;
+ struct inflate_state FAR *state;
+
+ if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
+ stream_size != (int)(sizeof(z_stream)))
+ return Z_VERSION_ERROR;
+ if (strm == Z_NULL) return Z_STREAM_ERROR;
+ strm->msg = Z_NULL; /* in case we return an error */
+ if (strm->zalloc == (alloc_func)0) {
+#ifdef Z_SOLO
+ return Z_STREAM_ERROR;
+#else
+ strm->zalloc = zcalloc;
+ strm->opaque = (voidpf)0;
+#endif
+ }
+ if (strm->zfree == (free_func)0)
+#ifdef Z_SOLO
+ return Z_STREAM_ERROR;
+#else
+ strm->zfree = zcfree;
+#endif
+ state = (struct inflate_state FAR *)
+ ZALLOC(strm, 1, sizeof(struct inflate_state));
+ if (state == Z_NULL) return Z_MEM_ERROR;
+ Tracev((stderr, "inflate: allocated\n"));
+ strm->state = (struct internal_state FAR *)state;
+ state->strm = strm;
+ state->window = Z_NULL;
+ state->mode = HEAD; /* to pass state test in inflateReset2() */
+ ret = inflateReset2(strm, windowBits);
+ if (ret != Z_OK) {
+ ZFREE(strm, state);
+ strm->state = Z_NULL;
+ }
+ return ret;
+}
+
+int ZEXPORT inflateInit_(strm, version, stream_size)
+z_streamp strm;
+const char *version;
+int stream_size;
+{
+ return inflateInit2_(strm, DEF_WBITS, version, stream_size);
+}
+
+int ZEXPORT inflatePrime(strm, bits, value)
+z_streamp strm;
+int bits;
+int value;
+{
+ struct inflate_state FAR *state;
+
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if (bits < 0) {
+ state->hold = 0;
+ state->bits = 0;
+ return Z_OK;
+ }
+ if (bits > 16 || state->bits + (uInt)bits > 32) return Z_STREAM_ERROR;
+ value &= (1L << bits) - 1;
+ state->hold += (unsigned)value << state->bits;
+ state->bits += (uInt)bits;
+ return Z_OK;
+}
+
+/*
+ Return state with length and distance decoding tables and index sizes set to
+ fixed code decoding. Normally this returns fixed tables from inffixed.h.
+ If BUILDFIXED is defined, then instead this routine builds the tables the
+ first time it's called, and returns those tables the first time and
+ thereafter. This reduces the size of the code by about 2K bytes, in
+ exchange for a little execution time. However, BUILDFIXED should not be
+ used for threaded applications, since the rewriting of the tables and virgin
+ may not be thread-safe.
+ */
+local void fixedtables(state)
+struct inflate_state FAR *state;
+{
+#ifdef BUILDFIXED
+ static int virgin = 1;
+ static code *lenfix, *distfix;
+ static code fixed[544];
+
+ /* build fixed huffman tables if first call (may not be thread safe) */
+ if (virgin) {
+ unsigned sym, bits;
+ static code *next;
+
+ /* literal/length table */
+ sym = 0;
+ while (sym < 144) state->lens[sym++] = 8;
+ while (sym < 256) state->lens[sym++] = 9;
+ while (sym < 280) state->lens[sym++] = 7;
+ while (sym < 288) state->lens[sym++] = 8;
+ next = fixed;
+ lenfix = next;
+ bits = 9;
+ inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work);
+
+ /* distance table */
+ sym = 0;
+ while (sym < 32) state->lens[sym++] = 5;
+ distfix = next;
+ bits = 5;
+ inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work);
+
+ /* do this just once */
+ virgin = 0;
+ }
+#else /* !BUILDFIXED */
+# include "inffixed.h"
+#endif /* BUILDFIXED */
+ state->lencode = lenfix;
+ state->lenbits = 9;
+ state->distcode = distfix;
+ state->distbits = 5;
+}
+
+#ifdef MAKEFIXED
+#include <stdio.h>
+
+/*
+ Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also
+ defines BUILDFIXED, so the tables are built on the fly. makefixed() writes
+ those tables to stdout, which would be piped to inffixed.h. A small program
+ can simply call makefixed to do this:
+
+ void makefixed(void);
+
+ int main(void)
+ {
+ makefixed();
+ return 0;
+ }
+
+ Then that can be linked with zlib built with MAKEFIXED defined and run:
+
+ a.out > inffixed.h
+ */
+void makefixed()
+{
+ unsigned low, size;
+ struct inflate_state state;
+
+ fixedtables(&state);
+ puts(" /* inffixed.h -- table for decoding fixed codes");
+ puts(" * Generated automatically by makefixed().");
+ puts(" */");
+ puts("");
+ puts(" /* WARNING: this file should *not* be used by applications.");
+ puts(" It is part of the implementation of this library and is");
+ puts(" subject to change. Applications should only use zlib.h.");
+ puts(" */");
+ puts("");
+ size = 1U << 9;
+ printf(" static const code lenfix[%u] = {", size);
+ low = 0;
+ for (;;) {
+ if ((low % 7) == 0) printf("\n ");
+ printf("{%u,%u,%d}", (low & 127) == 99 ? 64 : state.lencode[low].op,
+ state.lencode[low].bits, state.lencode[low].val);
+ if (++low == size) break;
+ putchar(',');
+ }
+ puts("\n };");
+ size = 1U << 5;
+ printf("\n static const code distfix[%u] = {", size);
+ low = 0;
+ for (;;) {
+ if ((low % 6) == 0) printf("\n ");
+ printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits,
+ state.distcode[low].val);
+ if (++low == size) break;
+ putchar(',');
+ }
+ puts("\n };");
+}
+#endif /* MAKEFIXED */
+
+/*
+ Update the window with the last wsize (normally 32K) bytes written before
+ returning. If window does not exist yet, create it. This is only called
+ when a window is already in use, or when output has been written during this
+ inflate call, but the end of the deflate stream has not been reached yet.
+ It is also called to create a window for dictionary data when a dictionary
+ is loaded.
+
+ Providing output buffers larger than 32K to inflate() should provide a speed
+ advantage, since only the last 32K of output is copied to the sliding window
+ upon return from inflate(), and since all distances after the first 32K of
+ output will fall in the output data, making match copies simpler and faster.
+ The advantage may be dependent on the size of the processor's data caches.
+ */
+local int updatewindow(strm, end, copy)
+z_streamp strm;
+const Bytef *end;
+unsigned copy;
+{
+ struct inflate_state FAR *state;
+ unsigned dist;
+
+ state = (struct inflate_state FAR *)strm->state;
+
+ /* if it hasn't been done already, allocate space for the window */
+ if (state->window == Z_NULL) {
+ state->window = (unsigned char FAR *)
+ ZALLOC(strm, 1U << state->wbits,
+ sizeof(unsigned char));
+ if (state->window == Z_NULL) return 1;
+ }
+
+ /* if window not in use yet, initialize */
+ if (state->wsize == 0) {
+ state->wsize = 1U << state->wbits;
+ state->wnext = 0;
+ state->whave = 0;
+ }
+
+ /* copy state->wsize or less output bytes into the circular window */
+ if (copy >= state->wsize) {
+ zmemcpy(state->window, end - state->wsize, state->wsize);
+ state->wnext = 0;
+ state->whave = state->wsize;
+ }
+ else {
+ dist = state->wsize - state->wnext;
+ if (dist > copy) dist = copy;
+ zmemcpy(state->window + state->wnext, end - copy, dist);
+ copy -= dist;
+ if (copy) {
+ zmemcpy(state->window, end - copy, copy);
+ state->wnext = copy;
+ state->whave = state->wsize;
+ }
+ else {
+ state->wnext += dist;
+ if (state->wnext == state->wsize) state->wnext = 0;
+ if (state->whave < state->wsize) state->whave += dist;
+ }
+ }
+ return 0;
+}
+
+/* Macros for inflate(): */
+
+/* check function to use adler32() for zlib or crc32() for gzip */
+#ifdef GUNZIP
+# define UPDATE(check, buf, len) \
+ (state->flags ? crc32(check, buf, len) : adler32(check, buf, len))
+#else
+# define UPDATE(check, buf, len) adler32(check, buf, len)
+#endif
+
+/* check macros for header crc */
+#ifdef GUNZIP
+# define CRC2(check, word) \
+ do { \
+ hbuf[0] = (unsigned char)(word); \
+ hbuf[1] = (unsigned char)((word) >> 8); \
+ check = crc32(check, hbuf, 2); \
+ } while (0)
+
+# define CRC4(check, word) \
+ do { \
+ hbuf[0] = (unsigned char)(word); \
+ hbuf[1] = (unsigned char)((word) >> 8); \
+ hbuf[2] = (unsigned char)((word) >> 16); \
+ hbuf[3] = (unsigned char)((word) >> 24); \
+ check = crc32(check, hbuf, 4); \
+ } while (0)
+#endif
+
+/* Load registers with state in inflate() for speed */
+#define LOAD() \
+ do { \
+ put = strm->next_out; \
+ left = strm->avail_out; \
+ next = strm->next_in; \
+ have = strm->avail_in; \
+ hold = state->hold; \
+ bits = state->bits; \
+ } while (0)
+
+/* Restore state from registers in inflate() */
+#define RESTORE() \
+ do { \
+ strm->next_out = put; \
+ strm->avail_out = left; \
+ strm->next_in = next; \
+ strm->avail_in = have; \
+ state->hold = hold; \
+ state->bits = bits; \
+ } while (0)
+
+/* Clear the input bit accumulator */
+#define INITBITS() \
+ do { \
+ hold = 0; \
+ bits = 0; \
+ } while (0)
+
+/* Get a byte of input into the bit accumulator, or return from inflate()
+ if there is no input available. */
+#define PULLBYTE() \
+ do { \
+ if (have == 0) goto inf_leave; \
+ have--; \
+ hold += (unsigned long)(*next++) << bits; \
+ bits += 8; \
+ } while (0)
+
+/* Assure that there are at least n bits in the bit accumulator. If there is
+ not enough available input to do that, then return from inflate(). */
+#define NEEDBITS(n) \
+ do { \
+ while (bits < (unsigned)(n)) \
+ PULLBYTE(); \
+ } while (0)
+
+/* Return the low n bits of the bit accumulator (n < 16) */
+#define BITS(n) \
+ ((unsigned)hold & ((1U << (n)) - 1))
+
+/* Remove n bits from the bit accumulator */
+#define DROPBITS(n) \
+ do { \
+ hold >>= (n); \
+ bits -= (unsigned)(n); \
+ } while (0)
+
+/* Remove zero to seven bits as needed to go to a byte boundary */
+#define BYTEBITS() \
+ do { \
+ hold >>= bits & 7; \
+ bits -= bits & 7; \
+ } while (0)
+
+/*
+ inflate() uses a state machine to process as much input data and generate as
+ much output data as possible before returning. The state machine is
+ structured roughly as follows:
+
+ for (;;) switch (state) {
+ ...
+ case STATEn:
+ if (not enough input data or output space to make progress)
+ return;
+ ... make progress ...
+ state = STATEm;
+ break;
+ ...
+ }
+
+ so when inflate() is called again, the same case is attempted again, and
+ if the appropriate resources are provided, the machine proceeds to the
+ next state. The NEEDBITS() macro is usually the way the state evaluates
+ whether it can proceed or should return. NEEDBITS() does the return if
+ the requested bits are not available. The typical use of the BITS macros
+ is:
+
+ NEEDBITS(n);
+ ... do something with BITS(n) ...
+ DROPBITS(n);
+
+ where NEEDBITS(n) either returns from inflate() if there isn't enough
+ input left to load n bits into the accumulator, or it continues. BITS(n)
+ gives the low n bits in the accumulator. When done, DROPBITS(n) drops
+ the low n bits off the accumulator. INITBITS() clears the accumulator
+ and sets the number of available bits to zero. BYTEBITS() discards just
+ enough bits to put the accumulator on a byte boundary. After BYTEBITS()
+ and a NEEDBITS(8), then BITS(8) would return the next byte in the stream.
+
+ NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return
+ if there is no input available. The decoding of variable length codes uses
+ PULLBYTE() directly in order to pull just enough bytes to decode the next
+ code, and no more.
+
+ Some states loop until they get enough input, making sure that enough
+ state information is maintained to continue the loop where it left off
+ if NEEDBITS() returns in the loop. For example, want, need, and keep
+ would all have to actually be part of the saved state in case NEEDBITS()
+ returns:
+
+ case STATEw:
+ while (want < need) {
+ NEEDBITS(n);
+ keep[want++] = BITS(n);
+ DROPBITS(n);
+ }
+ state = STATEx;
+ case STATEx:
+
+ As shown above, if the next state is also the next case, then the break
+ is omitted.
+
+ A state may also return if there is not enough output space available to
+ complete that state. Those states are copying stored data, writing a
+ literal byte, and copying a matching string.
+
+ When returning, a "goto inf_leave" is used to update the total counters,
+ update the check value, and determine whether any progress has been made
+ during that inflate() call in order to return the proper return code.
+ Progress is defined as a change in either strm->avail_in or strm->avail_out.
+ When there is a window, goto inf_leave will update the window with the last
+ output written. If a goto inf_leave occurs in the middle of decompression
+ and there is no window currently, goto inf_leave will create one and copy
+ output to the window for the next call of inflate().
+
+ In this implementation, the flush parameter of inflate() only affects the
+ return code (per zlib.h). inflate() always writes as much as possible to
+ strm->next_out, given the space available and the provided input--the effect
+ documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers
+ the allocation of and copying into a sliding window until necessary, which
+ provides the effect documented in zlib.h for Z_FINISH when the entire input
+ stream available. So the only thing the flush parameter actually does is:
+ when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it
+ will return Z_BUF_ERROR if it has not reached the end of the stream.
+ */
+
+int ZEXPORT inflate(strm, flush)
+z_streamp strm;
+int flush;
+{
+ struct inflate_state FAR *state;
+ z_const unsigned char FAR *next; /* next input */
+ unsigned char FAR *put; /* next output */
+ unsigned have, left; /* available input and output */
+ unsigned long hold; /* bit buffer */
+ unsigned bits; /* bits in bit buffer */
+ unsigned in, out; /* save starting available input and output */
+ unsigned copy; /* number of stored or match bytes to copy */
+ unsigned char FAR *from; /* where to copy match bytes from */
+ code here; /* current decoding table entry */
+ code last; /* parent table entry */
+ unsigned len; /* length to copy for repeats, bits to drop */
+ int ret; /* return code */
+#ifdef GUNZIP
+ unsigned char hbuf[4]; /* buffer for gzip header crc calculation */
+#endif
+ static const unsigned short order[19] = /* permutation of code lengths */
+ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+ if (inflateStateCheck(strm) || strm->next_out == Z_NULL ||
+ (strm->next_in == Z_NULL && strm->avail_in != 0))
+ return Z_STREAM_ERROR;
+
+ state = (struct inflate_state FAR *)strm->state;
+ if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
+ LOAD();
+ in = have;
+ out = left;
+ ret = Z_OK;
+ for (;;)
+ switch (state->mode) {
+ case HEAD:
+ if (state->wrap == 0) {
+ state->mode = TYPEDO;
+ break;
+ }
+ NEEDBITS(16);
+#ifdef GUNZIP
+ if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */
+ if (state->wbits == 0)
+ state->wbits = 15;
+ state->check = crc32(0L, Z_NULL, 0);
+ CRC2(state->check, hold);
+ INITBITS();
+ state->mode = FLAGS;
+ break;
+ }
+ state->flags = 0; /* expect zlib header */
+ if (state->head != Z_NULL)
+ state->head->done = -1;
+ if (!(state->wrap & 1) || /* check if zlib header allowed */
+#else
+ if (
+#endif
+ ((BITS(8) << 8) + (hold >> 8)) % 31) {
+ strm->msg = (char *)"incorrect header check";
+ state->mode = BAD;
+ break;
+ }
+ if (BITS(4) != Z_DEFLATED) {
+ strm->msg = (char *)"unknown compression method";
+ state->mode = BAD;
+ break;
+ }
+ DROPBITS(4);
+ len = BITS(4) + 8;
+ if (state->wbits == 0)
+ state->wbits = len;
+ if (len > 15 || len > state->wbits) {
+ strm->msg = (char *)"invalid window size";
+ state->mode = BAD;
+ break;
+ }
+ state->dmax = 1U << len;
+ Tracev((stderr, "inflate: zlib header ok\n"));
+ strm->adler = state->check = adler32(0L, Z_NULL, 0);
+ state->mode = hold & 0x200 ? DICTID : TYPE;
+ INITBITS();
+ break;
+#ifdef GUNZIP
+ case FLAGS:
+ NEEDBITS(16);
+ state->flags = (int)(hold);
+ if ((state->flags & 0xff) != Z_DEFLATED) {
+ strm->msg = (char *)"unknown compression method";
+ state->mode = BAD;
+ break;
+ }
+ if (state->flags & 0xe000) {
+ strm->msg = (char *)"unknown header flags set";
+ state->mode = BAD;
+ break;
+ }
+ if (state->head != Z_NULL)
+ state->head->text = (int)((hold >> 8) & 1);
+ if ((state->flags & 0x0200) && (state->wrap & 4))
+ CRC2(state->check, hold);
+ INITBITS();
+ state->mode = TIME;
+ case TIME:
+ NEEDBITS(32);
+ if (state->head != Z_NULL)
+ state->head->time = hold;
+ if ((state->flags & 0x0200) && (state->wrap & 4))
+ CRC4(state->check, hold);
+ INITBITS();
+ state->mode = OS;
+ case OS:
+ NEEDBITS(16);
+ if (state->head != Z_NULL) {
+ state->head->xflags = (int)(hold & 0xff);
+ state->head->os = (int)(hold >> 8);
+ }
+ if ((state->flags & 0x0200) && (state->wrap & 4))
+ CRC2(state->check, hold);
+ INITBITS();
+ state->mode = EXLEN;
+ case EXLEN:
+ if (state->flags & 0x0400) {
+ NEEDBITS(16);
+ state->length = (unsigned)(hold);
+ if (state->head != Z_NULL)
+ state->head->extra_len = (unsigned)hold;
+ if ((state->flags & 0x0200) && (state->wrap & 4))
+ CRC2(state->check, hold);
+ INITBITS();
+ }
+ else if (state->head != Z_NULL)
+ state->head->extra = Z_NULL;
+ state->mode = EXTRA;
+ case EXTRA:
+ if (state->flags & 0x0400) {
+ copy = state->length;
+ if (copy > have) copy = have;
+ if (copy) {
+ if (state->head != Z_NULL &&
+ state->head->extra != Z_NULL) {
+ len = state->head->extra_len - state->length;
+ zmemcpy(state->head->extra + len, next,
+ len + copy > state->head->extra_max ?
+ state->head->extra_max - len : copy);
+ }
+ if ((state->flags & 0x0200) && (state->wrap & 4))
+ state->check = crc32(state->check, next, copy);
+ have -= copy;
+ next += copy;
+ state->length -= copy;
+ }
+ if (state->length) goto inf_leave;
+ }
+ state->length = 0;
+ state->mode = NAME;
+ case NAME:
+ if (state->flags & 0x0800) {
+ if (have == 0) goto inf_leave;
+ copy = 0;
+ do {
+ len = (unsigned)(next[copy++]);
+ if (state->head != Z_NULL &&
+ state->head->name != Z_NULL &&
+ state->length < state->head->name_max)
+ state->head->name[state->length++] = (Bytef)len;
+ } while (len && copy < have);
+ if ((state->flags & 0x0200) && (state->wrap & 4))
+ state->check = crc32(state->check, next, copy);
+ have -= copy;
+ next += copy;
+ if (len) goto inf_leave;
+ }
+ else if (state->head != Z_NULL)
+ state->head->name = Z_NULL;
+ state->length = 0;
+ state->mode = COMMENT;
+ case COMMENT:
+ if (state->flags & 0x1000) {
+ if (have == 0) goto inf_leave;
+ copy = 0;
+ do {
+ len = (unsigned)(next[copy++]);
+ if (state->head != Z_NULL &&
+ state->head->comment != Z_NULL &&
+ state->length < state->head->comm_max)
+ state->head->comment[state->length++] = (Bytef)len;
+ } while (len && copy < have);
+ if ((state->flags & 0x0200) && (state->wrap & 4))
+ state->check = crc32(state->check, next, copy);
+ have -= copy;
+ next += copy;
+ if (len) goto inf_leave;
+ }
+ else if (state->head != Z_NULL)
+ state->head->comment = Z_NULL;
+ state->mode = HCRC;
+ case HCRC:
+ if (state->flags & 0x0200) {
+ NEEDBITS(16);
+ if ((state->wrap & 4) && hold != (state->check & 0xffff)) {
+ strm->msg = (char *)"header crc mismatch";
+ state->mode = BAD;
+ break;
+ }
+ INITBITS();
+ }
+ if (state->head != Z_NULL) {
+ state->head->hcrc = (int)((state->flags >> 9) & 1);
+ state->head->done = 1;
+ }
+ strm->adler = state->check = crc32(0L, Z_NULL, 0);
+ state->mode = TYPE;
+ break;
+#endif
+ case DICTID:
+ NEEDBITS(32);
+ strm->adler = state->check = ZSWAP32(hold);
+ INITBITS();
+ state->mode = DICT;
+ case DICT:
+ if (state->havedict == 0) {
+ RESTORE();
+ return Z_NEED_DICT;
+ }
+ strm->adler = state->check = adler32(0L, Z_NULL, 0);
+ state->mode = TYPE;
+ case TYPE:
+ if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave;
+ case TYPEDO:
+ if (state->last) {
+ BYTEBITS();
+ state->mode = CHECK;
+ break;
+ }
+ NEEDBITS(3);
+ state->last = BITS(1);
+ DROPBITS(1);
+ switch (BITS(2)) {
+ case 0: /* stored block */
+ Tracev((stderr, "inflate: stored block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = STORED;
+ break;
+ case 1: /* fixed block */
+ fixedtables(state);
+ Tracev((stderr, "inflate: fixed codes block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = LEN_; /* decode codes */
+ if (flush == Z_TREES) {
+ DROPBITS(2);
+ goto inf_leave;
+ }
+ break;
+ case 2: /* dynamic block */
+ Tracev((stderr, "inflate: dynamic codes block%s\n",
+ state->last ? " (last)" : ""));
+ state->mode = TABLE;
+ break;
+ case 3:
+ strm->msg = (char *)"invalid block type";
+ state->mode = BAD;
+ }
+ DROPBITS(2);
+ break;
+ case STORED:
+ BYTEBITS(); /* go to byte boundary */
+ NEEDBITS(32);
+ if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
+ strm->msg = (char *)"invalid stored block lengths";
+ state->mode = BAD;
+ break;
+ }
+ state->length = (unsigned)hold & 0xffff;
+ Tracev((stderr, "inflate: stored length %u\n",
+ state->length));
+ INITBITS();
+ state->mode = COPY_;
+ if (flush == Z_TREES) goto inf_leave;
+ case COPY_:
+ state->mode = COPY;
+ case COPY:
+ copy = state->length;
+ if (copy) {
+ if (copy > have) copy = have;
+ if (copy > left) copy = left;
+ if (copy == 0) goto inf_leave;
+ zmemcpy(put, next, copy);
+ have -= copy;
+ next += copy;
+ left -= copy;
+ put += copy;
+ state->length -= copy;
+ break;
+ }
+ Tracev((stderr, "inflate: stored end\n"));
+ state->mode = TYPE;
+ break;
+ case TABLE:
+ NEEDBITS(14);
+ state->nlen = BITS(5) + 257;
+ DROPBITS(5);
+ state->ndist = BITS(5) + 1;
+ DROPBITS(5);
+ state->ncode = BITS(4) + 4;
+ DROPBITS(4);
+#ifndef PKZIP_BUG_WORKAROUND
+ if (state->nlen > 286 || state->ndist > 30) {
+ strm->msg = (char *)"too many length or distance symbols";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ Tracev((stderr, "inflate: table sizes ok\n"));
+ state->have = 0;
+ state->mode = LENLENS;
+ case LENLENS:
+ while (state->have < state->ncode) {
+ NEEDBITS(3);
+ state->lens[order[state->have++]] = (unsigned short)BITS(3);
+ DROPBITS(3);
+ }
+ while (state->have < 19)
+ state->lens[order[state->have++]] = 0;
+ state->next = state->codes;
+ state->lencode = (const code FAR *)(state->next);
+ state->lenbits = 7;
+ ret = inflate_table(CODES, state->lens, 19, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid code lengths set";
+ state->mode = BAD;
+ break;
+ }
+ Tracev((stderr, "inflate: code lengths ok\n"));
+ state->have = 0;
+ state->mode = CODELENS;
+ case CODELENS:
+ while (state->have < state->nlen + state->ndist) {
+ for (;;) {
+ here = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (here.val < 16) {
+ DROPBITS(here.bits);
+ state->lens[state->have++] = here.val;
+ }
+ else {
+ if (here.val == 16) {
+ NEEDBITS(here.bits + 2);
+ DROPBITS(here.bits);
+ if (state->have == 0) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ len = state->lens[state->have - 1];
+ copy = 3 + BITS(2);
+ DROPBITS(2);
+ }
+ else if (here.val == 17) {
+ NEEDBITS(here.bits + 3);
+ DROPBITS(here.bits);
+ len = 0;
+ copy = 3 + BITS(3);
+ DROPBITS(3);
+ }
+ else {
+ NEEDBITS(here.bits + 7);
+ DROPBITS(here.bits);
+ len = 0;
+ copy = 11 + BITS(7);
+ DROPBITS(7);
+ }
+ if (state->have + copy > state->nlen + state->ndist) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ while (copy--)
+ state->lens[state->have++] = (unsigned short)len;
+ }
+ }
+
+ /* handle error breaks in while */
+ if (state->mode == BAD) break;
+
+ /* check for end-of-block code (better have one) */
+ if (state->lens[256] == 0) {
+ strm->msg = (char *)"invalid code -- missing end-of-block";
+ state->mode = BAD;
+ break;
+ }
+
+ /* build code tables -- note: do not change the lenbits or distbits
+ values here (9 and 6) without reading the comments in inftrees.h
+ concerning the ENOUGH constants, which depend on those values */
+ state->next = state->codes;
+ state->lencode = (const code FAR *)(state->next);
+ state->lenbits = 9;
+ ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid literal/lengths set";
+ state->mode = BAD;
+ break;
+ }
+ state->distcode = (const code FAR *)(state->next);
+ state->distbits = 6;
+ ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
+ &(state->next), &(state->distbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid distances set";
+ state->mode = BAD;
+ break;
+ }
+ Tracev((stderr, "inflate: codes ok\n"));
+ state->mode = LEN_;
+ if (flush == Z_TREES) goto inf_leave;
+ case LEN_:
+ state->mode = LEN;
+ case LEN:
+ if (have >= 6 && left >= 258) {
+ RESTORE();
+ inflate_fast(strm, out);
+ LOAD();
+ if (state->mode == TYPE)
+ state->back = -1;
+ break;
+ }
+ state->back = 0;
+ for (;;) {
+ here = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (here.op && (here.op & 0xf0) == 0) {
+ last = here;
+ for (;;) {
+ here = state->lencode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ state->back += last.bits;
+ }
+ DROPBITS(here.bits);
+ state->back += here.bits;
+ state->length = (unsigned)here.val;
+ if ((int)(here.op) == 0) {
+ Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
+ "inflate: literal '%c'\n" :
+ "inflate: literal 0x%02x\n", here.val));
+ state->mode = LIT;
+ break;
+ }
+ if (here.op & 32) {
+ Tracevv((stderr, "inflate: end of block\n"));
+ state->back = -1;
+ state->mode = TYPE;
+ break;
+ }
+ if (here.op & 64) {
+ strm->msg = (char *)"invalid literal/length code";
+ state->mode = BAD;
+ break;
+ }
+ state->extra = (unsigned)(here.op) & 15;
+ state->mode = LENEXT;
+ case LENEXT:
+ if (state->extra) {
+ NEEDBITS(state->extra);
+ state->length += BITS(state->extra);
+ DROPBITS(state->extra);
+ state->back += state->extra;
+ }
+ Tracevv((stderr, "inflate: length %u\n", state->length));
+ state->was = state->length;
+ state->mode = DIST;
+ case DIST:
+ for (;;) {
+ here = state->distcode[BITS(state->distbits)];
+ if ((unsigned)(here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if ((here.op & 0xf0) == 0) {
+ last = here;
+ for (;;) {
+ here = state->distcode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + here.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ state->back += last.bits;
+ }
+ DROPBITS(here.bits);
+ state->back += here.bits;
+ if (here.op & 64) {
+ strm->msg = (char *)"invalid distance code";
+ state->mode = BAD;
+ break;
+ }
+ state->offset = (unsigned)here.val;
+ state->extra = (unsigned)(here.op) & 15;
+ state->mode = DISTEXT;
+ case DISTEXT:
+ if (state->extra) {
+ NEEDBITS(state->extra);
+ state->offset += BITS(state->extra);
+ DROPBITS(state->extra);
+ state->back += state->extra;
+ }
+#ifdef INFLATE_STRICT
+ if (state->offset > state->dmax) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ Tracevv((stderr, "inflate: distance %u\n", state->offset));
+ state->mode = MATCH;
+ case MATCH:
+ if (left == 0) goto inf_leave;
+ copy = out - left;
+ if (state->offset > copy) { /* copy from window */
+ copy = state->offset - copy;
+ if (copy > state->whave) {
+ if (state->sane) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
+ Trace((stderr, "inflate.c too far\n"));
+ copy -= state->whave;
+ if (copy > state->length) copy = state->length;
+ if (copy > left) copy = left;
+ left -= copy;
+ state->length -= copy;
+ do {
+ *put++ = 0;
+ } while (--copy);
+ if (state->length == 0) state->mode = LEN;
+ break;
+#endif
+ }
+ if (copy > state->wnext) {
+ copy -= state->wnext;
+ from = state->window + (state->wsize - copy);
+ }
+ else
+ from = state->window + (state->wnext - copy);
+ if (copy > state->length) copy = state->length;
+ }
+ else { /* copy from output */
+ from = put - state->offset;
+ copy = state->length;
+ }
+ if (copy > left) copy = left;
+ left -= copy;
+ state->length -= copy;
+ do {
+ *put++ = *from++;
+ } while (--copy);
+ if (state->length == 0) state->mode = LEN;
+ break;
+ case LIT:
+ if (left == 0) goto inf_leave;
+ *put++ = (unsigned char)(state->length);
+ left--;
+ state->mode = LEN;
+ break;
+ case CHECK:
+ if (state->wrap) {
+ NEEDBITS(32);
+ out -= left;
+ strm->total_out += out;
+ state->total += out;
+ if ((state->wrap & 4) && out)
+ strm->adler = state->check =
+ UPDATE(state->check, put - out, out);
+ out = left;
+ if ((state->wrap & 4) && (
+#ifdef GUNZIP
+ state->flags ? hold :
+#endif
+ ZSWAP32(hold)) != state->check) {
+ strm->msg = (char *)"incorrect data check";
+ state->mode = BAD;
+ break;
+ }
+ INITBITS();
+ Tracev((stderr, "inflate: check matches trailer\n"));
+ }
+#ifdef GUNZIP
+ state->mode = LENGTH;
+ case LENGTH:
+ if (state->wrap && state->flags) {
+ NEEDBITS(32);
+ if (hold != (state->total & 0xffffffffUL)) {
+ strm->msg = (char *)"incorrect length check";
+ state->mode = BAD;
+ break;
+ }
+ INITBITS();
+ Tracev((stderr, "inflate: length matches trailer\n"));
+ }
+#endif
+ state->mode = DONE;
+ case DONE:
+ ret = Z_STREAM_END;
+ goto inf_leave;
+ case BAD:
+ ret = Z_DATA_ERROR;
+ goto inf_leave;
+ case MEM:
+ return Z_MEM_ERROR;
+ case SYNC:
+ default:
+ return Z_STREAM_ERROR;
+ }
+
+ /*
+ Return from inflate(), updating the total counts and the check value.
+ If there was no progress during the inflate() call, return a buffer
+ error. Call updatewindow() to create and/or update the window state.
+ Note: a memory error from inflate() is non-recoverable.
+ */
+ inf_leave:
+ RESTORE();
+ if (state->wsize || (out != strm->avail_out && state->mode < BAD &&
+ (state->mode < CHECK || flush != Z_FINISH)))
+ if (updatewindow(strm, strm->next_out, out - strm->avail_out)) {
+ state->mode = MEM;
+ return Z_MEM_ERROR;
+ }
+ in -= strm->avail_in;
+ out -= strm->avail_out;
+ strm->total_in += in;
+ strm->total_out += out;
+ state->total += out;
+ if ((state->wrap & 4) && out)
+ strm->adler = state->check =
+ UPDATE(state->check, strm->next_out - out, out);
+ strm->data_type = (int)state->bits + (state->last ? 64 : 0) +
+ (state->mode == TYPE ? 128 : 0) +
+ (state->mode == LEN_ || state->mode == COPY_ ? 256 : 0);
+ if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
+ ret = Z_BUF_ERROR;
+ return ret;
+}
+
+int ZEXPORT inflateEnd(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+ if (inflateStateCheck(strm))
+ return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if (state->window != Z_NULL) ZFREE(strm, state->window);
+ ZFREE(strm, strm->state);
+ strm->state = Z_NULL;
+ Tracev((stderr, "inflate: end\n"));
+ return Z_OK;
+}
+
+int ZEXPORT inflateGetDictionary(strm, dictionary, dictLength)
+z_streamp strm;
+Bytef *dictionary;
+uInt *dictLength;
+{
+ struct inflate_state FAR *state;
+
+ /* check state */
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+
+ /* copy dictionary */
+ if (state->whave && dictionary != Z_NULL) {
+ zmemcpy(dictionary, state->window + state->wnext,
+ state->whave - state->wnext);
+ zmemcpy(dictionary + state->whave - state->wnext,
+ state->window, state->wnext);
+ }
+ if (dictLength != Z_NULL)
+ *dictLength = state->whave;
+ return Z_OK;
+}
+
+int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength)
+z_streamp strm;
+const Bytef *dictionary;
+uInt dictLength;
+{
+ struct inflate_state FAR *state;
+ unsigned long dictid;
+ int ret;
+
+ /* check state */
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if (state->wrap != 0 && state->mode != DICT)
+ return Z_STREAM_ERROR;
+
+ /* check for correct dictionary identifier */
+ if (state->mode == DICT) {
+ dictid = adler32(0L, Z_NULL, 0);
+ dictid = adler32(dictid, dictionary, dictLength);
+ if (dictid != state->check)
+ return Z_DATA_ERROR;
+ }
+
+ /* copy dictionary to window using updatewindow(), which will amend the
+ existing dictionary if appropriate */
+ ret = updatewindow(strm, dictionary + dictLength, dictLength);
+ if (ret) {
+ state->mode = MEM;
+ return Z_MEM_ERROR;
+ }
+ state->havedict = 1;
+ Tracev((stderr, "inflate: dictionary set\n"));
+ return Z_OK;
+}
+
+int ZEXPORT inflateGetHeader(strm, head)
+z_streamp strm;
+gz_headerp head;
+{
+ struct inflate_state FAR *state;
+
+ /* check state */
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if ((state->wrap & 2) == 0) return Z_STREAM_ERROR;
+
+ /* save header structure */
+ state->head = head;
+ head->done = 0;
+ return Z_OK;
+}
+
+/*
+ Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found
+ or when out of input. When called, *have is the number of pattern bytes
+ found in order so far, in 0..3. On return *have is updated to the new
+ state. If on return *have equals four, then the pattern was found and the
+ return value is how many bytes were read including the last byte of the
+ pattern. If *have is less than four, then the pattern has not been found
+ yet and the return value is len. In the latter case, syncsearch() can be
+ called again with more data and the *have state. *have is initialized to
+ zero for the first call.
+ */
+local unsigned syncsearch(have, buf, len)
+unsigned FAR *have;
+const unsigned char FAR *buf;
+unsigned len;
+{
+ unsigned got;
+ unsigned next;
+
+ got = *have;
+ next = 0;
+ while (next < len && got < 4) {
+ if ((int)(buf[next]) == (got < 2 ? 0 : 0xff))
+ got++;
+ else if (buf[next])
+ got = 0;
+ else
+ got = 4 - got;
+ next++;
+ }
+ *have = got;
+ return next;
+}
+
+int ZEXPORT inflateSync(strm)
+z_streamp strm;
+{
+ unsigned len; /* number of bytes to look at or looked at */
+ unsigned long in, out; /* temporary to save total_in and total_out */
+ unsigned char buf[4]; /* to restore bit buffer to byte string */
+ struct inflate_state FAR *state;
+
+ /* check parameters */
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR;
+
+ /* if first time, start search in bit buffer */
+ if (state->mode != SYNC) {
+ state->mode = SYNC;
+ state->hold <<= state->bits & 7;
+ state->bits -= state->bits & 7;
+ len = 0;
+ while (state->bits >= 8) {
+ buf[len++] = (unsigned char)(state->hold);
+ state->hold >>= 8;
+ state->bits -= 8;
+ }
+ state->have = 0;
+ syncsearch(&(state->have), buf, len);
+ }
+
+ /* search available input */
+ len = syncsearch(&(state->have), strm->next_in, strm->avail_in);
+ strm->avail_in -= len;
+ strm->next_in += len;
+ strm->total_in += len;
+
+ /* return no joy or set up to restart inflate() on a new block */
+ if (state->have != 4) return Z_DATA_ERROR;
+ in = strm->total_in; out = strm->total_out;
+ inflateReset(strm);
+ strm->total_in = in; strm->total_out = out;
+ state->mode = TYPE;
+ return Z_OK;
+}
+
+/*
+ Returns true if inflate is currently at the end of a block generated by
+ Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
+ implementation to provide an additional safety check. PPP uses
+ Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
+ block. When decompressing, PPP checks that at the end of input packet,
+ inflate is waiting for these length bytes.
+ */
+int ZEXPORT inflateSyncPoint(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ return state->mode == STORED && state->bits == 0;
+}
+
+int ZEXPORT inflateCopy(dest, source)
+z_streamp dest;
+z_streamp source;
+{
+ struct inflate_state FAR *state;
+ struct inflate_state FAR *copy;
+ unsigned char FAR *window;
+ unsigned wsize;
+
+ /* check input */
+ if (inflateStateCheck(source) || dest == Z_NULL)
+ return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)source->state;
+
+ /* allocate space */
+ copy = (struct inflate_state FAR *)
+ ZALLOC(source, 1, sizeof(struct inflate_state));
+ if (copy == Z_NULL) return Z_MEM_ERROR;
+ window = Z_NULL;
+ if (state->window != Z_NULL) {
+ window = (unsigned char FAR *)
+ ZALLOC(source, 1U << state->wbits, sizeof(unsigned char));
+ if (window == Z_NULL) {
+ ZFREE(source, copy);
+ return Z_MEM_ERROR;
+ }
+ }
+
+ /* copy state */
+ zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));
+ zmemcpy((voidpf)copy, (voidpf)state, sizeof(struct inflate_state));
+ copy->strm = dest;
+ if (state->lencode >= state->codes &&
+ state->lencode <= state->codes + ENOUGH - 1) {
+ copy->lencode = copy->codes + (state->lencode - state->codes);
+ copy->distcode = copy->codes + (state->distcode - state->codes);
+ }
+ copy->next = copy->codes + (state->next - state->codes);
+ if (window != Z_NULL) {
+ wsize = 1U << state->wbits;
+ zmemcpy(window, state->window, wsize);
+ }
+ copy->window = window;
+ dest->state = (struct internal_state FAR *)copy;
+ return Z_OK;
+}
+
+int ZEXPORT inflateUndermine(strm, subvert)
+z_streamp strm;
+int subvert;
+{
+ struct inflate_state FAR *state;
+
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
+ state->sane = !subvert;
+ return Z_OK;
+#else
+ (void)subvert;
+ state->sane = 1;
+ return Z_DATA_ERROR;
+#endif
+}
+
+int ZEXPORT inflateValidate(strm, check)
+z_streamp strm;
+int check;
+{
+ struct inflate_state FAR *state;
+
+ if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
+ state = (struct inflate_state FAR *)strm->state;
+ if (check)
+ state->wrap |= 4;
+ else
+ state->wrap &= ~4;
+ return Z_OK;
+}
+
+long ZEXPORT inflateMark(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+
+ if (inflateStateCheck(strm))
+ return -(1L << 16);
+ state = (struct inflate_state FAR *)strm->state;
+ return (long)(((unsigned long)((long)state->back)) << 16) +
+ (state->mode == COPY ? state->length :
+ (state->mode == MATCH ? state->was - state->length : 0));
+}
+
+unsigned long ZEXPORT inflateCodesUsed(strm)
+z_streamp strm;
+{
+ struct inflate_state FAR *state;
+ if (inflateStateCheck(strm)) return (unsigned long)-1;
+ state = (struct inflate_state FAR *)strm->state;
+ return (unsigned long)(state->next - state->codes);
+}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inflate.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inflate.h
new file mode 100644
index 00000000..a46cce6b
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inflate.h
@@ -0,0 +1,125 @@
+/* inflate.h -- internal inflate state definition
+ * Copyright (C) 1995-2016 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* define NO_GZIP when compiling if you want to disable gzip header and
+ trailer decoding by inflate(). NO_GZIP would be used to avoid linking in
+ the crc code when it is not needed. For shared libraries, gzip decoding
+ should be left enabled. */
+#ifndef NO_GZIP
+# define GUNZIP
+#endif
+
+/* Possible inflate modes between inflate() calls */
+typedef enum {
+ HEAD = 16180, /* i: waiting for magic header */
+ FLAGS, /* i: waiting for method and flags (gzip) */
+ TIME, /* i: waiting for modification time (gzip) */
+ OS, /* i: waiting for extra flags and operating system (gzip) */
+ EXLEN, /* i: waiting for extra length (gzip) */
+ EXTRA, /* i: waiting for extra bytes (gzip) */
+ NAME, /* i: waiting for end of file name (gzip) */
+ COMMENT, /* i: waiting for end of comment (gzip) */
+ HCRC, /* i: waiting for header crc (gzip) */
+ DICTID, /* i: waiting for dictionary check value */
+ DICT, /* waiting for inflateSetDictionary() call */
+ TYPE, /* i: waiting for type bits, including last-flag bit */
+ TYPEDO, /* i: same, but skip check to exit inflate on new block */
+ STORED, /* i: waiting for stored size (length and complement) */
+ COPY_, /* i/o: same as COPY below, but only first time in */
+ COPY, /* i/o: waiting for input or output to copy stored block */
+ TABLE, /* i: waiting for dynamic block table lengths */
+ LENLENS, /* i: waiting for code length code lengths */
+ CODELENS, /* i: waiting for length/lit and distance code lengths */
+ LEN_, /* i: same as LEN below, but only first time in */
+ LEN, /* i: waiting for length/lit/eob code */
+ LENEXT, /* i: waiting for length extra bits */
+ DIST, /* i: waiting for distance code */
+ DISTEXT, /* i: waiting for distance extra bits */
+ MATCH, /* o: waiting for output space to copy string */
+ LIT, /* o: waiting for output space to write literal */
+ CHECK, /* i: waiting for 32-bit check value */
+ LENGTH, /* i: waiting for 32-bit length (gzip) */
+ DONE, /* finished check, done -- remain here until reset */
+ BAD, /* got a data error -- remain here until reset */
+ MEM, /* got an inflate() memory error -- remain here until reset */
+ SYNC /* looking for synchronization bytes to restart inflate() */
+} inflate_mode;
+
+/*
+ State transitions between above modes -
+
+ (most modes can go to BAD or MEM on error -- not shown for clarity)
+
+ Process header:
+ HEAD -> (gzip) or (zlib) or (raw)
+ (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME -> COMMENT ->
+ HCRC -> TYPE
+ (zlib) -> DICTID or TYPE
+ DICTID -> DICT -> TYPE
+ (raw) -> TYPEDO
+ Read deflate blocks:
+ TYPE -> TYPEDO -> STORED or TABLE or LEN_ or CHECK
+ STORED -> COPY_ -> COPY -> TYPE
+ TABLE -> LENLENS -> CODELENS -> LEN_
+ LEN_ -> LEN
+ Read deflate codes in fixed or dynamic block:
+ LEN -> LENEXT or LIT or TYPE
+ LENEXT -> DIST -> DISTEXT -> MATCH -> LEN
+ LIT -> LEN
+ Process trailer:
+ CHECK -> LENGTH -> DONE
+ */
+
+/* State maintained between inflate() calls -- approximately 7K bytes, not
+ including the allocated sliding window, which is up to 32K bytes. */
+struct inflate_state {
+ z_streamp strm; /* pointer back to this zlib stream */
+ inflate_mode mode; /* current inflate mode */
+ int last; /* true if processing last block */
+ int wrap; /* bit 0 true for zlib, bit 1 true for gzip,
+ bit 2 true to validate check value */
+ int havedict; /* true if dictionary provided */
+ int flags; /* gzip header method and flags (0 if zlib) */
+ unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */
+ unsigned long check; /* protected copy of check value */
+ unsigned long total; /* protected copy of output count */
+ gz_headerp head; /* where to save gzip header information */
+ /* sliding window */
+ unsigned wbits; /* log base 2 of requested window size */
+ unsigned wsize; /* window size or zero if not using window */
+ unsigned whave; /* valid bytes in the window */
+ unsigned wnext; /* window write index */
+ unsigned char FAR *window; /* allocated sliding window, if needed */
+ /* bit accumulator */
+ unsigned long hold; /* input bit accumulator */
+ unsigned bits; /* number of bits in "in" */
+ /* for string and stored block copying */
+ unsigned length; /* literal or length of data to copy */
+ unsigned offset; /* distance back to copy string from */
+ /* for table and code decoding */
+ unsigned extra; /* extra bits needed */
+ /* fixed and dynamic code tables */
+ code const FAR *lencode; /* starting table for length/literal codes */
+ code const FAR *distcode; /* starting table for distance codes */
+ unsigned lenbits; /* index bits for lencode */
+ unsigned distbits; /* index bits for distcode */
+ /* dynamic table building */
+ unsigned ncode; /* number of code length code lengths */
+ unsigned nlen; /* number of length code lengths */
+ unsigned ndist; /* number of distance code lengths */
+ unsigned have; /* number of code lengths in lens[] */
+ code FAR *next; /* next available space in codes[] */
+ unsigned short lens[320]; /* temporary storage for code lengths */
+ unsigned short work[288]; /* work area for code table building */
+ code codes[ENOUGH]; /* space for code tables */
+ int sane; /* if false, allow invalid distance too far */
+ int back; /* bits back of last unprocessed length/lit */
+ unsigned was; /* initial length of match */
+};
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inftrees.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inftrees.c
new file mode 100644
index 00000000..2ea08fc1
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inftrees.c
@@ -0,0 +1,304 @@
+/* inftrees.c -- generate Huffman trees for efficient decoding
+ * Copyright (C) 1995-2017 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include "zutil.h"
+#include "inftrees.h"
+
+#define MAXBITS 15
+
+const char inflate_copyright[] =
+ " inflate 1.2.11 Copyright 1995-2017 Mark Adler ";
+/*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+
+/*
+ Build a set of tables to decode the provided canonical Huffman code.
+ The code lengths are lens[0..codes-1]. The result starts at *table,
+ whose indices are 0..2^bits-1. work is a writable array of at least
+ lens shorts, which is used as a work area. type is the type of code
+ to be generated, CODES, LENS, or DISTS. On return, zero is success,
+ -1 is an invalid code, and +1 means that ENOUGH isn't enough. table
+ on return points to the next available entry's address. bits is the
+ requested root table index bits, and on return it is the actual root
+ table index bits. It will differ if the request is greater than the
+ longest code or if it is less than the shortest code.
+ */
+int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work)
+codetype type;
+unsigned short FAR *lens;
+unsigned codes;
+code FAR * FAR *table;
+unsigned FAR *bits;
+unsigned short FAR *work;
+{
+ unsigned len; /* a code's length in bits */
+ unsigned sym; /* index of code symbols */
+ unsigned min, max; /* minimum and maximum code lengths */
+ unsigned root; /* number of index bits for root table */
+ unsigned curr; /* number of index bits for current table */
+ unsigned drop; /* code bits to drop for sub-table */
+ int left; /* number of prefix codes available */
+ unsigned used; /* code entries in table used */
+ unsigned huff; /* Huffman code */
+ unsigned incr; /* for incrementing code, index */
+ unsigned fill; /* index for replicating entries */
+ unsigned low; /* low bits for current root entry */
+ unsigned mask; /* mask for low root bits */
+ code here; /* table entry for duplication */
+ code FAR *next; /* next available space in table */
+ const unsigned short FAR *base; /* base value table to use */
+ const unsigned short FAR *extra; /* extra bits table to use */
+ unsigned match; /* use base and extra for symbol >= match */
+ unsigned short count[MAXBITS+1]; /* number of codes of each length */
+ unsigned short offs[MAXBITS+1]; /* offsets in table for each length */
+ static const unsigned short lbase[31] = { /* Length codes 257..285 base */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ static const unsigned short lext[31] = { /* Length codes 257..285 extra */
+ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
+ 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 77, 202};
+ static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577, 0, 0};
+ static const unsigned short dext[32] = { /* Distance codes 0..29 extra */
+ 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
+ 28, 28, 29, 29, 64, 64};
+
+ /*
+ Process a set of code lengths to create a canonical Huffman code. The
+ code lengths are lens[0..codes-1]. Each length corresponds to the
+ symbols 0..codes-1. The Huffman code is generated by first sorting the
+ symbols by length from short to long, and retaining the symbol order
+ for codes with equal lengths. Then the code starts with all zero bits
+ for the first code of the shortest length, and the codes are integer
+ increments for the same length, and zeros are appended as the length
+ increases. For the deflate format, these bits are stored backwards
+ from their more natural integer increment ordering, and so when the
+ decoding tables are built in the large loop below, the integer codes
+ are incremented backwards.
+
+ This routine assumes, but does not check, that all of the entries in
+ lens[] are in the range 0..MAXBITS. The caller must assure this.
+ 1..MAXBITS is interpreted as that code length. zero means that that
+ symbol does not occur in this code.
+
+ The codes are sorted by computing a count of codes for each length,
+ creating from that a table of starting indices for each length in the
+ sorted table, and then entering the symbols in order in the sorted
+ table. The sorted table is work[], with that space being provided by
+ the caller.
+
+ The length counts are used for other purposes as well, i.e. finding
+ the minimum and maximum length codes, determining if there are any
+ codes at all, checking for a valid set of lengths, and looking ahead
+ at length counts to determine sub-table sizes when building the
+ decoding tables.
+ */
+
+ /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
+ for (len = 0; len <= MAXBITS; len++)
+ count[len] = 0;
+ for (sym = 0; sym < codes; sym++)
+ count[lens[sym]]++;
+
+ /* bound code lengths, force root to be within code lengths */
+ root = *bits;
+ for (max = MAXBITS; max >= 1; max--)
+ if (count[max] != 0) break;
+ if (root > max) root = max;
+ if (max == 0) { /* no symbols to code at all */
+ here.op = (unsigned char)64; /* invalid code marker */
+ here.bits = (unsigned char)1;
+ here.val = (unsigned short)0;
+ *(*table)++ = here; /* make a table to force an error */
+ *(*table)++ = here;
+ *bits = 1;
+ return 0; /* no symbols, but wait for decoding to report error */
+ }
+ for (min = 1; min < max; min++)
+ if (count[min] != 0) break;
+ if (root < min) root = min;
+
+ /* check for an over-subscribed or incomplete set of lengths */
+ left = 1;
+ for (len = 1; len <= MAXBITS; len++) {
+ left <<= 1;
+ left -= count[len];
+ if (left < 0) return -1; /* over-subscribed */
+ }
+ if (left > 0 && (type == CODES || max != 1))
+ return -1; /* incomplete set */
+
+ /* generate offsets into symbol table for each length for sorting */
+ offs[1] = 0;
+ for (len = 1; len < MAXBITS; len++)
+ offs[len + 1] = offs[len] + count[len];
+
+ /* sort symbols by length, by symbol order within each length */
+ for (sym = 0; sym < codes; sym++)
+ if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym;
+
+ /*
+ Create and fill in decoding tables. In this loop, the table being
+ filled is at next and has curr index bits. The code being used is huff
+ with length len. That code is converted to an index by dropping drop
+ bits off of the bottom. For codes where len is less than drop + curr,
+ those top drop + curr - len bits are incremented through all values to
+ fill the table with replicated entries.
+
+ root is the number of index bits for the root table. When len exceeds
+ root, sub-tables are created pointed to by the root entry with an index
+ of the low root bits of huff. This is saved in low to check for when a
+ new sub-table should be started. drop is zero when the root table is
+ being filled, and drop is root when sub-tables are being filled.
+
+ When a new sub-table is needed, it is necessary to look ahead in the
+ code lengths to determine what size sub-table is needed. The length
+ counts are used for this, and so count[] is decremented as codes are
+ entered in the tables.
+
+ used keeps track of how many table entries have been allocated from the
+ provided *table space. It is checked for LENS and DIST tables against
+ the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in
+ the initial root table size constants. See the comments in inftrees.h
+ for more information.
+
+ sym increments through all symbols, and the loop terminates when
+ all codes of length max, i.e. all codes, have been processed. This
+ routine permits incomplete codes, so another loop after this one fills
+ in the rest of the decoding tables with invalid code markers.
+ */
+
+ /* set up for code type */
+ switch (type) {
+ case CODES:
+ base = extra = work; /* dummy value--not used */
+ match = 20;
+ break;
+ case LENS:
+ base = lbase;
+ extra = lext;
+ match = 257;
+ break;
+ default: /* DISTS */
+ base = dbase;
+ extra = dext;
+ match = 0;
+ }
+
+ /* initialize state for loop */
+ huff = 0; /* starting code */
+ sym = 0; /* starting code symbol */
+ len = min; /* starting code length */
+ next = *table; /* current table to fill in */
+ curr = root; /* current table index bits */
+ drop = 0; /* current bits to drop from code for index */
+ low = (unsigned)(-1); /* trigger new sub-table when len > root */
+ used = 1U << root; /* use root table entries */
+ mask = used - 1; /* mask for comparing low */
+
+ /* check available table space */
+ if ((type == LENS && used > ENOUGH_LENS) ||
+ (type == DISTS && used > ENOUGH_DISTS))
+ return 1;
+
+ /* process all codes and make table entries */
+ for (;;) {
+ /* create table entry */
+ here.bits = (unsigned char)(len - drop);
+ if (work[sym] + 1U < match) {
+ here.op = (unsigned char)0;
+ here.val = work[sym];
+ }
+ else if (work[sym] >= match) {
+ here.op = (unsigned char)(extra[work[sym] - match]);
+ here.val = base[work[sym] - match];
+ }
+ else {
+ here.op = (unsigned char)(32 + 64); /* end of block */
+ here.val = 0;
+ }
+
+ /* replicate for those indices with low len bits equal to huff */
+ incr = 1U << (len - drop);
+ fill = 1U << curr;
+ min = fill; /* save offset to next table */
+ do {
+ fill -= incr;
+ next[(huff >> drop) + fill] = here;
+ } while (fill != 0);
+
+ /* backwards increment the len-bit code huff */
+ incr = 1U << (len - 1);
+ while (huff & incr)
+ incr >>= 1;
+ if (incr != 0) {
+ huff &= incr - 1;
+ huff += incr;
+ }
+ else
+ huff = 0;
+
+ /* go to next symbol, update count, len */
+ sym++;
+ if (--(count[len]) == 0) {
+ if (len == max) break;
+ len = lens[work[sym]];
+ }
+
+ /* create new sub-table if needed */
+ if (len > root && (huff & mask) != low) {
+ /* if first time, transition to sub-tables */
+ if (drop == 0)
+ drop = root;
+
+ /* increment past last table */
+ next += min; /* here min is 1 << curr */
+
+ /* determine length of next table */
+ curr = len - drop;
+ left = (int)(1 << curr);
+ while (curr + drop < max) {
+ left -= count[curr + drop];
+ if (left <= 0) break;
+ curr++;
+ left <<= 1;
+ }
+
+ /* check for enough space */
+ used += 1U << curr;
+ if ((type == LENS && used > ENOUGH_LENS) ||
+ (type == DISTS && used > ENOUGH_DISTS))
+ return 1;
+
+ /* point entry in root table to sub-table */
+ low = huff & mask;
+ (*table)[low].op = (unsigned char)curr;
+ (*table)[low].bits = (unsigned char)root;
+ (*table)[low].val = (unsigned short)(next - *table);
+ }
+ }
+
+ /* fill in remaining table entry if code is incomplete (guaranteed to have
+ at most one remaining entry, since if the code is incomplete, the
+ maximum code length that was allowed to get this far is one bit) */
+ if (huff != 0) {
+ here.op = (unsigned char)64; /* invalid code marker */
+ here.bits = (unsigned char)(len - drop);
+ here.val = (unsigned short)0;
+ next[huff] = here;
+ }
+
+ /* set return parameters */
+ *table += used;
+ *bits = root;
+ return 0;
+}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inftrees.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inftrees.h
new file mode 100644
index 00000000..baa53a0b
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/inftrees.h
@@ -0,0 +1,62 @@
+/* inftrees.h -- header to use inftrees.c
+ * Copyright (C) 1995-2005, 2010 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+   part of the implementation of the compression library and is
+   subject to change. Applications should only use zlib.h.
+ */
+
+/* Structure for decoding tables. Each entry provides either the
+   information needed to do the operation requested by the code that
+   indexed that table entry, or it provides a pointer to another
+   table that indexes more bits of the code. op indicates whether
+   the entry is a pointer to another table, a literal, a length or
+   distance, an end-of-block, or an invalid code. For a table
+   pointer, the low four bits of op is the number of index bits of
+   that table. For a length or distance, the low four bits of op
+   is the number of extra bits to get after the code. bits is
+   the number of bits in this code or part of the code to drop off
+   of the bit buffer. val is the actual byte to output in the case
+   of a literal, the base length or distance, or the offset from
+   the current table to the next table. Each entry is four bytes. */
+typedef struct {
+    unsigned char op;           /* operation, extra bits, table bits */
+    unsigned char bits;         /* bits in this part of the code */
+    unsigned short val;         /* offset in table or code value */
+} code;
+
+/* op values as set by inflate_table():
+    00000000 - literal
+    0000tttt - table link, tttt != 0 is the number of table index bits
+    0001eeee - length or distance, eeee is the number of extra bits
+    01100000 - end of block
+    01000000 - invalid code
+ */
+
+/* Maximum size of the dynamic table. The maximum number of code structures is
+   1444, which is the sum of 852 for literal/length codes and 592 for distance
+   codes. These values were found by exhaustive searches using the program
+   examples/enough.c found in the zlib distribution. The arguments to that
+   program are the number of symbols, the initial root table size, and the
+   maximum bit length of a code. "enough 286 9 15" for literal/length codes
+   returns 852, and "enough 30 6 15" for distance codes returns 592.
+   The initial root table size (9 or 6) is found in the fifth argument of the
+   inflate_table() calls in inflate.c and infback.c. If the root table size is
+   changed, then these maximum sizes would need to be recalculated and
+   updated. */
+#define ENOUGH_LENS 852
+#define ENOUGH_DISTS 592
+#define ENOUGH (ENOUGH_LENS+ENOUGH_DISTS)
+
+/* Type of code to build for inflate_table() */
+typedef enum {
+    CODES,
+    LENS,
+    DISTS
+} codetype;
+
+int ZLIB_INTERNAL inflate_table OF((codetype type, unsigned short FAR *lens,
+                             unsigned codes, code FAR * FAR *table,
+                             unsigned FAR *bits, unsigned short FAR *work));
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/trees.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/trees.c
new file mode 100644
index 00000000..50cf4b45
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/trees.c
@@ -0,0 +1,1203 @@
+/* trees.c -- output deflated data using Huffman coding
+ * Copyright (C) 1995-2017 Jean-loup Gailly
+ * detect_data_type() function provided freely by Cosmin Truta, 2006
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process uses several Huffman trees. The more
+ * common source values are represented by shorter bit sequences.
+ *
+ * Each code tree is stored in a compressed form which is itself
+ * a Huffman encoding of the lengths of all the code strings (in
+ * ascending order by source values). The actual code strings are
+ * reconstructed from the lengths in the inflate process, as described
+ * in the deflate specification.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification".
+ * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
+ *
+ * Storer, James A.
+ * Data Compression: Methods and Theory, pp. 49-50.
+ * Computer Science Press, 1988. ISBN 0-7167-8156-5.
+ *
+ * Sedgewick, R.
+ * Algorithms, p290.
+ * Addison-Wesley, 1983. ISBN 0-201-06672-6.
+ */
+
+/* @(#) $Id$ */
+
+/* #define GEN_TREES_H */
+
+#include "deflate.h"
+
+#ifdef ZLIB_DEBUG
+# include <ctype.h>
+#endif
+
+/* ===========================================================================
+ * Constants
+ */
+
+#define MAX_BL_BITS 7
+/* Bit length codes must not exceed MAX_BL_BITS bits */
+
+#define END_BLOCK 256
+/* end of block literal code */
+
+#define REP_3_6 16
+/* repeat previous bit length 3-6 times (2 bits of repeat count) */
+
+#define REPZ_3_10 17
+/* repeat a zero length 3-10 times (3 bits of repeat count) */
+
+#define REPZ_11_138 18
+/* repeat a zero length 11-138 times (7 bits of repeat count) */
+
+local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
+   = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
+
+local const int extra_dbits[D_CODES] /* extra bits for each distance code */
+   = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
+
+local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
+   = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
+
+local const uch bl_order[BL_CODES]
+   = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
+/* The lengths of the bit length codes are sent in order of decreasing
+ * probability, to avoid transmitting the lengths for unused bit length codes.
+ */
+
+/* ===========================================================================
+ * Local data. These are initialized only once.
+ */
+
+#define DIST_CODE_LEN  512 /* see definition of array dist_code below */
+
+#if defined(GEN_TREES_H) || !defined(STDC)
+/* non ANSI compilers may not accept trees.h */
+
+local ct_data static_ltree[L_CODES+2];
+/* The static literal tree. Since the bit lengths are imposed, there is no
+ * need for the L_CODES extra codes used during heap construction. However
+ * The codes 286 and 287 are needed to build a canonical tree (see _tr_init
+ * below).
+ */
+
+local ct_data static_dtree[D_CODES];
+/* The static distance tree. (Actually a trivial tree since all codes use
+ * 5 bits.)
+ */
+
+uch _dist_code[DIST_CODE_LEN];
+/* Distance codes. The first 256 values correspond to the distances
+ * 3 .. 258, the last 256 values correspond to the top 8 bits of
+ * the 15 bit distances.
+ */
+
+uch _length_code[MAX_MATCH-MIN_MATCH+1];
+/* length code for each normalized match length (0 == MIN_MATCH) */
+
+local int base_length[LENGTH_CODES];
+/* First normalized length for each code (0 = MIN_MATCH) */
+
+local int base_dist[D_CODES];
+/* First normalized distance for each code (0 = distance of 1) */
+
+#else
+#  include "trees.h"
+#endif /* GEN_TREES_H */
+
+/* Describes one of the three tree types (literal/length, distance,
+ * bit-length): its imposed static tree (if any), extra-bit table,
+ * element count and maximum code length.
+ */
+struct static_tree_desc_s {
+    const ct_data *static_tree;  /* static tree or NULL */
+    const intf *extra_bits;      /* extra bits for each code or NULL */
+    int     extra_base;          /* base index for extra_bits */
+    int     elems;               /* max number of elements in the tree */
+    int     max_length;          /* max bit length for the codes */
+};
+
+/* Fixed, read-only descriptors for the three tree types. */
+local const static_tree_desc  static_l_desc =
+{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
+
+local const static_tree_desc  static_d_desc =
+{static_dtree, extra_dbits, 0,          D_CODES, MAX_BITS};
+
+local const static_tree_desc  static_bl_desc =
+{(const ct_data *)0, extra_blbits, 0,   BL_CODES, MAX_BL_BITS};
+
+/* ===========================================================================
+ * Local (static) routines in this file.
+ */
+
+local void tr_static_init OF((void));
+local void init_block OF((deflate_state *s));
+local void pqdownheap OF((deflate_state *s, ct_data *tree, int k));
+local void gen_bitlen OF((deflate_state *s, tree_desc *desc));
+local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count));
+local void build_tree OF((deflate_state *s, tree_desc *desc));
+local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local void send_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local int build_bl_tree OF((deflate_state *s));
+local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
+ int blcodes));
+local void compress_block OF((deflate_state *s, const ct_data *ltree,
+ const ct_data *dtree));
+local int detect_data_type OF((deflate_state *s));
+local unsigned bi_reverse OF((unsigned value, int length));
+local void bi_windup OF((deflate_state *s));
+local void bi_flush OF((deflate_state *s));
+
+#ifdef GEN_TREES_H
+local void gen_trees_header OF((void));
+#endif
+
+#ifndef ZLIB_DEBUG
+#  define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
+   /* Send a code of the given tree. c and tree must not have side effects */
+
+#else /* !ZLIB_DEBUG */
+#  define send_code(s, c, tree) \
+     { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
+       send_bits(s, tree[c].Code, tree[c].Len); }
+#endif
+
+/* ===========================================================================
+ * Output a short LSB first on the stream.
+ * IN assertion: there is enough room in pendingBuf.
+ */
+#define put_short(s, w) { \
+    put_byte(s, (uch)((w) & 0xff)); \
+    put_byte(s, (uch)((ush)(w) >> 8)); \
+}
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ * (NOTE(review): the ZLIB_DEBUG Assert below actually enforces
+ * 1 <= length <= 15; all call sites in this file stay within that.)
+ */
+#ifdef ZLIB_DEBUG
+local void send_bits      OF((deflate_state *s, int value, int length));
+
+local void send_bits(s, value, length)
+    deflate_state *s;
+    int value;  /* value to send */
+    int length; /* number of bits */
+{
+    Tracevv((stderr," l %2d v %4x ", length, value));
+    Assert(length > 0 && length <= 15, "invalid length");
+    s->bits_sent += (ulg)length;
+
+    /* If not enough room in bi_buf, use (valid) bits from bi_buf and
+     * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
+     * unused bits in value.
+     */
+    if (s->bi_valid > (int)Buf_size - length) {
+        s->bi_buf |= (ush)value << s->bi_valid;
+        put_short(s, s->bi_buf);
+        s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
+        s->bi_valid += length - Buf_size;
+    } else {
+        s->bi_buf |= (ush)value << s->bi_valid;
+        s->bi_valid += length;
+    }
+}
+#else /* !ZLIB_DEBUG */
+
+#define send_bits(s, value, length) \
+{ int len = length;\
+  if (s->bi_valid > (int)Buf_size - len) {\
+    int val = (int)value;\
+    s->bi_buf |= (ush)val << s->bi_valid;\
+    put_short(s, s->bi_buf);\
+    s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
+    s->bi_valid += len - Buf_size;\
+  } else {\
+    s->bi_buf |= (ush)(value) << s->bi_valid;\
+    s->bi_valid += len;\
+  }\
+}
+#endif /* ZLIB_DEBUG */
+
+
+/* the arguments must not have side effects */
+
+/* ===========================================================================
+ * Initialize the various 'constant' tables.
+ */
+local void tr_static_init()
+{
+#if defined(GEN_TREES_H) || !defined(STDC)
+    static int static_init_done = 0;
+    int n;        /* iterates over tree elements */
+    int bits;     /* bit counter */
+    int length;   /* length value */
+    int code;     /* code value */
+    int dist;     /* distance index */
+    ush bl_count[MAX_BITS+1];
+    /* number of codes at each bit length for an optimal tree */
+
+    if (static_init_done) return;
+
+    /* For some embedded targets, global variables are not initialized: */
+#ifdef NO_INIT_GLOBAL_POINTERS
+    static_l_desc.static_tree = static_ltree;
+    static_l_desc.extra_bits = extra_lbits;
+    static_d_desc.static_tree = static_dtree;
+    static_d_desc.extra_bits = extra_dbits;
+    static_bl_desc.extra_bits = extra_blbits;
+#endif
+
+    /* Initialize the mapping length (0..255) -> length code (0..28) */
+    length = 0;
+    for (code = 0; code < LENGTH_CODES-1; code++) {
+        base_length[code] = length;
+        for (n = 0; n < (1<<extra_lbits[code]); n++) {
+            _length_code[length++] = (uch)code;
+        }
+    }
+    Assert (length == 256, "tr_static_init: length != 256");
+    /* Note that the length 255 (match length 258) can be represented
+     * in two different ways: code 284 + 5 bits or code 285, so we
+     * overwrite length_code[255] to use the best encoding:
+     */
+    _length_code[length-1] = (uch)code;
+
+    /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
+    dist = 0;
+    for (code = 0 ; code < 16; code++) {
+        base_dist[code] = dist;
+        for (n = 0; n < (1<<extra_dbits[code]); n++) {
+            _dist_code[dist++] = (uch)code;
+        }
+    }
+    Assert (dist == 256, "tr_static_init: dist != 256");
+    dist >>= 7; /* from now on, all distances are divided by 128 */
+    /* Distances >= 512 are looked up through their top 8 bits, stored in
+     * the second half of the table (_dist_code[256..511]):
+     */
+    for ( ; code < D_CODES; code++) {
+        base_dist[code] = dist << 7;
+        for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
+            _dist_code[256 + dist++] = (uch)code;
+        }
+    }
+    Assert (dist == 256, "tr_static_init: 256+dist != 512");
+
+    /* Construct the codes of the static literal tree */
+    for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
+    n = 0;
+    while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
+    while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
+    while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
+    while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
+    /* Codes 286 and 287 do not exist, but we must include them in the
+     * tree construction to get a canonical Huffman tree (longest code
+     * all ones)
+     */
+    gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
+
+    /* The static distance tree is trivial: */
+    for (n = 0; n < D_CODES; n++) {
+        static_dtree[n].Len = 5;
+        static_dtree[n].Code = bi_reverse((unsigned)n, 5);
+    }
+    static_init_done = 1;
+
+#  ifdef GEN_TREES_H
+    gen_trees_header();
+#  endif
+#endif /* defined(GEN_TREES_H) || !defined(STDC) */
+}
+
+/* ===========================================================================
+ * Generate the file trees.h describing the static trees.
+ */
+#ifdef GEN_TREES_H
+# ifndef ZLIB_DEBUG
+# include <stdio.h>
+# endif
+
+# define SEPARATOR(i, last, width) \
+ ((i) == (last)? "\n};\n\n" : \
+ ((i) % (width) == (width)-1 ? ",\n" : ", "))
+
+void gen_trees_header()
+{
+    FILE *header = fopen("trees.h", "w");
+    int i;
+
+    /* NOTE(review): a failed fopen() is only caught by this Assert, which is
+     * presumably compiled out without ZLIB_DEBUG — acceptable for a
+     * build-time generator run under -DGEN_TREES_H, but verify if reused. */
+    Assert (header != NULL, "Can't open trees.h");
+    fprintf(header,
+            "/* header created automatically with -DGEN_TREES_H */\n\n");
+
+    fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n");
+    for (i = 0; i < L_CODES+2; i++) {
+        fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code,
+                static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5));
+    }
+
+    fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n");
+    for (i = 0; i < D_CODES; i++) {
+        fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code,
+                static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5));
+    }
+
+    fprintf(header, "const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {\n");
+    for (i = 0; i < DIST_CODE_LEN; i++) {
+        fprintf(header, "%2u%s", _dist_code[i],
+                SEPARATOR(i, DIST_CODE_LEN-1, 20));
+    }
+
+    fprintf(header,
+        "const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {\n");
+    for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) {
+        fprintf(header, "%2u%s", _length_code[i],
+                SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20));
+    }
+
+    fprintf(header, "local const int base_length[LENGTH_CODES] = {\n");
+    for (i = 0; i < LENGTH_CODES; i++) {
+        fprintf(header, "%1u%s", base_length[i],
+                SEPARATOR(i, LENGTH_CODES-1, 20));
+    }
+
+    fprintf(header, "local const int base_dist[D_CODES] = {\n");
+    for (i = 0; i < D_CODES; i++) {
+        fprintf(header, "%5u%s", base_dist[i],
+                SEPARATOR(i, D_CODES-1, 10));
+    }
+
+    fclose(header);
+}
+#endif /* GEN_TREES_H */
+
+/* ===========================================================================
+ * Initialize the tree data structures for a new zlib stream.
+ */
+void ZLIB_INTERNAL _tr_init(s)
+    deflate_state *s;
+{
+    tr_static_init();
+
+    /* Attach each dynamic tree to its fixed (read-only) descriptor. */
+    s->l_desc.dyn_tree = s->dyn_ltree;
+    s->l_desc.stat_desc = &static_l_desc;
+
+    s->d_desc.dyn_tree = s->dyn_dtree;
+    s->d_desc.stat_desc = &static_d_desc;
+
+    s->bl_desc.dyn_tree = s->bl_tree;
+    s->bl_desc.stat_desc = &static_bl_desc;
+
+    /* Start with an empty output bit buffer. */
+    s->bi_buf = 0;
+    s->bi_valid = 0;
+#ifdef ZLIB_DEBUG
+    s->compressed_len = 0L;
+    s->bits_sent = 0L;
+#endif
+
+    /* Initialize the first block of the first file: */
+    init_block(s);
+}
+
+/* ===========================================================================
+ * Initialize a new block.
+ */
+local void init_block(s)
+    deflate_state *s;
+{
+    int i;   /* walks the three code tables */
+
+    /* Reset all code frequencies for the next block. */
+    for (i = 0; i < L_CODES;  i++) s->dyn_ltree[i].Freq = 0;
+    for (i = 0; i < D_CODES;  i++) s->dyn_dtree[i].Freq = 0;
+    for (i = 0; i < BL_CODES; i++) s->bl_tree[i].Freq = 0;
+
+    /* Every block ends with an END_BLOCK code, so count it up front. */
+    s->dyn_ltree[END_BLOCK].Freq = 1;
+    /* Nothing seen yet: cost accumulators and literal/match counts at zero. */
+    s->opt_len = s->static_len = 0L;
+    s->last_lit = s->matches = 0;
+}
+
+#define SMALLEST 1
+/* Index within the heap array of least frequent node in the Huffman tree */
+
+
+/* ===========================================================================
+ * Remove the smallest element from the heap and recreate the heap with
+ * one less element. Updates heap and heap_len.
+ */
+#define pqremove(s, tree, top) \
+{\
+ top = s->heap[SMALLEST]; \
+ s->heap[SMALLEST] = s->heap[s->heap_len--]; \
+ pqdownheap(s, tree, SMALLEST); \
+}
+
+/* ===========================================================================
+ * Compares two subtrees, using the tree depth as tie breaker when
+ * the subtrees have equal frequency. This minimizes the worst case length.
+ */
+#define smaller(tree, n, m, depth) \
+ (tree[n].Freq < tree[m].Freq || \
+ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
+
+/* ===========================================================================
+ * Restore the heap property by moving down the tree starting at node k,
+ * exchanging a node with the smallest of its two sons if necessary, stopping
+ * when the heap property is re-established (each father smaller than its
+ * two sons).
+ */
+local void pqdownheap(s, tree, k)
+    deflate_state *s;
+    ct_data *tree;  /* the tree to restore */
+    int k;          /* node to move down */
+{
+    /* Sift the element at heap[k] down until neither son is smaller,
+     * re-establishing the heap invariant (father <= both sons, with
+     * ties broken by node depth via smaller()).
+     */
+    int item  = s->heap[k];
+    int child = k << 1;          /* start at the left son of k */
+
+    while (child <= s->heap_len) {
+        /* Pick the smaller of the two sons. */
+        if (child < s->heap_len &&
+            smaller(tree, s->heap[child+1], s->heap[child], s->depth))
+            child++;
+
+        /* Done once item is no larger than the smaller son. */
+        if (smaller(tree, item, s->heap[child], s->depth)) break;
+
+        /* Promote the son and descend one level. */
+        s->heap[k] = s->heap[child];
+        k = child;
+        child <<= 1;
+    }
+    s->heap[k] = item;
+}
+
+/* ===========================================================================
+ * Compute the optimal bit lengths for a tree and update the total bit length
+ * for the current block.
+ * IN assertion: the fields freq and dad are set, heap[heap_max] and
+ * above are the tree nodes sorted by increasing frequency.
+ * OUT assertions: the field len is set to the optimal bit length, the
+ * array bl_count contains the frequencies for each bit length.
+ * The length opt_len is updated; static_len is also updated if stree is
+ * not null.
+ */
+local void gen_bitlen(s, desc)
+    deflate_state *s;
+    tree_desc *desc;    /* the tree descriptor */
+{
+    ct_data *tree        = desc->dyn_tree;
+    int max_code         = desc->max_code;
+    const ct_data *stree = desc->stat_desc->static_tree;
+    const intf *extra    = desc->stat_desc->extra_bits;
+    int base             = desc->stat_desc->extra_base;
+    int max_length       = desc->stat_desc->max_length;
+    int h;              /* heap index */
+    int n, m;           /* iterate over the tree elements */
+    int bits;           /* bit length */
+    int xbits;          /* extra bits */
+    ush f;              /* frequency */
+    int overflow = 0;   /* number of elements with bit length too large */
+
+    for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
+
+    /* In a first pass, compute the optimal bit lengths (which may
+     * overflow in the case of the bit length tree).
+     */
+    tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
+
+    for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
+        n = s->heap[h];
+        bits = tree[tree[n].Dad].Len + 1;
+        if (bits > max_length) bits = max_length, overflow++;
+        tree[n].Len = (ush)bits;
+        /* We overwrite tree[n].Dad which is no longer needed */
+
+        if (n > max_code) continue; /* not a leaf node */
+
+        s->bl_count[bits]++;
+        xbits = 0;
+        if (n >= base) xbits = extra[n-base];
+        f = tree[n].Freq;
+        s->opt_len += (ulg)f * (unsigned)(bits + xbits);
+        if (stree) s->static_len += (ulg)f * (unsigned)(stree[n].Len + xbits);
+    }
+    if (overflow == 0) return;
+
+    Tracev((stderr,"\nbit length overflow\n"));
+    /* This happens for example on obj2 and pic of the Calgary corpus */
+
+    /* Find the first bit length which could increase: repeatedly demote one
+     * shorter leaf by one bit to make room for two of the overflow items. */
+    do {
+        bits = max_length-1;
+        while (s->bl_count[bits] == 0) bits--;
+        s->bl_count[bits]--;      /* move one leaf down the tree */
+        s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
+        s->bl_count[max_length]--;
+        /* The brother of the overflow item also moves one step up,
+         * but this does not affect bl_count[max_length]
+         */
+        overflow -= 2;
+    } while (overflow > 0);
+
+    /* Now recompute all bit lengths, scanning in increasing frequency.
+     * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
+     * lengths instead of fixing only the wrong ones. This idea is taken
+     * from 'ar' written by Haruhiko Okumura.)
+     */
+    for (bits = max_length; bits != 0; bits--) {
+        n = s->bl_count[bits];
+        while (n != 0) {
+            m = s->heap[--h];
+            if (m > max_code) continue;
+            if ((unsigned) tree[m].Len != (unsigned) bits) {
+                Tracev((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
+                s->opt_len += ((ulg)bits - tree[m].Len) * tree[m].Freq;
+                tree[m].Len = (ush)bits;
+            }
+            n--;
+        }
+    }
+}
+
+/* ===========================================================================
+ * Generate the codes for a given tree and bit counts (which need not be
+ * optimal).
+ * IN assertion: the array bl_count contains the bit length statistics for
+ * the given tree and the field len is set for all tree elements.
+ * OUT assertion: the field code is set for all tree elements of non
+ * zero code length.
+ */
+local void gen_codes (tree, max_code, bl_count)
+    ct_data *tree;             /* the tree to decorate */
+    int max_code;              /* largest code with non zero frequency */
+    ushf *bl_count;            /* number of codes at each bit length */
+{
+    ush next_code[MAX_BITS+1]; /* next code value for each bit length */
+    unsigned code = 0;         /* running code value */
+    int bits;                  /* bit index */
+    int n;                     /* code index */
+
+    /* The distribution counts are first used to generate the code values
+     * without bit reversal. (Canonical Huffman: codes of each length are
+     * assigned consecutively, starting where the previous length left off.)
+     */
+    for (bits = 1; bits <= MAX_BITS; bits++) {
+        code = (code + bl_count[bits-1]) << 1;
+        next_code[bits] = (ush)code;
+    }
+    /* Check that the bit counts in bl_count are consistent. The last code
+     * must be all ones.
+     */
+    Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+            "inconsistent bit counts");
+    Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+    for (n = 0;  n <= max_code; n++) {
+        int len = tree[n].Len;
+        if (len == 0) continue;
+        /* Now reverse the bits */
+        tree[n].Code = (ush)bi_reverse(next_code[len]++, len);
+
+        Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
+             n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
+    }
+}
+
+/* ===========================================================================
+ * Construct one Huffman tree and assigns the code bit strings and lengths.
+ * Update the total bit length for the current block.
+ * IN assertion: the field freq is set for all tree elements.
+ * OUT assertions: the fields len and code are set to the optimal bit length
+ * and corresponding code. The length opt_len is updated; static_len is
+ * also updated if stree is not null. The field max_code is set.
+ */
+local void build_tree(s, desc)
+    deflate_state *s;
+    tree_desc *desc; /* the tree descriptor */
+{
+    ct_data *tree         = desc->dyn_tree;
+    const ct_data *stree  = desc->stat_desc->static_tree;
+    int elems             = desc->stat_desc->elems;
+    int n, m;          /* iterate over heap elements */
+    int max_code = -1; /* largest code with non zero frequency */
+    int node;          /* new node being created */
+
+    /* Construct the initial heap, with least frequent element in
+     * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+     * heap[0] is not used.
+     */
+    s->heap_len = 0, s->heap_max = HEAP_SIZE;
+
+    for (n = 0; n < elems; n++) {
+        if (tree[n].Freq != 0) {
+            s->heap[++(s->heap_len)] = max_code = n;
+            s->depth[n] = 0;
+        } else {
+            tree[n].Len = 0;
+        }
+    }
+
+    /* The pkzip format requires that at least one distance code exists,
+     * and that at least one bit should be sent even if there is only one
+     * possible code. So to avoid special checks later on we force at least
+     * two codes of non zero frequency.
+     */
+    while (s->heap_len < 2) {
+        node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
+        tree[node].Freq = 1;
+        s->depth[node] = 0;
+        s->opt_len--; if (stree) s->static_len -= stree[node].Len;
+        /* node is 0 or 1 so it does not have extra bits */
+    }
+    desc->max_code = max_code;
+
+    /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+     * establish sub-heaps of increasing lengths:
+     */
+    for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
+
+    /* Construct the Huffman tree by repeatedly combining the least two
+     * frequent nodes.
+     */
+    node = elems;              /* next internal node of the tree */
+    do {
+        pqremove(s, tree, n);  /* n = node of least frequency */
+        m = s->heap[SMALLEST]; /* m = node of next least frequency */
+
+        s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
+        s->heap[--(s->heap_max)] = m;
+
+        /* Create a new node father of n and m */
+        tree[node].Freq = tree[n].Freq + tree[m].Freq;
+        s->depth[node] = (uch)((s->depth[n] >= s->depth[m] ?
+                                s->depth[n] : s->depth[m]) + 1);
+        tree[n].Dad = tree[m].Dad = (ush)node;
+#ifdef DUMP_BL_TREE
+        if (tree == s->bl_tree) {
+            fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
+                    node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
+        }
+#endif
+        /* and insert the new node in the heap */
+        s->heap[SMALLEST] = node++;
+        pqdownheap(s, tree, SMALLEST);
+
+    } while (s->heap_len >= 2);
+
+    /* The single remaining element is the root of the Huffman tree. */
+    s->heap[--(s->heap_max)] = s->heap[SMALLEST];
+
+    /* At this point, the fields freq and dad are set. We can now
+     * generate the bit lengths.
+     */
+    gen_bitlen(s, (tree_desc *)desc);
+
+    /* The field len is now set, we can generate the bit codes */
+    gen_codes ((ct_data *)tree, max_code, s->bl_count);
+}
+
+/* ===========================================================================
+ * Scan a literal or distance tree to determine the frequencies of the codes
+ * in the bit length tree.
+ */
+local void scan_tree (s, tree, max_code)
+    deflate_state *s;
+    ct_data *tree;   /* the tree to be scanned */
+    int max_code;    /* and its largest code of non zero frequency */
+{
+    int n;                     /* iterates over all tree elements */
+    int prevlen = -1;          /* last emitted length */
+    int curlen;                /* length of current code */
+    int nextlen = tree[0].Len; /* length of next code */
+    int count = 0;             /* repeat count of the current code */
+    int max_count = 7;         /* max repeat count */
+    int min_count = 4;         /* min repeat count */
+
+    if (nextlen == 0) max_count = 138, min_count = 3;
+    tree[max_code+1].Len = (ush)0xffff; /* guard */
+
+    for (n = 0; n <= max_code; n++) {
+        curlen = nextlen; nextlen = tree[n+1].Len;
+        if (++count < max_count && curlen == nextlen) {
+            continue;
+        } else if (count < min_count) {
+            /* run too short for a repeat code: count each length singly */
+            s->bl_tree[curlen].Freq += count;
+        } else if (curlen != 0) {
+            if (curlen != prevlen) s->bl_tree[curlen].Freq++;
+            s->bl_tree[REP_3_6].Freq++;
+        } else if (count <= 10) {
+            s->bl_tree[REPZ_3_10].Freq++;
+        } else {
+            s->bl_tree[REPZ_11_138].Freq++;
+        }
+        count = 0; prevlen = curlen;
+        /* Reset run limits according to the next length value. */
+        if (nextlen == 0) {
+            max_count = 138, min_count = 3;
+        } else if (curlen == nextlen) {
+            max_count = 6, min_count = 3;
+        } else {
+            max_count = 7, min_count = 4;
+        }
+    }
+}
+
+/* ===========================================================================
+ * Send a literal or distance tree in compressed form, using the codes in
+ * bl_tree.
+ */
+local void send_tree (s, tree, max_code)
+    deflate_state *s;
+    ct_data *tree; /* the tree to be scanned */
+    int max_code;       /* and its largest code of non zero frequency */
+{
+    int n;                     /* iterates over all tree elements */
+    int prevlen = -1;          /* last emitted length */
+    int curlen;                /* length of current code */
+    int nextlen = tree[0].Len; /* length of next code */
+    int count = 0;             /* repeat count of the current code */
+    int max_count = 7;         /* max repeat count */
+    int min_count = 4;         /* min repeat count */
+
+    /* tree[max_code+1].Len = -1; */ /* guard already set */
+    if (nextlen == 0) max_count = 138, min_count = 3;
+
+    for (n = 0; n <= max_code; n++) {
+        curlen = nextlen; nextlen = tree[n+1].Len;
+        if (++count < max_count && curlen == nextlen) {
+            continue;
+        } else if (count < min_count) {
+            /* run too short for a repeat code: emit each length singly */
+            do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
+
+        } else if (curlen != 0) {
+            if (curlen != prevlen) {
+                send_code(s, curlen, s->bl_tree); count--;
+            }
+            Assert(count >= 3 && count <= 6, " 3_6?");
+            send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
+
+        } else if (count <= 10) {
+            send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
+
+        } else {
+            send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
+        }
+        count = 0; prevlen = curlen;
+        /* Reset run limits according to the next length value. */
+        if (nextlen == 0) {
+            max_count = 138, min_count = 3;
+        } else if (curlen == nextlen) {
+            max_count = 6, min_count = 3;
+        } else {
+            max_count = 7, min_count = 4;
+        }
+    }
+}
+
+/* ===========================================================================
+ * Construct the Huffman tree for the bit lengths and return the index in
+ * bl_order of the last bit length code to send.
+ */
+local int build_bl_tree(s)
+    deflate_state *s;
+{
+    int max_blindex;  /* index of last bit length code of non zero freq */
+
+    /* Determine the bit length frequencies for literal and distance trees */
+    scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
+    scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
+
+    /* Build the bit length tree: */
+    build_tree(s, (tree_desc *)(&(s->bl_desc)));
+    /* opt_len now includes the length of the tree representations, except
+     * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+     */
+
+    /* Determine the number of bit length codes to send. The pkzip format
+     * requires that at least 4 bit length codes be sent. (appnote.txt says
+     * 3 but the actual value used is 4.)
+     */
+    for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
+        if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
+    }
+    /* Update opt_len to include the bit length tree and counts
+     * (3 bits per bit-length code sent, plus the 5+5+4 count fields). */
+    s->opt_len += 3*((ulg)max_blindex+1) + 5+5+4;
+    Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
+            s->opt_len, s->static_len));
+
+    return max_blindex;
+}
+
+/* ===========================================================================
+ * Send the header for a block using dynamic Huffman trees: the counts, the
+ * lengths of the bit length codes, the literal tree and the distance tree.
+ * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ */
+local void send_all_trees(s, lcodes, dcodes, blcodes)
+    deflate_state *s;
+    int lcodes, dcodes, blcodes; /* number of codes for each tree */
+{
+    int rank;                    /* index in bl_order */
+
+    Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
+    Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
+            "too many codes");
+    Tracev((stderr, "\nbl counts: "));
+    /* Code counts, biased as the deflate format requires. */
+    send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
+    send_bits(s, dcodes-1,   5);
+    send_bits(s, blcodes-4,  4); /* not -3 as stated in appnote.txt */
+    /* Bit-length code lengths, 3 bits each, in bl_order. */
+    for (rank = 0; rank < blcodes; rank++) {
+        Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
+        send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
+    }
+    Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
+
+    send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
+    Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
+
+    send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
+    Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
+}
+
+/* ===========================================================================
+ * Send a stored block
+ */
+void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last)
+ deflate_state *s;
+ charf *buf; /* input block */
+ ulg stored_len; /* length of input block */
+ int last; /* one if this is the last block for a file */
+{
+ send_bits(s, (STORED_BLOCK<<1)+last, 3); /* send block type */
+ bi_windup(s); /* align on byte boundary */
+ put_short(s, (ush)stored_len);
+ put_short(s, (ush)~stored_len);
+ zmemcpy(s->pending_buf + s->pending, (Bytef *)buf, stored_len);
+ s->pending += stored_len;
+#ifdef ZLIB_DEBUG
+ s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
+ s->compressed_len += (stored_len + 4) << 3;
+ s->bits_sent += 2*16;
+ s->bits_sent += stored_len<<3;
+#endif
+}
+
+/* ===========================================================================
+ * Flush the bits in the bit buffer to pending output (leaves at most 7 bits)
+ */
+void ZLIB_INTERNAL _tr_flush_bits(s)
+ deflate_state *s;
+{
+ bi_flush(s);
+}
+
+/* ===========================================================================
+ * Send one empty static block to give enough lookahead for inflate.
+ * This takes 10 bits, of which 7 may remain in the bit buffer.
+ */
+void ZLIB_INTERNAL _tr_align(s)
+ deflate_state *s;
+{
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+#ifdef ZLIB_DEBUG
+ s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
+#endif
+ bi_flush(s);
+}
+
+/* ===========================================================================
+ * Determine the best encoding for the current block: dynamic trees, static
+ * trees or store, and write out the encoded block.
+ */
+void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last)
+ deflate_state *s;
+ charf *buf; /* input block, or NULL if too old */
+ ulg stored_len; /* length of input block */
+ int last; /* one if this is the last block for a file */
+{
+ ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
+ int max_blindex = 0; /* index of last bit length code of non zero freq */
+
+ /* Build the Huffman trees unless a stored block is forced */
+ if (s->level > 0) {
+
+ /* Check if the file is binary or text */
+ if (s->strm->data_type == Z_UNKNOWN)
+ s->strm->data_type = detect_data_type(s);
+
+ /* Construct the literal and distance trees */
+ build_tree(s, (tree_desc *)(&(s->l_desc)));
+ Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+
+ build_tree(s, (tree_desc *)(&(s->d_desc)));
+ Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+ /* At this point, opt_len and static_len are the total bit lengths of
+ * the compressed block data, excluding the tree representations.
+ */
+
+ /* Build the bit length tree for the above two trees, and get the index
+ * in bl_order of the last bit length code to send.
+ */
+ max_blindex = build_bl_tree(s);
+
+ /* Determine the best encoding. Compute the block lengths in bytes. */
+ opt_lenb = (s->opt_len+3+7)>>3;
+ static_lenb = (s->static_len+3+7)>>3;
+
+ Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
+ opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
+ s->last_lit));
+
+ if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
+
+ } else {
+ Assert(buf != (char*)0, "lost buf");
+ opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
+ }
+
+#ifdef FORCE_STORED
+ if (buf != (char*)0) { /* force stored block */
+#else
+ if (stored_len+4 <= opt_lenb && buf != (char*)0) {
+ /* 4: two words for the lengths */
+#endif
+ /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+ * Otherwise we can't have processed more than WSIZE input bytes since
+ * the last block flush, because compression would have been
+ * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ * transform a block into a stored block.
+ */
+ _tr_stored_block(s, buf, stored_len, last);
+
+#ifdef FORCE_STATIC
+ } else if (static_lenb >= 0) { /* force static trees */
+#else
+ } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) {
+#endif
+ send_bits(s, (STATIC_TREES<<1)+last, 3);
+ compress_block(s, (const ct_data *)static_ltree,
+ (const ct_data *)static_dtree);
+#ifdef ZLIB_DEBUG
+ s->compressed_len += 3 + s->static_len;
+#endif
+ } else {
+ send_bits(s, (DYN_TREES<<1)+last, 3);
+ send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
+ max_blindex+1);
+ compress_block(s, (const ct_data *)s->dyn_ltree,
+ (const ct_data *)s->dyn_dtree);
+#ifdef ZLIB_DEBUG
+ s->compressed_len += 3 + s->opt_len;
+#endif
+ }
+ Assert (s->compressed_len == s->bits_sent, "bad compressed size");
+ /* The above check is made mod 2^32, for files larger than 512 MB
+ * and uLong implemented on 32 bits.
+ */
+ init_block(s);
+
+ if (last) {
+ bi_windup(s);
+#ifdef ZLIB_DEBUG
+ s->compressed_len += 7; /* align on byte boundary */
+#endif
+ }
+ Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
+ s->compressed_len-7*last));
+}
+
+/* ===========================================================================
+ * Save the match info and tally the frequency counts. Return true if
+ * the current block must be flushed.
+ */
+int ZLIB_INTERNAL _tr_tally (s, dist, lc)
+ deflate_state *s;
+ unsigned dist; /* distance of matched string */
+ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
+{
+ s->d_buf[s->last_lit] = (ush)dist;
+ s->l_buf[s->last_lit++] = (uch)lc;
+ if (dist == 0) {
+ /* lc is the unmatched char */
+ s->dyn_ltree[lc].Freq++;
+ } else {
+ s->matches++;
+ /* Here, lc is the match length - MIN_MATCH */
+ dist--; /* dist = match distance - 1 */
+ Assert((ush)dist < (ush)MAX_DIST(s) &&
+ (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
+ (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
+
+ s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++;
+ s->dyn_dtree[d_code(dist)].Freq++;
+ }
+
+#ifdef TRUNCATE_BLOCK
+ /* Try to guess if it is profitable to stop the current block here */
+ if ((s->last_lit & 0x1fff) == 0 && s->level > 2) {
+ /* Compute an upper bound for the compressed length */
+ ulg out_length = (ulg)s->last_lit*8L;
+ ulg in_length = (ulg)((long)s->strstart - s->block_start);
+ int dcode;
+ for (dcode = 0; dcode < D_CODES; dcode++) {
+ out_length += (ulg)s->dyn_dtree[dcode].Freq *
+ (5L+extra_dbits[dcode]);
+ }
+ out_length >>= 3;
+ Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
+ s->last_lit, in_length, out_length,
+ 100L - out_length*100L/in_length));
+ if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
+ }
+#endif
+ return (s->last_lit == s->lit_bufsize-1);
+ /* We avoid equality with lit_bufsize because of wraparound at 64K
+ * on 16 bit machines and because stored blocks are restricted to
+ * 64K-1 bytes.
+ */
+}
+
+/* ===========================================================================
+ * Send the block data compressed using the given Huffman trees
+ */
+local void compress_block(s, ltree, dtree)
+ deflate_state *s;
+ const ct_data *ltree; /* literal tree */
+ const ct_data *dtree; /* distance tree */
+{
+ unsigned dist; /* distance of matched string */
+ int lc; /* match length or unmatched char (if dist == 0) */
+ unsigned lx = 0; /* running index in l_buf */
+ unsigned code; /* the code to send */
+ int extra; /* number of extra bits to send */
+
+ if (s->last_lit != 0) do {
+ dist = s->d_buf[lx];
+ lc = s->l_buf[lx++];
+ if (dist == 0) {
+ send_code(s, lc, ltree); /* send a literal byte */
+ Tracecv(isgraph(lc), (stderr," '%c' ", lc));
+ } else {
+ /* Here, lc is the match length - MIN_MATCH */
+ code = _length_code[lc];
+ send_code(s, code+LITERALS+1, ltree); /* send the length code */
+ extra = extra_lbits[code];
+ if (extra != 0) {
+ lc -= base_length[code];
+ send_bits(s, lc, extra); /* send the extra length bits */
+ }
+ dist--; /* dist is now the match distance - 1 */
+ code = d_code(dist);
+ Assert (code < D_CODES, "bad d_code");
+
+ send_code(s, code, dtree); /* send the distance code */
+ extra = extra_dbits[code];
+ if (extra != 0) {
+ dist -= (unsigned)base_dist[code];
+ send_bits(s, dist, extra); /* send the extra distance bits */
+ }
+ } /* literal or match pair ? */
+
+ /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
+ Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,
+ "pendingBuf overflow");
+
+ } while (lx < s->last_lit);
+
+ send_code(s, END_BLOCK, ltree);
+}
+
+/* ===========================================================================
+ * Check if the data type is TEXT or BINARY, using the following algorithm:
+ * - TEXT if the two conditions below are satisfied:
+ * a) There are no non-portable control characters belonging to the
+ * "black list" (0..6, 14..25, 28..31).
+ * b) There is at least one printable character belonging to the
+ * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).
+ * - BINARY otherwise.
+ * - The following partially-portable control characters form a
+ * "gray list" that is ignored in this detection algorithm:
+ * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).
+ * IN assertion: the fields Freq of dyn_ltree are set.
+ */
+local int detect_data_type(s)
+ deflate_state *s;
+{
+ /* black_mask is the bit mask of black-listed bytes
+ * set bits 0..6, 14..25, and 28..31
+ * 0xf3ffc07f = binary 11110011111111111100000001111111
+ */
+ unsigned long black_mask = 0xf3ffc07fUL;
+ int n;
+
+ /* Check for non-textual ("black-listed") bytes. */
+ for (n = 0; n <= 31; n++, black_mask >>= 1)
+ if ((black_mask & 1) && (s->dyn_ltree[n].Freq != 0))
+ return Z_BINARY;
+
+ /* Check for textual ("white-listed") bytes. */
+ if (s->dyn_ltree[9].Freq != 0 || s->dyn_ltree[10].Freq != 0
+ || s->dyn_ltree[13].Freq != 0)
+ return Z_TEXT;
+ for (n = 32; n < LITERALS; n++)
+ if (s->dyn_ltree[n].Freq != 0)
+ return Z_TEXT;
+
+ /* There are no "black-listed" or "white-listed" bytes:
+ * this stream either is empty or has tolerated ("gray-listed") bytes only.
+ */
+ return Z_BINARY;
+}
+
+/* ===========================================================================
+ * Reverse the first len bits of a code, using straightforward code (a faster
+ * method would use a table)
+ * IN assertion: 1 <= len <= 15
+ */
+local unsigned bi_reverse(code, len)
+ unsigned code; /* the value to invert */
+ int len; /* its bit length */
+{
+ register unsigned res = 0;
+ do {
+ res |= code & 1;
+ code >>= 1, res <<= 1;
+ } while (--len > 0);
+ return res >> 1;
+}
+
+/* ===========================================================================
+ * Flush the bit buffer, keeping at most 7 bits in it.
+ */
+local void bi_flush(s)
+ deflate_state *s;
+{
+ if (s->bi_valid == 16) {
+ put_short(s, s->bi_buf);
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ } else if (s->bi_valid >= 8) {
+ put_byte(s, (Byte)s->bi_buf);
+ s->bi_buf >>= 8;
+ s->bi_valid -= 8;
+ }
+}
+
+/* ===========================================================================
+ * Flush the bit buffer and align the output on a byte boundary
+ */
+local void bi_windup(s)
+ deflate_state *s;
+{
+ if (s->bi_valid > 8) {
+ put_short(s, s->bi_buf);
+ } else if (s->bi_valid > 0) {
+ put_byte(s, (Byte)s->bi_buf);
+ }
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+#ifdef ZLIB_DEBUG
+ s->bits_sent = (s->bits_sent+7) & ~7;
+#endif
+}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/trees.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/trees.h
new file mode 100644
index 00000000..d35639d8
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/trees.h
@@ -0,0 +1,128 @@
+/* header created automatically with -DGEN_TREES_H */
+
+local const ct_data static_ltree[L_CODES+2] = {
+{{ 12},{ 8}}, {{140},{ 8}}, {{ 76},{ 8}}, {{204},{ 8}}, {{ 44},{ 8}},
+{{172},{ 8}}, {{108},{ 8}}, {{236},{ 8}}, {{ 28},{ 8}}, {{156},{ 8}},
+{{ 92},{ 8}}, {{220},{ 8}}, {{ 60},{ 8}}, {{188},{ 8}}, {{124},{ 8}},
+{{252},{ 8}}, {{ 2},{ 8}}, {{130},{ 8}}, {{ 66},{ 8}}, {{194},{ 8}},
+{{ 34},{ 8}}, {{162},{ 8}}, {{ 98},{ 8}}, {{226},{ 8}}, {{ 18},{ 8}},
+{{146},{ 8}}, {{ 82},{ 8}}, {{210},{ 8}}, {{ 50},{ 8}}, {{178},{ 8}},
+{{114},{ 8}}, {{242},{ 8}}, {{ 10},{ 8}}, {{138},{ 8}}, {{ 74},{ 8}},
+{{202},{ 8}}, {{ 42},{ 8}}, {{170},{ 8}}, {{106},{ 8}}, {{234},{ 8}},
+{{ 26},{ 8}}, {{154},{ 8}}, {{ 90},{ 8}}, {{218},{ 8}}, {{ 58},{ 8}},
+{{186},{ 8}}, {{122},{ 8}}, {{250},{ 8}}, {{ 6},{ 8}}, {{134},{ 8}},
+{{ 70},{ 8}}, {{198},{ 8}}, {{ 38},{ 8}}, {{166},{ 8}}, {{102},{ 8}},
+{{230},{ 8}}, {{ 22},{ 8}}, {{150},{ 8}}, {{ 86},{ 8}}, {{214},{ 8}},
+{{ 54},{ 8}}, {{182},{ 8}}, {{118},{ 8}}, {{246},{ 8}}, {{ 14},{ 8}},
+{{142},{ 8}}, {{ 78},{ 8}}, {{206},{ 8}}, {{ 46},{ 8}}, {{174},{ 8}},
+{{110},{ 8}}, {{238},{ 8}}, {{ 30},{ 8}}, {{158},{ 8}}, {{ 94},{ 8}},
+{{222},{ 8}}, {{ 62},{ 8}}, {{190},{ 8}}, {{126},{ 8}}, {{254},{ 8}},
+{{ 1},{ 8}}, {{129},{ 8}}, {{ 65},{ 8}}, {{193},{ 8}}, {{ 33},{ 8}},
+{{161},{ 8}}, {{ 97},{ 8}}, {{225},{ 8}}, {{ 17},{ 8}}, {{145},{ 8}},
+{{ 81},{ 8}}, {{209},{ 8}}, {{ 49},{ 8}}, {{177},{ 8}}, {{113},{ 8}},
+{{241},{ 8}}, {{ 9},{ 8}}, {{137},{ 8}}, {{ 73},{ 8}}, {{201},{ 8}},
+{{ 41},{ 8}}, {{169},{ 8}}, {{105},{ 8}}, {{233},{ 8}}, {{ 25},{ 8}},
+{{153},{ 8}}, {{ 89},{ 8}}, {{217},{ 8}}, {{ 57},{ 8}}, {{185},{ 8}},
+{{121},{ 8}}, {{249},{ 8}}, {{ 5},{ 8}}, {{133},{ 8}}, {{ 69},{ 8}},
+{{197},{ 8}}, {{ 37},{ 8}}, {{165},{ 8}}, {{101},{ 8}}, {{229},{ 8}},
+{{ 21},{ 8}}, {{149},{ 8}}, {{ 85},{ 8}}, {{213},{ 8}}, {{ 53},{ 8}},
+{{181},{ 8}}, {{117},{ 8}}, {{245},{ 8}}, {{ 13},{ 8}}, {{141},{ 8}},
+{{ 77},{ 8}}, {{205},{ 8}}, {{ 45},{ 8}}, {{173},{ 8}}, {{109},{ 8}},
+{{237},{ 8}}, {{ 29},{ 8}}, {{157},{ 8}}, {{ 93},{ 8}}, {{221},{ 8}},
+{{ 61},{ 8}}, {{189},{ 8}}, {{125},{ 8}}, {{253},{ 8}}, {{ 19},{ 9}},
+{{275},{ 9}}, {{147},{ 9}}, {{403},{ 9}}, {{ 83},{ 9}}, {{339},{ 9}},
+{{211},{ 9}}, {{467},{ 9}}, {{ 51},{ 9}}, {{307},{ 9}}, {{179},{ 9}},
+{{435},{ 9}}, {{115},{ 9}}, {{371},{ 9}}, {{243},{ 9}}, {{499},{ 9}},
+{{ 11},{ 9}}, {{267},{ 9}}, {{139},{ 9}}, {{395},{ 9}}, {{ 75},{ 9}},
+{{331},{ 9}}, {{203},{ 9}}, {{459},{ 9}}, {{ 43},{ 9}}, {{299},{ 9}},
+{{171},{ 9}}, {{427},{ 9}}, {{107},{ 9}}, {{363},{ 9}}, {{235},{ 9}},
+{{491},{ 9}}, {{ 27},{ 9}}, {{283},{ 9}}, {{155},{ 9}}, {{411},{ 9}},
+{{ 91},{ 9}}, {{347},{ 9}}, {{219},{ 9}}, {{475},{ 9}}, {{ 59},{ 9}},
+{{315},{ 9}}, {{187},{ 9}}, {{443},{ 9}}, {{123},{ 9}}, {{379},{ 9}},
+{{251},{ 9}}, {{507},{ 9}}, {{ 7},{ 9}}, {{263},{ 9}}, {{135},{ 9}},
+{{391},{ 9}}, {{ 71},{ 9}}, {{327},{ 9}}, {{199},{ 9}}, {{455},{ 9}},
+{{ 39},{ 9}}, {{295},{ 9}}, {{167},{ 9}}, {{423},{ 9}}, {{103},{ 9}},
+{{359},{ 9}}, {{231},{ 9}}, {{487},{ 9}}, {{ 23},{ 9}}, {{279},{ 9}},
+{{151},{ 9}}, {{407},{ 9}}, {{ 87},{ 9}}, {{343},{ 9}}, {{215},{ 9}},
+{{471},{ 9}}, {{ 55},{ 9}}, {{311},{ 9}}, {{183},{ 9}}, {{439},{ 9}},
+{{119},{ 9}}, {{375},{ 9}}, {{247},{ 9}}, {{503},{ 9}}, {{ 15},{ 9}},
+{{271},{ 9}}, {{143},{ 9}}, {{399},{ 9}}, {{ 79},{ 9}}, {{335},{ 9}},
+{{207},{ 9}}, {{463},{ 9}}, {{ 47},{ 9}}, {{303},{ 9}}, {{175},{ 9}},
+{{431},{ 9}}, {{111},{ 9}}, {{367},{ 9}}, {{239},{ 9}}, {{495},{ 9}},
+{{ 31},{ 9}}, {{287},{ 9}}, {{159},{ 9}}, {{415},{ 9}}, {{ 95},{ 9}},
+{{351},{ 9}}, {{223},{ 9}}, {{479},{ 9}}, {{ 63},{ 9}}, {{319},{ 9}},
+{{191},{ 9}}, {{447},{ 9}}, {{127},{ 9}}, {{383},{ 9}}, {{255},{ 9}},
+{{511},{ 9}}, {{ 0},{ 7}}, {{ 64},{ 7}}, {{ 32},{ 7}}, {{ 96},{ 7}},
+{{ 16},{ 7}}, {{ 80},{ 7}}, {{ 48},{ 7}}, {{112},{ 7}}, {{ 8},{ 7}},
+{{ 72},{ 7}}, {{ 40},{ 7}}, {{104},{ 7}}, {{ 24},{ 7}}, {{ 88},{ 7}},
+{{ 56},{ 7}}, {{120},{ 7}}, {{ 4},{ 7}}, {{ 68},{ 7}}, {{ 36},{ 7}},
+{{100},{ 7}}, {{ 20},{ 7}}, {{ 84},{ 7}}, {{ 52},{ 7}}, {{116},{ 7}},
+{{ 3},{ 8}}, {{131},{ 8}}, {{ 67},{ 8}}, {{195},{ 8}}, {{ 35},{ 8}},
+{{163},{ 8}}, {{ 99},{ 8}}, {{227},{ 8}}
+};
+
+local const ct_data static_dtree[D_CODES] = {
+{{ 0},{ 5}}, {{16},{ 5}}, {{ 8},{ 5}}, {{24},{ 5}}, {{ 4},{ 5}},
+{{20},{ 5}}, {{12},{ 5}}, {{28},{ 5}}, {{ 2},{ 5}}, {{18},{ 5}},
+{{10},{ 5}}, {{26},{ 5}}, {{ 6},{ 5}}, {{22},{ 5}}, {{14},{ 5}},
+{{30},{ 5}}, {{ 1},{ 5}}, {{17},{ 5}}, {{ 9},{ 5}}, {{25},{ 5}},
+{{ 5},{ 5}}, {{21},{ 5}}, {{13},{ 5}}, {{29},{ 5}}, {{ 3},{ 5}},
+{{19},{ 5}}, {{11},{ 5}}, {{27},{ 5}}, {{ 7},{ 5}}, {{23},{ 5}}
+};
+
+const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {
+ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
+ 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10,
+10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13,
+13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15,
+15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17,
+18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22,
+23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
+27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
+};
+
+const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
+13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
+26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28
+};
+
+local const int base_length[LENGTH_CODES] = {
+0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+64, 80, 96, 112, 128, 160, 192, 224, 0
+};
+
+local const int base_dist[D_CODES] = {
+ 0, 1, 2, 3, 4, 6, 8, 12, 16, 24,
+ 32, 48, 64, 96, 128, 192, 256, 384, 512, 768,
+ 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576
+};
+
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/uncompr.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/uncompr.c
new file mode 100644
index 00000000..f03a1a86
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/uncompr.c
@@ -0,0 +1,93 @@
+/* uncompr.c -- decompress a memory buffer
+ * Copyright (C) 1995-2003, 2010, 2014, 2016 Jean-loup Gailly, Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#define ZLIB_INTERNAL
+#include "zlib.h"
+
+/* ===========================================================================
+ Decompresses the source buffer into the destination buffer. *sourceLen is
+ the byte length of the source buffer. Upon entry, *destLen is the total size
+ of the destination buffer, which must be large enough to hold the entire
+ uncompressed data. (The size of the uncompressed data must have been saved
+ previously by the compressor and transmitted to the decompressor by some
+ mechanism outside the scope of this compression library.) Upon exit,
+ *destLen is the size of the decompressed data and *sourceLen is the number
+ of source bytes consumed. Upon return, source + *sourceLen points to the
+ first unused input byte.
+
+ uncompress returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_BUF_ERROR if there was not enough room in the output buffer, or
+ Z_DATA_ERROR if the input data was corrupted, including if the input data is
+ an incomplete zlib stream.
+*/
+int ZEXPORT uncompress2 (dest, destLen, source, sourceLen)
+ Bytef *dest;
+ uLongf *destLen;
+ const Bytef *source;
+ uLong *sourceLen;
+{
+ z_stream stream;
+ int err;
+ const uInt max = (uInt)-1;
+ uLong len, left;
+ Byte buf[1]; /* for detection of incomplete stream when *destLen == 0 */
+
+ len = *sourceLen;
+ if (*destLen) {
+ left = *destLen;
+ *destLen = 0;
+ }
+ else {
+ left = 1;
+ dest = buf;
+ }
+
+ stream.next_in = (z_const Bytef *)source;
+ stream.avail_in = 0;
+ stream.zalloc = (alloc_func)0;
+ stream.zfree = (free_func)0;
+ stream.opaque = (voidpf)0;
+
+ err = inflateInit(&stream);
+ if (err != Z_OK) return err;
+
+ stream.next_out = dest;
+ stream.avail_out = 0;
+
+ do {
+ if (stream.avail_out == 0) {
+ stream.avail_out = left > (uLong)max ? max : (uInt)left;
+ left -= stream.avail_out;
+ }
+ if (stream.avail_in == 0) {
+ stream.avail_in = len > (uLong)max ? max : (uInt)len;
+ len -= stream.avail_in;
+ }
+ err = inflate(&stream, Z_NO_FLUSH);
+ } while (err == Z_OK);
+
+ *sourceLen -= len + stream.avail_in;
+ if (dest != buf)
+ *destLen = stream.total_out;
+ else if (stream.total_out && err == Z_BUF_ERROR)
+ left = 1;
+
+ inflateEnd(&stream);
+ return err == Z_STREAM_END ? Z_OK :
+ err == Z_NEED_DICT ? Z_DATA_ERROR :
+ err == Z_BUF_ERROR && left + stream.avail_out ? Z_DATA_ERROR :
+ err;
+}
+
+int ZEXPORT uncompress (dest, destLen, source, sourceLen)
+ Bytef *dest;
+ uLongf *destLen;
+ const Bytef *source;
+ uLong sourceLen;
+{
+ return uncompress2(dest, destLen, source, &sourceLen);
+}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zconf.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zconf.h
new file mode 100644
index 00000000..5e1d68a0
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zconf.h
@@ -0,0 +1,534 @@
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#ifndef ZCONF_H
+#define ZCONF_H
+
+/*
+ * If you *really* need a unique prefix for all types and library functions,
+ * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it.
+ * Even better than compiling with -DZ_PREFIX would be to use configure to set
+ * this permanently in zconf.h using "./configure --zprefix".
+ */
+#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */
+# define Z_PREFIX_SET
+
+/* all linked symbols and init macros */
+# define _dist_code z__dist_code
+# define _length_code z__length_code
+# define _tr_align z__tr_align
+# define _tr_flush_bits z__tr_flush_bits
+# define _tr_flush_block z__tr_flush_block
+# define _tr_init z__tr_init
+# define _tr_stored_block z__tr_stored_block
+# define _tr_tally z__tr_tally
+# define adler32 z_adler32
+# define adler32_combine z_adler32_combine
+# define adler32_combine64 z_adler32_combine64
+# define adler32_z z_adler32_z
+# ifndef Z_SOLO
+# define compress z_compress
+# define compress2 z_compress2
+# define compressBound z_compressBound
+# endif
+# define crc32 z_crc32
+# define crc32_combine z_crc32_combine
+# define crc32_combine64 z_crc32_combine64
+# define crc32_z z_crc32_z
+# define deflate z_deflate
+# define deflateBound z_deflateBound
+# define deflateCopy z_deflateCopy
+# define deflateEnd z_deflateEnd
+# define deflateGetDictionary z_deflateGetDictionary
+# define deflateInit z_deflateInit
+# define deflateInit2 z_deflateInit2
+# define deflateInit2_ z_deflateInit2_
+# define deflateInit_ z_deflateInit_
+# define deflateParams z_deflateParams
+# define deflatePending z_deflatePending
+# define deflatePrime z_deflatePrime
+# define deflateReset z_deflateReset
+# define deflateResetKeep z_deflateResetKeep
+# define deflateSetDictionary z_deflateSetDictionary
+# define deflateSetHeader z_deflateSetHeader
+# define deflateTune z_deflateTune
+# define deflate_copyright z_deflate_copyright
+# define get_crc_table z_get_crc_table
+# ifndef Z_SOLO
+# define gz_error z_gz_error
+# define gz_intmax z_gz_intmax
+# define gz_strwinerror z_gz_strwinerror
+# define gzbuffer z_gzbuffer
+# define gzclearerr z_gzclearerr
+# define gzclose z_gzclose
+# define gzclose_r z_gzclose_r
+# define gzclose_w z_gzclose_w
+# define gzdirect z_gzdirect
+# define gzdopen z_gzdopen
+# define gzeof z_gzeof
+# define gzerror z_gzerror
+# define gzflush z_gzflush
+# define gzfread z_gzfread
+# define gzfwrite z_gzfwrite
+# define gzgetc z_gzgetc
+# define gzgetc_ z_gzgetc_
+# define gzgets z_gzgets
+# define gzoffset z_gzoffset
+# define gzoffset64 z_gzoffset64
+# define gzopen z_gzopen
+# define gzopen64 z_gzopen64
+# ifdef _WIN32
+# define gzopen_w z_gzopen_w
+# endif
+# define gzprintf z_gzprintf
+# define gzputc z_gzputc
+# define gzputs z_gzputs
+# define gzread z_gzread
+# define gzrewind z_gzrewind
+# define gzseek z_gzseek
+# define gzseek64 z_gzseek64
+# define gzsetparams z_gzsetparams
+# define gztell z_gztell
+# define gztell64 z_gztell64
+# define gzungetc z_gzungetc
+# define gzvprintf z_gzvprintf
+# define gzwrite z_gzwrite
+# endif
+# define inflate z_inflate
+# define inflateBack z_inflateBack
+# define inflateBackEnd z_inflateBackEnd
+# define inflateBackInit z_inflateBackInit
+# define inflateBackInit_ z_inflateBackInit_
+# define inflateCodesUsed z_inflateCodesUsed
+# define inflateCopy z_inflateCopy
+# define inflateEnd z_inflateEnd
+# define inflateGetDictionary z_inflateGetDictionary
+# define inflateGetHeader z_inflateGetHeader
+# define inflateInit z_inflateInit
+# define inflateInit2 z_inflateInit2
+# define inflateInit2_ z_inflateInit2_
+# define inflateInit_ z_inflateInit_
+# define inflateMark z_inflateMark
+# define inflatePrime z_inflatePrime
+# define inflateReset z_inflateReset
+# define inflateReset2 z_inflateReset2
+# define inflateResetKeep z_inflateResetKeep
+# define inflateSetDictionary z_inflateSetDictionary
+# define inflateSync z_inflateSync
+# define inflateSyncPoint z_inflateSyncPoint
+# define inflateUndermine z_inflateUndermine
+# define inflateValidate z_inflateValidate
+# define inflate_copyright z_inflate_copyright
+# define inflate_fast z_inflate_fast
+# define inflate_table z_inflate_table
+# ifndef Z_SOLO
+# define uncompress z_uncompress
+# define uncompress2 z_uncompress2
+# endif
+# define zError z_zError
+# ifndef Z_SOLO
+# define zcalloc z_zcalloc
+# define zcfree z_zcfree
+# endif
+# define zlibCompileFlags z_zlibCompileFlags
+# define zlibVersion z_zlibVersion
+
+/* all zlib typedefs in zlib.h and zconf.h */
+# define Byte z_Byte
+# define Bytef z_Bytef
+# define alloc_func z_alloc_func
+# define charf z_charf
+# define free_func z_free_func
+# ifndef Z_SOLO
+# define gzFile z_gzFile
+# endif
+# define gz_header z_gz_header
+# define gz_headerp z_gz_headerp
+# define in_func z_in_func
+# define intf z_intf
+# define out_func z_out_func
+# define uInt z_uInt
+# define uIntf z_uIntf
+# define uLong z_uLong
+# define uLongf z_uLongf
+# define voidp z_voidp
+# define voidpc z_voidpc
+# define voidpf z_voidpf
+
+/* all zlib structs in zlib.h and zconf.h */
+# define gz_header_s z_gz_header_s
+# define internal_state z_internal_state
+
+#endif
+
+#if defined(__MSDOS__) && !defined(MSDOS)
+# define MSDOS
+#endif
+#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2)
+# define OS2
+#endif
+#if defined(_WINDOWS) && !defined(WINDOWS)
+# define WINDOWS
+#endif
+#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__)
+# ifndef WIN32
+# define WIN32
+# endif
+#endif
+#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32)
+# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__)
+# ifndef SYS16BIT
+# define SYS16BIT
+# endif
+# endif
+#endif
+
+/*
+ * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
+ * than 64k bytes at a time (needed on systems with 16-bit int).
+ */
+#ifdef SYS16BIT
+# define MAXSEG_64K
+#endif
+#ifdef MSDOS
+# define UNALIGNED_OK
+#endif
+
+#ifdef __STDC_VERSION__
+# ifndef STDC
+# define STDC
+# endif
+# if __STDC_VERSION__ >= 199901L
+# ifndef STDC99
+# define STDC99
+# endif
+# endif
+#endif
+#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__))
+# define STDC
+#endif
+
+#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */
+# define STDC
+#endif
+
+#ifndef STDC
+# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */
+# define const /* note: need a more gentle solution here */
+# endif
+#endif
+
+#if defined(ZLIB_CONST) && !defined(z_const)
+# define z_const const
+#else
+# define z_const
+#endif
+
+#ifdef Z_SOLO
+ typedef unsigned long z_size_t;
+#else
+# define z_longlong long long
+# if defined(NO_SIZE_T)
+ typedef unsigned NO_SIZE_T z_size_t;
+# elif defined(STDC)
+# include <stddef.h>
+ typedef size_t z_size_t;
+# else
+ typedef unsigned long z_size_t;
+# endif
+# undef z_longlong
+#endif
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# ifdef MAXSEG_64K
+# define MAX_MEM_LEVEL 8
+# else
+# define MAX_MEM_LEVEL 9
+# endif
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2.
+ * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files
+ * created by gzip. (Files created by minigzip can still be extracted by
+ * gzip.)
+ */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+/* The memory requirements for deflate are (in bytes):
+ (1 << (windowBits+2)) + (1 << (memLevel+9))
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus about 7 kilobytes
+ for small objects.
+*/
+
+ /* Type declarations */
+
+#ifndef OF /* function prototypes */
+# ifdef STDC
+# define OF(args) args
+# else
+# define OF(args) ()
+# endif
+#endif
+
+#ifndef Z_ARG /* function prototypes for stdarg */
+# if defined(STDC) || defined(Z_HAVE_STDARG_H)
+# define Z_ARG(args) args
+# else
+# define Z_ARG(args) ()
+# endif
+#endif
+
+/* The following definitions for FAR are needed only for MSDOS mixed
+ * model programming (small or medium model with some far allocations).
+ * This was tested only with MSC; for other MSDOS compilers you may have
+ * to define NO_MEMCPY in zutil.h. If you don't need the mixed model,
+ * just define FAR to be empty.
+ */
+#ifdef SYS16BIT
+# if defined(M_I86SM) || defined(M_I86MM)
+ /* MSC small or medium model */
+# define SMALL_MEDIUM
+# ifdef _MSC_VER
+# define FAR _far
+# else
+# define FAR far
+# endif
+# endif
+# if (defined(__SMALL__) || defined(__MEDIUM__))
+ /* Turbo C small or medium model */
+# define SMALL_MEDIUM
+# ifdef __BORLANDC__
+# define FAR _far
+# else
+# define FAR far
+# endif
+# endif
+#endif
+
+#if defined(WINDOWS) || defined(WIN32)
+ /* If building or using zlib as a DLL, define ZLIB_DLL.
+ * This is not mandatory, but it offers a little performance increase.
+ */
+# ifdef ZLIB_DLL
+# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500))
+# ifdef ZLIB_INTERNAL
+# define ZEXTERN extern __declspec(dllexport)
+# else
+# define ZEXTERN extern __declspec(dllimport)
+# endif
+# endif
+# endif /* ZLIB_DLL */
+ /* If building or using zlib with the WINAPI/WINAPIV calling convention,
+ * define ZLIB_WINAPI.
+ * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI.
+ */
+# ifdef ZLIB_WINAPI
+# ifdef FAR
+# undef FAR
+# endif
+# include <windows.h>
+ /* No need for _export, use ZLIB.DEF instead. */
+ /* For complete Windows compatibility, use WINAPI, not __stdcall. */
+# define ZEXPORT WINAPI
+# ifdef WIN32
+# define ZEXPORTVA WINAPIV
+# else
+# define ZEXPORTVA FAR CDECL
+# endif
+# endif
+#endif
+
+#if defined (__BEOS__)
+# ifdef ZLIB_DLL
+# ifdef ZLIB_INTERNAL
+# define ZEXPORT __declspec(dllexport)
+# define ZEXPORTVA __declspec(dllexport)
+# else
+# define ZEXPORT __declspec(dllimport)
+# define ZEXPORTVA __declspec(dllimport)
+# endif
+# endif
+#endif
+
+#ifndef ZEXTERN
+# define ZEXTERN extern
+#endif
+#ifndef ZEXPORT
+# define ZEXPORT
+#endif
+#ifndef ZEXPORTVA
+# define ZEXPORTVA
+#endif
+
+#ifndef FAR
+# define FAR
+#endif
+
+#if !defined(__MACTYPES__)
+typedef unsigned char Byte; /* 8 bits */
+#endif
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+
+#ifdef SMALL_MEDIUM
+ /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */
+# define Bytef Byte FAR
+#else
+ typedef Byte FAR Bytef;
+#endif
+typedef char FAR charf;
+typedef int FAR intf;
+typedef uInt FAR uIntf;
+typedef uLong FAR uLongf;
+
+#ifdef STDC
+ typedef void const *voidpc;
+ typedef void FAR *voidpf;
+ typedef void *voidp;
+#else
+ typedef Byte const *voidpc;
+ typedef Byte FAR *voidpf;
+ typedef Byte *voidp;
+#endif
+
+#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC)
+# include <limits.h>
+# if (UINT_MAX == 0xffffffffUL)
+# define Z_U4 unsigned
+# elif (ULONG_MAX == 0xffffffffUL)
+# define Z_U4 unsigned long
+# elif (USHRT_MAX == 0xffffffffUL)
+# define Z_U4 unsigned short
+# endif
+#endif
+
+#ifdef Z_U4
+ typedef Z_U4 z_crc_t;
+#else
+ typedef unsigned long z_crc_t;
+#endif
+
+#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */
+# define Z_HAVE_UNISTD_H
+#endif
+
+#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */
+# define Z_HAVE_STDARG_H
+#endif
+
+#ifdef STDC
+# ifndef Z_SOLO
+# include <sys/types.h> /* for off_t */
+# endif
+#endif
+
+#if defined(STDC) || defined(Z_HAVE_STDARG_H)
+# ifndef Z_SOLO
+# include <stdarg.h> /* for va_list */
+# endif
+#endif
+
+#ifdef _WIN32
+# ifndef Z_SOLO
+# include <stddef.h> /* for wchar_t */
+# endif
+#endif
+
+/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and
+ * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even
+ * though the former does not conform to the LFS document), but considering
+ * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as
+ * equivalently requesting no 64-bit operations
+ */
+#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1
+# undef _LARGEFILE64_SOURCE
+#endif
+
+#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H)
+# define Z_HAVE_UNISTD_H
+#endif
+#ifndef Z_SOLO
+# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE)
+# include <unistd.h> /* for SEEK_*, off_t, and _LFS64_LARGEFILE */
+# ifdef VMS
+# include <unixio.h> /* for off_t */
+# endif
+# ifndef z_off_t
+# define z_off_t off_t
+# endif
+# endif
+#endif
+
+#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0
+# define Z_LFS64
+#endif
+
+#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64)
+# define Z_LARGE64
+#endif
+
+#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64)
+# define Z_WANT64
+#endif
+
+#if !defined(SEEK_SET) && !defined(Z_SOLO)
+# define SEEK_SET 0 /* Seek from beginning of file. */
+# define SEEK_CUR 1 /* Seek from current position. */
+# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */
+#endif
+
+#ifndef z_off_t
+# define z_off_t long
+#endif
+
+#if !defined(_WIN32) && defined(Z_LARGE64)
+# define z_off64_t off64_t
+#else
+# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO)
+# define z_off64_t __int64
+# else
+# define z_off64_t z_off_t
+# endif
+#endif
+
+/* MVS linker does not support external names larger than 8 bytes */
+#if defined(__MVS__)
+ #pragma map(deflateInit_,"DEIN")
+ #pragma map(deflateInit2_,"DEIN2")
+ #pragma map(deflateEnd,"DEEND")
+ #pragma map(deflateBound,"DEBND")
+ #pragma map(inflateInit_,"ININ")
+ #pragma map(inflateInit2_,"ININ2")
+ #pragma map(inflateEnd,"INEND")
+ #pragma map(inflateSync,"INSY")
+ #pragma map(inflateSetDictionary,"INSEDI")
+ #pragma map(compressBound,"CMBND")
+ #pragma map(inflate_table,"INTABL")
+ #pragma map(inflate_fast,"INFA")
+ #pragma map(inflate_copyright,"INCOPY")
+#endif
+
+#endif /* ZCONF_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zconf.h.in b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zconf.h.in
new file mode 100644
index 00000000..5e1d68a0
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zconf.h.in
@@ -0,0 +1,534 @@
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#ifndef ZCONF_H
+#define ZCONF_H
+
+/*
+ * If you *really* need a unique prefix for all types and library functions,
+ * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it.
+ * Even better than compiling with -DZ_PREFIX would be to use configure to set
+ * this permanently in zconf.h using "./configure --zprefix".
+ */
+#ifdef Z_PREFIX /* may be set to #if 1 by ./configure */
+# define Z_PREFIX_SET
+
+/* all linked symbols and init macros */
+# define _dist_code z__dist_code
+# define _length_code z__length_code
+# define _tr_align z__tr_align
+# define _tr_flush_bits z__tr_flush_bits
+# define _tr_flush_block z__tr_flush_block
+# define _tr_init z__tr_init
+# define _tr_stored_block z__tr_stored_block
+# define _tr_tally z__tr_tally
+# define adler32 z_adler32
+# define adler32_combine z_adler32_combine
+# define adler32_combine64 z_adler32_combine64
+# define adler32_z z_adler32_z
+# ifndef Z_SOLO
+# define compress z_compress
+# define compress2 z_compress2
+# define compressBound z_compressBound
+# endif
+# define crc32 z_crc32
+# define crc32_combine z_crc32_combine
+# define crc32_combine64 z_crc32_combine64
+# define crc32_z z_crc32_z
+# define deflate z_deflate
+# define deflateBound z_deflateBound
+# define deflateCopy z_deflateCopy
+# define deflateEnd z_deflateEnd
+# define deflateGetDictionary z_deflateGetDictionary
+# define deflateInit z_deflateInit
+# define deflateInit2 z_deflateInit2
+# define deflateInit2_ z_deflateInit2_
+# define deflateInit_ z_deflateInit_
+# define deflateParams z_deflateParams
+# define deflatePending z_deflatePending
+# define deflatePrime z_deflatePrime
+# define deflateReset z_deflateReset
+# define deflateResetKeep z_deflateResetKeep
+# define deflateSetDictionary z_deflateSetDictionary
+# define deflateSetHeader z_deflateSetHeader
+# define deflateTune z_deflateTune
+# define deflate_copyright z_deflate_copyright
+# define get_crc_table z_get_crc_table
+# ifndef Z_SOLO
+# define gz_error z_gz_error
+# define gz_intmax z_gz_intmax
+# define gz_strwinerror z_gz_strwinerror
+# define gzbuffer z_gzbuffer
+# define gzclearerr z_gzclearerr
+# define gzclose z_gzclose
+# define gzclose_r z_gzclose_r
+# define gzclose_w z_gzclose_w
+# define gzdirect z_gzdirect
+# define gzdopen z_gzdopen
+# define gzeof z_gzeof
+# define gzerror z_gzerror
+# define gzflush z_gzflush
+# define gzfread z_gzfread
+# define gzfwrite z_gzfwrite
+# define gzgetc z_gzgetc
+# define gzgetc_ z_gzgetc_
+# define gzgets z_gzgets
+# define gzoffset z_gzoffset
+# define gzoffset64 z_gzoffset64
+# define gzopen z_gzopen
+# define gzopen64 z_gzopen64
+# ifdef _WIN32
+# define gzopen_w z_gzopen_w
+# endif
+# define gzprintf z_gzprintf
+# define gzputc z_gzputc
+# define gzputs z_gzputs
+# define gzread z_gzread
+# define gzrewind z_gzrewind
+# define gzseek z_gzseek
+# define gzseek64 z_gzseek64
+# define gzsetparams z_gzsetparams
+# define gztell z_gztell
+# define gztell64 z_gztell64
+# define gzungetc z_gzungetc
+# define gzvprintf z_gzvprintf
+# define gzwrite z_gzwrite
+# endif
+# define inflate z_inflate
+# define inflateBack z_inflateBack
+# define inflateBackEnd z_inflateBackEnd
+# define inflateBackInit z_inflateBackInit
+# define inflateBackInit_ z_inflateBackInit_
+# define inflateCodesUsed z_inflateCodesUsed
+# define inflateCopy z_inflateCopy
+# define inflateEnd z_inflateEnd
+# define inflateGetDictionary z_inflateGetDictionary
+# define inflateGetHeader z_inflateGetHeader
+# define inflateInit z_inflateInit
+# define inflateInit2 z_inflateInit2
+# define inflateInit2_ z_inflateInit2_
+# define inflateInit_ z_inflateInit_
+# define inflateMark z_inflateMark
+# define inflatePrime z_inflatePrime
+# define inflateReset z_inflateReset
+# define inflateReset2 z_inflateReset2
+# define inflateResetKeep z_inflateResetKeep
+# define inflateSetDictionary z_inflateSetDictionary
+# define inflateSync z_inflateSync
+# define inflateSyncPoint z_inflateSyncPoint
+# define inflateUndermine z_inflateUndermine
+# define inflateValidate z_inflateValidate
+# define inflate_copyright z_inflate_copyright
+# define inflate_fast z_inflate_fast
+# define inflate_table z_inflate_table
+# ifndef Z_SOLO
+# define uncompress z_uncompress
+# define uncompress2 z_uncompress2
+# endif
+# define zError z_zError
+# ifndef Z_SOLO
+# define zcalloc z_zcalloc
+# define zcfree z_zcfree
+# endif
+# define zlibCompileFlags z_zlibCompileFlags
+# define zlibVersion z_zlibVersion
+
+/* all zlib typedefs in zlib.h and zconf.h */
+# define Byte z_Byte
+# define Bytef z_Bytef
+# define alloc_func z_alloc_func
+# define charf z_charf
+# define free_func z_free_func
+# ifndef Z_SOLO
+# define gzFile z_gzFile
+# endif
+# define gz_header z_gz_header
+# define gz_headerp z_gz_headerp
+# define in_func z_in_func
+# define intf z_intf
+# define out_func z_out_func
+# define uInt z_uInt
+# define uIntf z_uIntf
+# define uLong z_uLong
+# define uLongf z_uLongf
+# define voidp z_voidp
+# define voidpc z_voidpc
+# define voidpf z_voidpf
+
+/* all zlib structs in zlib.h and zconf.h */
+# define gz_header_s z_gz_header_s
+# define internal_state z_internal_state
+
+#endif
+
+#if defined(__MSDOS__) && !defined(MSDOS)
+# define MSDOS
+#endif
+#if (defined(OS_2) || defined(__OS2__)) && !defined(OS2)
+# define OS2
+#endif
+#if defined(_WINDOWS) && !defined(WINDOWS)
+# define WINDOWS
+#endif
+#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__)
+# ifndef WIN32
+# define WIN32
+# endif
+#endif
+#if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32)
+# if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__)
+# ifndef SYS16BIT
+# define SYS16BIT
+# endif
+# endif
+#endif
+
+/*
+ * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
+ * than 64k bytes at a time (needed on systems with 16-bit int).
+ */
+#ifdef SYS16BIT
+# define MAXSEG_64K
+#endif
+#ifdef MSDOS
+# define UNALIGNED_OK
+#endif
+
+#ifdef __STDC_VERSION__
+# ifndef STDC
+# define STDC
+# endif
+# if __STDC_VERSION__ >= 199901L
+# ifndef STDC99
+# define STDC99
+# endif
+# endif
+#endif
+#if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32))
+# define STDC
+#endif
+#if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__))
+# define STDC
+#endif
+
+#if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */
+# define STDC
+#endif
+
+#ifndef STDC
+# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */
+# define const /* note: need a more gentle solution here */
+# endif
+#endif
+
+#if defined(ZLIB_CONST) && !defined(z_const)
+# define z_const const
+#else
+# define z_const
+#endif
+
+#ifdef Z_SOLO
+ typedef unsigned long z_size_t;
+#else
+# define z_longlong long long
+# if defined(NO_SIZE_T)
+ typedef unsigned NO_SIZE_T z_size_t;
+# elif defined(STDC)
+# include <stddef.h>
+ typedef size_t z_size_t;
+# else
+ typedef unsigned long z_size_t;
+# endif
+# undef z_longlong
+#endif
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# ifdef MAXSEG_64K
+# define MAX_MEM_LEVEL 8
+# else
+# define MAX_MEM_LEVEL 9
+# endif
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2.
+ * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files
+ * created by gzip. (Files created by minigzip can still be extracted by
+ * gzip.)
+ */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+/* The memory requirements for deflate are (in bytes):
+ (1 << (windowBits+2)) + (1 << (memLevel+9))
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus about 7 kilobytes
+ for small objects.
+*/
+
+ /* Type declarations */
+
+#ifndef OF /* function prototypes */
+# ifdef STDC
+# define OF(args) args
+# else
+# define OF(args) ()
+# endif
+#endif
+
+#ifndef Z_ARG /* function prototypes for stdarg */
+# if defined(STDC) || defined(Z_HAVE_STDARG_H)
+# define Z_ARG(args) args
+# else
+# define Z_ARG(args) ()
+# endif
+#endif
+
+/* The following definitions for FAR are needed only for MSDOS mixed
+ * model programming (small or medium model with some far allocations).
+ * This was tested only with MSC; for other MSDOS compilers you may have
+ * to define NO_MEMCPY in zutil.h. If you don't need the mixed model,
+ * just define FAR to be empty.
+ */
+#ifdef SYS16BIT
+# if defined(M_I86SM) || defined(M_I86MM)
+ /* MSC small or medium model */
+# define SMALL_MEDIUM
+# ifdef _MSC_VER
+# define FAR _far
+# else
+# define FAR far
+# endif
+# endif
+# if (defined(__SMALL__) || defined(__MEDIUM__))
+ /* Turbo C small or medium model */
+# define SMALL_MEDIUM
+# ifdef __BORLANDC__
+# define FAR _far
+# else
+# define FAR far
+# endif
+# endif
+#endif
+
+#if defined(WINDOWS) || defined(WIN32)
+ /* If building or using zlib as a DLL, define ZLIB_DLL.
+ * This is not mandatory, but it offers a little performance increase.
+ */
+# ifdef ZLIB_DLL
+# if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500))
+# ifdef ZLIB_INTERNAL
+# define ZEXTERN extern __declspec(dllexport)
+# else
+# define ZEXTERN extern __declspec(dllimport)
+# endif
+# endif
+# endif /* ZLIB_DLL */
+ /* If building or using zlib with the WINAPI/WINAPIV calling convention,
+ * define ZLIB_WINAPI.
+ * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI.
+ */
+# ifdef ZLIB_WINAPI
+# ifdef FAR
+# undef FAR
+# endif
+# include <windows.h>
+ /* No need for _export, use ZLIB.DEF instead. */
+ /* For complete Windows compatibility, use WINAPI, not __stdcall. */
+# define ZEXPORT WINAPI
+# ifdef WIN32
+# define ZEXPORTVA WINAPIV
+# else
+# define ZEXPORTVA FAR CDECL
+# endif
+# endif
+#endif
+
+#if defined (__BEOS__)
+# ifdef ZLIB_DLL
+# ifdef ZLIB_INTERNAL
+# define ZEXPORT __declspec(dllexport)
+# define ZEXPORTVA __declspec(dllexport)
+# else
+# define ZEXPORT __declspec(dllimport)
+# define ZEXPORTVA __declspec(dllimport)
+# endif
+# endif
+#endif
+
+#ifndef ZEXTERN
+# define ZEXTERN extern
+#endif
+#ifndef ZEXPORT
+# define ZEXPORT
+#endif
+#ifndef ZEXPORTVA
+# define ZEXPORTVA
+#endif
+
+#ifndef FAR
+# define FAR
+#endif
+
+#if !defined(__MACTYPES__)
+typedef unsigned char Byte; /* 8 bits */
+#endif
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+
+#ifdef SMALL_MEDIUM
+ /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */
+# define Bytef Byte FAR
+#else
+ typedef Byte FAR Bytef;
+#endif
+typedef char FAR charf;
+typedef int FAR intf;
+typedef uInt FAR uIntf;
+typedef uLong FAR uLongf;
+
+#ifdef STDC
+ typedef void const *voidpc;
+ typedef void FAR *voidpf;
+ typedef void *voidp;
+#else
+ typedef Byte const *voidpc;
+ typedef Byte FAR *voidpf;
+ typedef Byte *voidp;
+#endif
+
+#if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC)
+# include <limits.h>
+# if (UINT_MAX == 0xffffffffUL)
+# define Z_U4 unsigned
+# elif (ULONG_MAX == 0xffffffffUL)
+# define Z_U4 unsigned long
+# elif (USHRT_MAX == 0xffffffffUL)
+# define Z_U4 unsigned short
+# endif
+#endif
+
+#ifdef Z_U4
+ typedef Z_U4 z_crc_t;
+#else
+ typedef unsigned long z_crc_t;
+#endif
+
+#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */
+# define Z_HAVE_UNISTD_H
+#endif
+
+#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */
+# define Z_HAVE_STDARG_H
+#endif
+
+#ifdef STDC
+# ifndef Z_SOLO
+# include <sys/types.h> /* for off_t */
+# endif
+#endif
+
+#if defined(STDC) || defined(Z_HAVE_STDARG_H)
+# ifndef Z_SOLO
+# include <stdarg.h> /* for va_list */
+# endif
+#endif
+
+#ifdef _WIN32
+# ifndef Z_SOLO
+# include <stddef.h> /* for wchar_t */
+# endif
+#endif
+
+/* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and
+ * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even
+ * though the former does not conform to the LFS document), but considering
+ * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as
+ * equivalently requesting no 64-bit operations
+ */
+#if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1
+# undef _LARGEFILE64_SOURCE
+#endif
+
+#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H)
+# define Z_HAVE_UNISTD_H
+#endif
+#ifndef Z_SOLO
+# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE)
+# include <unistd.h> /* for SEEK_*, off_t, and _LFS64_LARGEFILE */
+# ifdef VMS
+# include <unixio.h> /* for off_t */
+# endif
+# ifndef z_off_t
+# define z_off_t off_t
+# endif
+# endif
+#endif
+
+#if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0
+# define Z_LFS64
+#endif
+
+#if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64)
+# define Z_LARGE64
+#endif
+
+#if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64)
+# define Z_WANT64
+#endif
+
+#if !defined(SEEK_SET) && !defined(Z_SOLO)
+# define SEEK_SET 0 /* Seek from beginning of file. */
+# define SEEK_CUR 1 /* Seek from current position. */
+# define SEEK_END 2 /* Set file pointer to EOF plus "offset" */
+#endif
+
+#ifndef z_off_t
+# define z_off_t long
+#endif
+
+#if !defined(_WIN32) && defined(Z_LARGE64)
+# define z_off64_t off64_t
+#else
+# if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO)
+# define z_off64_t __int64
+# else
+# define z_off64_t z_off_t
+# endif
+#endif
+
+/* MVS linker does not support external names larger than 8 bytes */
+#if defined(__MVS__)
+ #pragma map(deflateInit_,"DEIN")
+ #pragma map(deflateInit2_,"DEIN2")
+ #pragma map(deflateEnd,"DEEND")
+ #pragma map(deflateBound,"DEBND")
+ #pragma map(inflateInit_,"ININ")
+ #pragma map(inflateInit2_,"ININ2")
+ #pragma map(inflateEnd,"INEND")
+ #pragma map(inflateSync,"INSY")
+ #pragma map(inflateSetDictionary,"INSEDI")
+ #pragma map(compressBound,"CMBND")
+ #pragma map(inflate_table,"INTABL")
+ #pragma map(inflate_fast,"INFA")
+ #pragma map(inflate_copyright,"INCOPY")
+#endif
+
+#endif /* ZCONF_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zlib.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zlib.h
new file mode 100644
index 00000000..f09cdaf1
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zlib.h
@@ -0,0 +1,1912 @@
+/* zlib.h -- interface of the 'zlib' general purpose compression library
+ version 1.2.11, January 15th, 2017
+
+ Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ jloup@gzip.org madler@alumni.caltech.edu
+
+
+ The data format used by the zlib library is described by RFCs (Request for
+ Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950
+ (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format).
+*/
+
+#ifndef ZLIB_H
+#define ZLIB_H
+
+#include "zconf.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ZLIB_VERSION "1.2.11"
+#define ZLIB_VERNUM 0x12b0
+#define ZLIB_VER_MAJOR 1
+#define ZLIB_VER_MINOR 2
+#define ZLIB_VER_REVISION 11
+#define ZLIB_VER_SUBREVISION 0
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed data.
+ This version of the library supports only one compression method (deflation)
+ but other algorithms will be added later and will have the same stream
+ interface.
+
+ Compression can be done in a single step if the buffers are large enough,
+ or can be done by repeated calls of the compression function. In the latter
+ case, the application must provide more input and/or consume the output
+ (providing more output space) before each call.
+
+ The compressed data format used by default by the in-memory functions is
+ the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped
+ around a deflate stream, which is itself documented in RFC 1951.
+
+ The library also supports reading and writing files in gzip (.gz) format
+ with an interface similar to that of stdio using the functions that start
+ with "gz". The gzip format is different from the zlib format. gzip is a
+ gzip wrapper, documented in RFC 1952, wrapped around a deflate stream.
+
+ This library can optionally read and write gzip and raw deflate streams in
+ memory as well.
+
+ The zlib format was designed to be compact and fast for use in memory
+ and on communications channels. The gzip format was designed for single-
+ file compression on file systems, has a larger header than zlib to maintain
+ directory information, and uses a different, slower check method than zlib.
+
+ The library does not install any signal handler. The decoder checks
+ the consistency of the compressed data, so the library should never crash
+ even in the case of corrupted input.
+*/
+
+typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
+typedef void (*free_func) OF((voidpf opaque, voidpf address));
+
+struct internal_state;
+
+typedef struct z_stream_s {
+ z_const Bytef *next_in; /* next input byte */
+ uInt avail_in; /* number of bytes available at next_in */
+ uLong total_in; /* total number of input bytes read so far */
+
+ Bytef *next_out; /* next output byte will go here */
+ uInt avail_out; /* remaining free space at next_out */
+ uLong total_out; /* total number of bytes output so far */
+
+ z_const char *msg; /* last error message, NULL if no error */
+ struct internal_state FAR *state; /* not visible by applications */
+
+ alloc_func zalloc; /* used to allocate the internal state */
+ free_func zfree; /* used to free the internal state */
+ voidpf opaque; /* private data object passed to zalloc and zfree */
+
+ int data_type; /* best guess about the data type: binary or text
+ for deflate, or the decoding state for inflate */
+ uLong adler; /* Adler-32 or CRC-32 value of the uncompressed data */
+ uLong reserved; /* reserved for future use */
+} z_stream;
+
+typedef z_stream FAR *z_streamp;
+
+/*
+ gzip header information passed to and from zlib routines. See RFC 1952
+ for more details on the meanings of these fields.
+*/
+typedef struct gz_header_s {
+ int text; /* true if compressed data believed to be text */
+ uLong time; /* modification time */
+ int xflags; /* extra flags (not used when writing a gzip file) */
+ int os; /* operating system */
+ Bytef *extra; /* pointer to extra field or Z_NULL if none */
+ uInt extra_len; /* extra field length (valid if extra != Z_NULL) */
+ uInt extra_max; /* space at extra (only when reading header) */
+ Bytef *name; /* pointer to zero-terminated file name or Z_NULL */
+ uInt name_max; /* space at name (only when reading header) */
+ Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */
+ uInt comm_max; /* space at comment (only when reading header) */
+ int hcrc; /* true if there was or will be a header crc */
+ int done; /* true when done reading gzip header (not used
+ when writing a gzip file) */
+} gz_header;
+
+typedef gz_header FAR *gz_headerp;
+
+/*
+ The application must update next_in and avail_in when avail_in has dropped
+ to zero. It must update next_out and avail_out when avail_out has dropped
+ to zero. The application must initialize zalloc, zfree and opaque before
+ calling the init function. All other fields are set by the compression
+ library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree. This can be useful for custom
+ memory management. The compression library attaches no meaning to the
+ opaque value.
+
+ zalloc must return Z_NULL if there is not enough memory for the object.
+ If zlib is used in a multi-threaded application, zalloc and zfree must be
+ thread safe. In that case, zlib is thread-safe. When zalloc and zfree are
+ Z_NULL on entry to the initialization function, they are set to internal
+ routines that use the standard library functions malloc() and free().
+
+ On 16-bit systems, the functions zalloc and zfree must be able to allocate
+ exactly 65536 bytes, but will not be required to allocate more than this if
+ the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers
+ returned by zalloc for objects of exactly 65536 bytes *must* have their
+ offset normalized to zero. The default allocation function provided by this
+ library ensures this (see zutil.c). To reduce memory requirements and avoid
+ any allocation of 64K objects, at the expense of compression ratio, compile
+ the library with -DMAX_WBITS=14 (see zconf.h).
+
+ The fields total_in and total_out can be used for statistics or progress
+ reports. After compression, total_in holds the total size of the
+ uncompressed data and may be saved for use by the decompressor (particularly
+ if the decompressor wants to decompress everything in a single step).
+*/
+
+ /* constants */
+
+#define Z_NO_FLUSH 0
+#define Z_PARTIAL_FLUSH 1
+#define Z_SYNC_FLUSH 2
+#define Z_FULL_FLUSH 3
+#define Z_FINISH 4
+#define Z_BLOCK 5
+#define Z_TREES 6
+/* Allowed flush values; see deflate() and inflate() below for details */
+
+#define Z_OK 0
+#define Z_STREAM_END 1
+#define Z_NEED_DICT 2
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_DATA_ERROR (-3)
+#define Z_MEM_ERROR (-4)
+#define Z_BUF_ERROR (-5)
+#define Z_VERSION_ERROR (-6)
+/* Return codes for the compression/decompression functions. Negative values
+ * are errors, positive values are used for special but normal events.
+ */
+
+#define Z_NO_COMPRESSION 0
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
+/* compression levels */
+
+#define Z_FILTERED 1
+#define Z_HUFFMAN_ONLY 2
+#define Z_RLE 3
+#define Z_FIXED 4
+#define Z_DEFAULT_STRATEGY 0
+/* compression strategy; see deflateInit2() below for details */
+
+#define Z_BINARY 0
+#define Z_TEXT 1
+#define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */
+#define Z_UNKNOWN 2
+/* Possible values of the data_type field for deflate() */
+
+#define Z_DEFLATED 8
+/* The deflate compression method (the only one supported in this version) */
+
+#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
+
+#define zlib_version zlibVersion()
+/* for compatibility with versions < 1.0.2 */
+
+
+ /* basic functions */
+
+ZEXTERN const char * ZEXPORT zlibVersion OF((void));
+/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
+ If the first character differs, the library code actually used is not
+ compatible with the zlib.h header file used by the application. This check
+ is automatically made by deflateInit and inflateInit.
+ */
+
+/*
+ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level));
+
+ Initializes the internal stream state for compression. The fields
+ zalloc, zfree and opaque must be initialized before by the caller. If
+ zalloc and zfree are set to Z_NULL, deflateInit updates them to use default
+ allocation functions.
+
+ The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
+ 1 gives best speed, 9 gives best compression, 0 gives no compression at all
+ (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION
+ requests a default compromise between speed and compression (currently
+ equivalent to level 6).
+
+ deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if level is not a valid compression level, or
+ Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
+ with the version assumed by the caller (ZLIB_VERSION). msg is set to null
+ if there is no error message. deflateInit does not perform any compression:
+ this will be done by deflate().
+*/
+
+
+ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush));
+/*
+ deflate compresses as much data as possible, and stops when the input
+ buffer becomes empty or the output buffer becomes full. It may introduce
+ some output latency (reading input without producing any output) except when
+ forced to flush.
+
+ The detailed semantics are as follows. deflate performs one or both of the
+ following actions:
+
+ - Compress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in and avail_in are updated and
+ processing will resume at this point for the next call of deflate().
+
+ - Generate more output starting at next_out and update next_out and avail_out
+ accordingly. This action is forced if the parameter flush is non zero.
+ Forcing flush frequently degrades the compression ratio, so this parameter
+ should be set only when necessary. Some output may be provided even if
+ flush is zero.
+
+ Before the call of deflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming more
+ output, and updating avail_in or avail_out accordingly; avail_out should
+ never be zero before the call. The application can consume the compressed
+ output when it wants, for example when the output buffer is full (avail_out
+ == 0), or after each call of deflate(). If deflate returns Z_OK and with
+ zero avail_out, it must be called again after making room in the output
+ buffer because there might be more output pending. See deflatePending(),
+ which can be used if desired to determine whether or not there is more output
+ in that case.
+
+ Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to
+ decide how much data to accumulate before producing output, in order to
+ maximize compression.
+
+ If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
+ flushed to the output buffer and the output is aligned on a byte boundary, so
+ that the decompressor can get all input data available so far. (In
+ particular avail_in is zero after the call if enough output space has been
+ provided before the call.) Flushing may degrade compression for some
+ compression algorithms and so it should be used only when necessary. This
+ completes the current deflate block and follows it with an empty stored block
+ that is three bits plus filler bits to the next byte, followed by four bytes
+ (00 00 ff ff).
+
+ If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the
+ output buffer, but the output is not aligned to a byte boundary. All of the
+ input data so far will be available to the decompressor, as for Z_SYNC_FLUSH.
+ This completes the current deflate block and follows it with an empty fixed
+ codes block that is 10 bits long. This assures that enough bytes are output
+ in order for the decompressor to finish the block before the empty fixed
+ codes block.
+
+ If flush is set to Z_BLOCK, a deflate block is completed and emitted, as
+ for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to
+ seven bits of the current block are held to be written as the next byte after
+ the next deflate block is completed. In this case, the decompressor may not
+ be provided enough bits at this point in order to complete decompression of
+ the data provided so far to the compressor. It may need to wait for the next
+ block to be emitted. This is for advanced applications that need to control
+ the emission of deflate blocks.
+
+ If flush is set to Z_FULL_FLUSH, all output is flushed as with
+ Z_SYNC_FLUSH, and the compression state is reset so that decompression can
+ restart from this point if previous compressed data has been damaged or if
+ random access is desired. Using Z_FULL_FLUSH too often can seriously degrade
+ compression.
+
+ If deflate returns with avail_out == 0, this function must be called again
+ with the same value of the flush parameter and more output space (updated
+ avail_out), until the flush is complete (deflate returns with non-zero
+ avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that
+ avail_out is greater than six to avoid repeated flush markers due to
+ avail_out == 0 on return.
+
+ If the parameter flush is set to Z_FINISH, pending input is processed,
+ pending output is flushed and deflate returns with Z_STREAM_END if there was
+ enough output space. If deflate returns with Z_OK or Z_BUF_ERROR, this
+ function must be called again with Z_FINISH and more output space (updated
+ avail_out) but no more input data, until it returns with Z_STREAM_END or an
+ error. After deflate has returned Z_STREAM_END, the only possible operations
+ on the stream are deflateReset or deflateEnd.
+
+ Z_FINISH can be used in the first deflate call after deflateInit if all the
+ compression is to be done in a single step. In order to complete in one
+ call, avail_out must be at least the value returned by deflateBound (see
+ below). Then deflate is guaranteed to return Z_STREAM_END. If not enough
+ output space is provided, deflate will not return Z_STREAM_END, and it must
+ be called again as described above.
+
+ deflate() sets strm->adler to the Adler-32 checksum of all input read
+ so far (that is, total_in bytes). If a gzip stream is being generated, then
+ strm->adler will be the CRC-32 checksum of the input read so far. (See
+ deflateInit2 below.)
+
+ deflate() may update strm->data_type if it can make a good guess about
+ the input data type (Z_BINARY or Z_TEXT). If in doubt, the data is
+ considered binary. This field is only for information purposes and does not
+ affect the compression algorithm in any manner.
+
+ deflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if all input has been
+ consumed and all output has been produced (only when flush is set to
+ Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
+ if next_in or next_out was Z_NULL or the state was inadvertently written over
+ by the application), or Z_BUF_ERROR if no progress is possible (for example
+ avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and
+ deflate() can be called again with more input and more output space to
+ continue compressing.
+*/
+
+
+ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any pending
+ output.
+
+ deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
+ stream state was inconsistent, Z_DATA_ERROR if the stream was freed
+ prematurely (some input or output was discarded). In the error case, msg
+ may be set but then points to a static string (which must not be
+ deallocated).
+*/
+
+
+/*
+ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm));
+
+ Initializes the internal stream state for decompression. The fields
+ next_in, avail_in, zalloc, zfree and opaque must be initialized before by
+ the caller. In the current version of inflate, the provided input is not
+ read or consumed. The allocation of a sliding window will be deferred to
+ the first call of inflate (if the decompression does not complete on the
+ first call). If zalloc and zfree are set to Z_NULL, inflateInit updates
+ them to use default allocation functions.
+
+ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
+ version assumed by the caller, or Z_STREAM_ERROR if the parameters are
+ invalid, such as a null pointer to the structure. msg is set to null if
+ there is no error message. inflateInit does not perform any decompression.
+ Actual decompression will be done by inflate(). So next_in, and avail_in,
+ next_out, and avail_out are unused and unchanged. The current
+ implementation of inflateInit() does not process any header information --
+ that is deferred until inflate() is called.
+*/
+
+
+ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush));
+/*
+ inflate decompresses as much data as possible, and stops when the input
+ buffer becomes empty or the output buffer becomes full. It may introduce
+ some output latency (reading input without producing any output) except when
+ forced to flush.
+
+ The detailed semantics are as follows. inflate performs one or both of the
+ following actions:
+
+ - Decompress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), then next_in and avail_in are updated
+ accordingly, and processing will resume at this point for the next call of
+ inflate().
+
+ - Generate more output starting at next_out and update next_out and avail_out
+ accordingly. inflate() provides as much output as possible, until there is
+ no more input data or no more space in the output buffer (see below about
+ the flush parameter).
+
+ Before the call of inflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming more
+ output, and updating the next_* and avail_* values accordingly. If the
+ caller of inflate() does not provide both available input and available
+ output space, it is possible that there will be no progress made. The
+ application can consume the uncompressed output when it wants, for example
+ when the output buffer is full (avail_out == 0), or after each call of
+ inflate(). If inflate returns Z_OK and with zero avail_out, it must be
+ called again after making room in the output buffer because there might be
+ more output pending.
+
+ The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH,
+ Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much
+ output as possible to the output buffer. Z_BLOCK requests that inflate()
+ stop if and when it gets to the next deflate block boundary. When decoding
+ the zlib or gzip format, this will cause inflate() to return immediately
+ after the header and before the first block. When doing a raw inflate,
+ inflate() will go ahead and process the first block, and will return when it
+ gets to the end of that block, or when it runs out of data.
+
+ The Z_BLOCK option assists in appending to or combining deflate streams.
+ To assist in this, on return inflate() always sets strm->data_type to the
+ number of unused bits in the last byte taken from strm->next_in, plus 64 if
+ inflate() is currently decoding the last block in the deflate stream, plus
+ 128 if inflate() returned immediately after decoding an end-of-block code or
+ decoding the complete header up to just before the first byte of the deflate
+ stream. The end-of-block will not be indicated until all of the uncompressed
+ data from that block has been written to strm->next_out. The number of
+ unused bits may in general be greater than seven, except when bit 7 of
+ data_type is set, in which case the number of unused bits will be less than
+ eight. data_type is set as noted here every time inflate() returns for all
+ flush options, and so can be used to determine the amount of currently
+ consumed input in bits.
+
+ The Z_TREES option behaves as Z_BLOCK does, but it also returns when the
+ end of each deflate block header is reached, before any actual data in that
+ block is decoded. This allows the caller to determine the length of the
+ deflate block header for later use in random access within a deflate block.
+ 256 is added to the value of strm->data_type when inflate() returns
+ immediately after reaching the end of the deflate block header.
+
+ inflate() should normally be called until it returns Z_STREAM_END or an
+ error. However if all decompression is to be performed in a single step (a
+ single call of inflate), the parameter flush should be set to Z_FINISH. In
+ this case all pending input is processed and all pending output is flushed;
+ avail_out must be large enough to hold all of the uncompressed data for the
+ operation to complete. (The size of the uncompressed data may have been
+ saved by the compressor for this purpose.) The use of Z_FINISH is not
+ required to perform an inflation in one step. However it may be used to
+ inform inflate that a faster approach can be used for the single inflate()
+ call. Z_FINISH also informs inflate to not maintain a sliding window if the
+ stream completes, which reduces inflate's memory footprint. If the stream
+ does not complete, either because not all of the stream is provided or not
+ enough output space is provided, then a sliding window will be allocated and
+ inflate() can be called again to continue the operation as if Z_NO_FLUSH had
+ been used.
+
+ In this implementation, inflate() always flushes as much output as
+ possible to the output buffer, and always uses the faster approach on the
+ first call. So the effects of the flush parameter in this implementation are
+ on the return value of inflate() as noted below, when inflate() returns early
+ when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of
+ memory for a sliding window when Z_FINISH is used.
+
+ If a preset dictionary is needed after this call (see inflateSetDictionary
+ below), inflate sets strm->adler to the Adler-32 checksum of the dictionary
+ chosen by the compressor and returns Z_NEED_DICT; otherwise it sets
+ strm->adler to the Adler-32 checksum of all output produced so far (that is,
+ total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described
+ below. At the end of the stream, inflate() checks that its computed Adler-32
+ checksum is equal to that saved by the compressor and returns Z_STREAM_END
+ only if the checksum is correct.
+
+ inflate() can decompress and check either zlib-wrapped or gzip-wrapped
+ deflate data. The header type is detected automatically, if requested when
+ initializing with inflateInit2(). Any information contained in the gzip
+ header is not retained unless inflateGetHeader() is used. When processing
+ gzip-wrapped deflate data, strm->adler is set to the CRC-32 of the output
+ produced so far. The CRC-32 is checked against the gzip trailer, as is the
+ uncompressed length, modulo 2^32.
+
+ inflate() returns Z_OK if some progress has been made (more input processed
+ or more output produced), Z_STREAM_END if the end of the compressed data has
+ been reached and all uncompressed output has been produced, Z_NEED_DICT if a
+ preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
+ corrupted (input stream not conforming to the zlib format or incorrect check
+ value, in which case strm->msg points to a string with a more specific
+ error), Z_STREAM_ERROR if the stream structure was inconsistent (for example
+ next_in or next_out was Z_NULL, or the state was inadvertently written over
+ by the application), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR
+ if no progress was possible or if there was not enough room in the output
+ buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and
+ inflate() can be called again with more input and more output space to
+ continue decompressing. If Z_DATA_ERROR is returned, the application may
+ then call inflateSync() to look for a good compression block if a partial
+ recovery of the data is to be attempted.
+*/
+
+
+ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any pending
+ output.
+
+ inflateEnd returns Z_OK if success, or Z_STREAM_ERROR if the stream state
+ was inconsistent.
+*/
+
+
+ /* Advanced functions */
+
+/*
+ The following functions are needed only in some special applications.
+*/
+
+/*
+ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm,
+ int level,
+ int method,
+ int windowBits,
+ int memLevel,
+ int strategy));
+
+ This is another version of deflateInit with more compression options. The
+ fields next_in, zalloc, zfree and opaque must be initialized before by the
+ caller.
+
+ The method parameter is the compression method. It must be Z_DEFLATED in
+ this version of the library.
+
+ The windowBits parameter is the base two logarithm of the window size
+ (the size of the history buffer). It should be in the range 8..15 for this
+ version of the library. Larger values of this parameter result in better
+ compression at the expense of memory usage. The default value is 15 if
+ deflateInit is used instead.
+
+ For the current implementation of deflate(), a windowBits value of 8 (a
+ window size of 256 bytes) is not supported. As a result, a request for 8
+ will result in 9 (a 512-byte window). In that case, providing 8 to
+ inflateInit2() will result in an error when the zlib header with 9 is
+ checked against the initialization of inflate(). The remedy is to not use 8
+ with deflateInit2() with this initialization, or at least in that case use 9
+ with inflateInit2().
+
+ windowBits can also be -8..-15 for raw deflate. In this case, -windowBits
+ determines the window size. deflate() will then generate raw deflate data
+ with no zlib header or trailer, and will not compute a check value.
+
+ windowBits can also be greater than 15 for optional gzip encoding. Add
+ 16 to windowBits to write a simple gzip header and trailer around the
+ compressed data instead of a zlib wrapper. The gzip header will have no
+ file name, no extra data, no comment, no modification time (set to zero), no
+ header crc, and the operating system will be set to the appropriate value,
+ if the operating system was determined at compile time. If a gzip stream is
+ being written, strm->adler is a CRC-32 instead of an Adler-32.
+
+ For raw deflate or gzip encoding, a request for a 256-byte window is
+ rejected as invalid, since only the zlib header provides a means of
+ transmitting the window size to the decompressor.
+
+ The memLevel parameter specifies how much memory should be allocated
+ for the internal compression state. memLevel=1 uses minimum memory but is
+ slow and reduces compression ratio; memLevel=9 uses maximum memory for
+ optimal speed. The default value is 8. See zconf.h for total memory usage
+ as a function of windowBits and memLevel.
+
+ The strategy parameter is used to tune the compression algorithm. Use the
+ value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
+ filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no
+ string match), or Z_RLE to limit match distances to one (run-length
+ encoding). Filtered data consists mostly of small values with a somewhat
+ random distribution. In this case, the compression algorithm is tuned to
+ compress them better. The effect of Z_FILTERED is to force more Huffman
+ coding and less string matching; it is somewhat intermediate between
+ Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as
+ fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The
+ strategy parameter only affects the compression ratio but not the
+ correctness of the compressed output even if it is not set appropriately.
+ Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler
+ decoder for special applications.
+
+ deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid
+ method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is
+ incompatible with the version assumed by the caller (ZLIB_VERSION). msg is
+ set to null if there is no error message. deflateInit2 does not perform any
+ compression: this will be done by deflate().
+*/
+
+ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the compression dictionary from the given byte sequence
+ without producing any compressed output. When using the zlib format, this
+ function must be called immediately after deflateInit, deflateInit2 or
+ deflateReset, and before any call of deflate. When doing raw deflate, this
+ function must be called either before any call of deflate, or immediately
+ after the completion of a deflate block, i.e. after all input has been
+ consumed and all output has been delivered when using any of the flush
+ options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The
+ compressor and decompressor must use exactly the same dictionary (see
+ inflateSetDictionary).
+
+ The dictionary should consist of strings (byte sequences) that are likely
+ to be encountered later in the data to be compressed, with the most commonly
+ used strings preferably put towards the end of the dictionary. Using a
+ dictionary is most useful when the data to be compressed is short and can be
+ predicted with good accuracy; the data can then be compressed better than
+ with the default empty dictionary.
+
+ Depending on the size of the compression data structures selected by
+ deflateInit or deflateInit2, a part of the dictionary may in effect be
+ discarded, for example if the dictionary is larger than the window size
+ provided in deflateInit or deflateInit2. Thus the strings most likely to be
+ useful should be put at the end of the dictionary, not at the front. In
+ addition, the current implementation of deflate will use at most the window
+ size minus 262 bytes of the provided dictionary.
+
+ Upon return of this function, strm->adler is set to the Adler-32 value
+ of the dictionary; the decompressor may later use this value to determine
+ which dictionary has been used by the compressor. (The Adler-32 value
+ applies to the whole dictionary even if only a subset of the dictionary is
+ actually used by the compressor.) If a raw deflate was requested, then the
+ Adler-32 value is not computed and strm->adler is not set.
+
+ deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
+ parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is
+ inconsistent (for example if deflate has already been called for this stream
+ or if not at a block boundary for raw deflate). deflateSetDictionary does
+ not perform any compression: this will be done by deflate().
+*/
+
+ZEXTERN int ZEXPORT deflateGetDictionary OF((z_streamp strm,
+ Bytef *dictionary,
+ uInt *dictLength));
+/*
+ Returns the sliding dictionary being maintained by deflate. dictLength is
+ set to the number of bytes in the dictionary, and that many bytes are copied
+ to dictionary. dictionary must have enough space, where 32768 bytes is
+ always enough. If deflateGetDictionary() is called with dictionary equal to
+ Z_NULL, then only the dictionary length is returned, and nothing is copied.
+ Similarly, if dictLength is Z_NULL, then it is not set.
+
+ deflateGetDictionary() may return a length less than the window size, even
+ when more than the window size in input has been provided. It may return up
+ to 258 bytes less in that case, due to how zlib's implementation of deflate
+ manages the sliding window and lookahead for matches, where matches can be
+ up to 258 bytes long. If the application needs the last window-size bytes of
+ input, then that would need to be saved by the application outside of zlib.
+
+ deflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the
+ stream state is inconsistent.
+*/
+
+ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest,
+ z_streamp source));
+/*
+ Sets the destination stream as a complete copy of the source stream.
+
+ This function can be useful when several compression strategies will be
+ tried, for example when there are several ways of pre-processing the input
+ data with a filter. The streams that will be discarded should then be freed
+ by calling deflateEnd. Note that deflateCopy duplicates the internal
+ compression state which can be quite large, so this strategy is slow and can
+ consume lots of memory.
+
+ deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
+ (such as zalloc being Z_NULL). msg is left unchanged in both source and
+ destination.
+*/
+
+ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to deflateEnd followed by deflateInit, but
+ does not free and reallocate the internal compression state. The stream
+ will leave the compression level and any other attributes that may have been
+ set unchanged.
+
+ deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being Z_NULL).
+*/
+
+ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm,
+ int level,
+ int strategy));
+/*
+ Dynamically update the compression level and compression strategy. The
+ interpretation of level and strategy is as in deflateInit2(). This can be
+ used to switch between compression and straight copy of the input data, or
+ to switch to a different kind of input data requiring a different strategy.
+ If the compression approach (which is a function of the level) or the
+ strategy is changed, and if any input has been consumed in a previous
+ deflate() call, then the input available so far is compressed with the old
+ level and strategy using deflate(strm, Z_BLOCK). There are three approaches
+ for the compression levels 0, 1..3, and 4..9 respectively. The new level
+ and strategy will take effect at the next call of deflate().
+
+ If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does
+ not have enough output space to complete, then the parameter change will not
+ take effect. In this case, deflateParams() can be called again with the
+ same parameters and more output space to try again.
+
+ In order to assure a change in the parameters on the first try, the
+ deflate stream should be flushed using deflate() with Z_BLOCK or other flush
+ request until strm.avail_out is not zero, before calling deflateParams().
+ Then no more input data should be provided before the deflateParams() call.
+ If this is done, the old level and strategy will be applied to the data
+ compressed before deflateParams(), and the new level and strategy will be
+ applied to the data compressed after deflateParams().
+
+ deflateParams returns Z_OK on success, Z_STREAM_ERROR if the source stream
+ state was inconsistent or if a parameter was invalid, or Z_BUF_ERROR if
+ there was not enough output space to complete the compression of the
+ available input data before a change in the strategy or approach. Note that
+ in the case of a Z_BUF_ERROR, the parameters are not changed. A return
+ value of Z_BUF_ERROR is not fatal, in which case deflateParams() can be
+ retried with more output space.
+*/
+
+ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm,
+ int good_length,
+ int max_lazy,
+ int nice_length,
+ int max_chain));
+/*
+ Fine tune deflate's internal compression parameters. This should only be
+ used by someone who understands the algorithm used by zlib's deflate for
+ searching for the best matching string, and even then only by the most
+ fanatic optimizer trying to squeeze out the last compressed bit for their
+ specific input data. Read the deflate.c source code for the meaning of the
+ max_lazy, good_length, nice_length, and max_chain parameters.
+
+ deflateTune() can be called after deflateInit() or deflateInit2(), and
+ returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream.
+ */
+
+ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm,
+ uLong sourceLen));
+/*
+ deflateBound() returns an upper bound on the compressed size after
+ deflation of sourceLen bytes. It must be called after deflateInit() or
+ deflateInit2(), and after deflateSetHeader(), if used. This would be used
+ to allocate an output buffer for deflation in a single pass, and so would be
+ called before deflate(). If that first deflate() call is provided the
+ sourceLen input bytes, an output buffer allocated to the size returned by
+ deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed
+ to return Z_STREAM_END. Note that it is possible for the compressed size to
+ be larger than the value returned by deflateBound() if flush options other
+ than Z_FINISH or Z_NO_FLUSH are used.
+*/
+
+ZEXTERN int ZEXPORT deflatePending OF((z_streamp strm,
+ unsigned *pending,
+ int *bits));
+/*
+ deflatePending() returns the number of bytes and bits of output that have
+ been generated, but not yet provided in the available output. The bytes not
+ provided would be due to the available output space having been consumed.
+ The number of bits of output not provided are between 0 and 7, where they
+ await more bits to join them in order to fill out a full byte. If pending
+ or bits are Z_NULL, then those values are not set.
+
+ deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent.
+ */
+
+ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm,
+ int bits,
+ int value));
+/*
+ deflatePrime() inserts bits in the deflate output stream. The intent
+ is that this function is used to start off the deflate output with the bits
+ leftover from a previous deflate stream when appending to it. As such, this
+ function can only be used for raw deflate, and must be used before the first
+ deflate() call after a deflateInit2() or deflateReset(). bits must be less
+ than or equal to 16, and that many of the least significant bits of value
+ will be inserted in the output.
+
+ deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough
+ room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the
+ source stream state was inconsistent.
+*/
+
+ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm,
+ gz_headerp head));
+/*
+ deflateSetHeader() provides gzip header information for when a gzip
+ stream is requested by deflateInit2(). deflateSetHeader() may be called
+ after deflateInit2() or deflateReset() and before the first call of
+ deflate(). The text, time, os, extra field, name, and comment information
+ in the provided gz_header structure are written to the gzip header (xflag is
+ ignored -- the extra flags are set according to the compression level). The
+ caller must assure that, if not Z_NULL, name and comment are terminated with
+ a zero byte, and that if extra is not Z_NULL, that extra_len bytes are
+ available there. If hcrc is true, a gzip header crc is included. Note that
+ the current versions of the command-line version of gzip (up through version
+ 1.3.x) do not support header crc's, and will report that it is a "multi-part
+ gzip file" and give up.
+
+ If deflateSetHeader is not used, the default gzip header has text false,
+ the time set to zero, and os set to 255, with no extra, name, or comment
+ fields. The gzip header is returned to the default state by deflateReset().
+
+ deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent.
+*/
+
+/*
+ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm,
+ int windowBits));
+
+ This is another version of inflateInit with an extra parameter. The
+ fields next_in, avail_in, zalloc, zfree and opaque must be initialized
+ before by the caller.
+
+ The windowBits parameter is the base two logarithm of the maximum window
+ size (the size of the history buffer). It should be in the range 8..15 for
+ this version of the library. The default value is 15 if inflateInit is used
+ instead. windowBits must be greater than or equal to the windowBits value
+ provided to deflateInit2() while compressing, or it must be equal to 15 if
+ deflateInit2() was not used. If a compressed stream with a larger window
+ size is given as input, inflate() will return with the error code
+ Z_DATA_ERROR instead of trying to allocate a larger window.
+
+ windowBits can also be zero to request that inflate use the window size in
+ the zlib header of the compressed stream.
+
+ windowBits can also be -8..-15 for raw inflate. In this case, -windowBits
+ determines the window size. inflate() will then process raw deflate data,
+ not looking for a zlib or gzip header, not generating a check value, and not
+ looking for any check values for comparison at the end of the stream. This
+ is for use with other formats that use the deflate compressed data format
+ such as zip. Those formats provide their own check values. If a custom
+ format is developed using the raw deflate format for compressed data, it is
+ recommended that a check value such as an Adler-32 or a CRC-32 be applied to
+ the uncompressed data as is done in the zlib, gzip, and zip formats. For
+ most applications, the zlib format should be used as is. Note that comments
+ above on the use in deflateInit2() apply to the magnitude of windowBits.
+
+ windowBits can also be greater than 15 for optional gzip decoding. Add
+ 32 to windowBits to enable zlib and gzip decoding with automatic header
+ detection, or add 16 to decode only the gzip format (the zlib format will
+ return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a
+ CRC-32 instead of an Adler-32. Unlike the gunzip utility and gzread() (see
+ below), inflate() will not automatically decode concatenated gzip streams.
+ inflate() will return Z_STREAM_END at the end of the gzip stream. The state
+ would need to be reset to continue decoding a subsequent gzip stream.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
+ version assumed by the caller, or Z_STREAM_ERROR if the parameters are
+ invalid, such as a null pointer to the structure. msg is set to null if
+ there is no error message. inflateInit2 does not perform any decompression
+ apart from possibly reading the zlib header if present: actual decompression
+ will be done by inflate(). (So next_in and avail_in may be modified, but
+ next_out and avail_out are unused and unchanged.) The current implementation
+ of inflateInit2() does not process any header information -- that is
+ deferred until inflate() is called.
+*/
+
+ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the decompression dictionary from the given uncompressed byte
+ sequence. This function must be called immediately after a call of inflate,
+ if that call returned Z_NEED_DICT. The dictionary chosen by the compressor
+ can be determined from the Adler-32 value returned by that call of inflate.
+ The compressor and decompressor must use exactly the same dictionary (see
+ deflateSetDictionary). For raw inflate, this function can be called at any
+ time to set the dictionary. If the provided dictionary is smaller than the
+ window and there is already data in the window, then the provided dictionary
+ will amend what's there. The application must ensure that the dictionary
+ that was used for compression is provided.
+
+ inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
+ parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is
+ inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
+ expected one (incorrect Adler-32 value). inflateSetDictionary does not
+ perform any decompression: this will be done by subsequent calls of
+ inflate().
+*/
+
+ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm,
+ Bytef *dictionary,
+ uInt *dictLength));
+/*
+ Returns the sliding dictionary being maintained by inflate. dictLength is
+ set to the number of bytes in the dictionary, and that many bytes are copied
+ to dictionary. dictionary must have enough space, where 32768 bytes is
+ always enough. If inflateGetDictionary() is called with dictionary equal to
+ Z_NULL, then only the dictionary length is returned, and nothing is copied.
+ Similarly, if dictLength is Z_NULL, then it is not set.
+
+ inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the
+ stream state is inconsistent.
+*/
+
+ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm));
+/*
+ Skips invalid compressed data until a possible full flush point (see above
+ for the description of deflate with Z_FULL_FLUSH) can be found, or until all
+ available input is skipped. No output is provided.
+
+ inflateSync searches for a 00 00 FF FF pattern in the compressed data.
+ All full flush points have this pattern, but not all occurrences of this
+ pattern are full flush points.
+
+ inflateSync returns Z_OK if a possible full flush point has been found,
+ Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point
+ has been found, or Z_STREAM_ERROR if the stream structure was inconsistent.
+ In the success case, the application may save the current value of
+ total_in which indicates where valid compressed data was found. In the
+ error case, the application may repeatedly call inflateSync, providing more
+ input each time, until success or end of the input data.
+*/
+
+ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest,
+ z_streamp source));
+/*
+ Sets the destination stream as a complete copy of the source stream.
+
+ This function can be useful when randomly accessing a large stream. The
+ first pass through the stream can periodically record the inflate state,
+ allowing restarting inflate at those points when randomly accessing the
+ stream.
+
+ inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
+ (such as zalloc being Z_NULL). msg is left unchanged in both source and
+ destination.
+*/
+
+ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate the internal decompression state. The
+ stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being Z_NULL).
+*/
+
+ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm,
+ int windowBits));
+/*
+ This function is the same as inflateReset, but it also permits changing
+ the wrap and window size requests. The windowBits parameter is interpreted
+ the same as it is for inflateInit2. If the window size is changed, then the
+ memory allocated for the window is freed, and the window will be reallocated
+ by inflate() if needed.
+
+ inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being Z_NULL), or if
+ the windowBits parameter is invalid.
+*/
+
+ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm,
+ int bits,
+ int value));
+/*
+ This function inserts bits in the inflate input stream. The intent is
+ that this function is used to start inflating at a bit position in the
+ middle of a byte. The provided bits will be used before any bytes are used
+ from next_in. This function should only be used with raw inflate, and
+ should be used before the first inflate() call after inflateInit2() or
+ inflateReset(). bits must be less than or equal to 16, and that many of the
+ least significant bits of value will be inserted in the input.
+
+ If bits is negative, then the input stream bit buffer is emptied. Then
+ inflatePrime() can be called again to put bits in the buffer. This is used
+ to clear out bits leftover after feeding inflate a block description prior
+ to feeding inflate codes.
+
+ inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent.
+*/
+
+ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm));
+/*
+ This function returns two values, one in the lower 16 bits of the return
+ value, and the other in the remaining upper bits, obtained by shifting the
+ return value down 16 bits. If the upper value is -1 and the lower value is
+ zero, then inflate() is currently decoding information outside of a block.
+ If the upper value is -1 and the lower value is non-zero, then inflate is in
+ the middle of a stored block, with the lower value equaling the number of
+ bytes from the input remaining to copy. If the upper value is not -1, then
+ it is the number of bits back from the current bit position in the input of
+ the code (literal or length/distance pair) currently being processed. In
+ that case the lower value is the number of bytes already emitted for that
+ code.
+
+ A code is being processed if inflate is waiting for more input to complete
+ decoding of the code, or if it has completed decoding but is waiting for
+ more output space to write the literal or match data.
+
+ inflateMark() is used to mark locations in the input data for random
+ access, which may be at bit positions, and to note those cases where the
+ output of a code may span boundaries of random access blocks. The current
+ location in the input stream can be determined from avail_in and data_type
+ as noted in the description for the Z_BLOCK flush parameter for inflate.
+
+ inflateMark returns the value noted above, or -65536 if the provided
+ source stream state was inconsistent.
+*/
+
+ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm,
+ gz_headerp head));
+/*
+ inflateGetHeader() requests that gzip header information be stored in the
+ provided gz_header structure. inflateGetHeader() may be called after
+ inflateInit2() or inflateReset(), and before the first call of inflate().
+ As inflate() processes the gzip stream, head->done is zero until the header
+ is completed, at which time head->done is set to one. If a zlib stream is
+ being decoded, then head->done is set to -1 to indicate that there will be
+ no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be
+ used to force inflate() to return immediately after header processing is
+ complete and before any actual data is decompressed.
+
+ The text, time, xflags, and os fields are filled in with the gzip header
+ contents. hcrc is set to true if there is a header CRC. (The header CRC
+ was valid if done is set to one.) If extra is not Z_NULL, then extra_max
+ contains the maximum number of bytes to write to extra. Once done is true,
+ extra_len contains the actual extra field length, and extra contains the
+ extra field, or that field truncated if extra_max is less than extra_len.
+ If name is not Z_NULL, then up to name_max characters are written there,
+ terminated with a zero unless the length is greater than name_max. If
+ comment is not Z_NULL, then up to comm_max characters are written there,
+ terminated with a zero unless the length is greater than comm_max. When any
+ of extra, name, or comment are not Z_NULL and the respective field is not
+ present in the header, then that field is set to Z_NULL to signal its
+ absence. This allows the use of deflateSetHeader() with the returned
+ structure to duplicate the header. However if those fields are set to
+ allocated memory, then the application will need to save those pointers
+ elsewhere so that they can be eventually freed.
+
+ If inflateGetHeader is not used, then the header information is simply
+ discarded. The header is always checked for validity, including the header
+ CRC if present. inflateReset() will reset the process to discard the header
+ information. The application would need to call inflateGetHeader() again to
+ retrieve the header from the next gzip stream.
+
+ inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent.
+*/
+
+/*
+ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits,
+ unsigned char FAR *window));
+
+ Initialize the internal stream state for decompression using inflateBack()
+ calls. The fields zalloc, zfree and opaque in strm must be initialized
+ before the call. If zalloc and zfree are Z_NULL, then the default library-
+ derived memory allocation routines are used. windowBits is the base two
+ logarithm of the window size, in the range 8..15. window is a caller
+ supplied buffer of that size. Except for special applications where it is
+ assured that deflate was used with small window sizes, windowBits must be 15
+ and a 32K byte window must be supplied to be able to decompress general
+ deflate streams.
+
+ See inflateBack() for the usage of these routines.
+
+ inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of
+ the parameters are invalid, Z_MEM_ERROR if the internal state could not be
+ allocated, or Z_VERSION_ERROR if the version of the library does not match
+ the version of the header file.
+*/
+
+typedef unsigned (*in_func) OF((void FAR *,
+ z_const unsigned char FAR * FAR *));
+typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned));
+
+ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm,
+ in_func in, void FAR *in_desc,
+ out_func out, void FAR *out_desc));
+/*
+ inflateBack() does a raw inflate with a single call using a call-back
+ interface for input and output. This is potentially more efficient than
+ inflate() for file i/o applications, in that it avoids copying between the
+ output and the sliding window by simply making the window itself the output
+ buffer. inflate() can be faster on modern CPUs when used with large
+ buffers. inflateBack() trusts the application to not change the output
+ buffer passed by the output function, at least until inflateBack() returns.
+
+ inflateBackInit() must be called first to allocate the internal state
+ and to initialize the state with the user-provided window buffer.
+ inflateBack() may then be used multiple times to inflate a complete, raw
+ deflate stream with each call. inflateBackEnd() is then called to free the
+ allocated state.
+
+ A raw deflate stream is one with no zlib or gzip header or trailer.
+ This routine would normally be used in a utility that reads zip or gzip
+ files and writes out uncompressed files. The utility would decode the
+ header and process the trailer on its own, hence this routine expects only
+ the raw deflate stream to decompress. This is different from the default
+ behavior of inflate(), which expects a zlib header and trailer around the
+ deflate stream.
+
+ inflateBack() uses two subroutines supplied by the caller that are then
+ called by inflateBack() for input and output. inflateBack() calls those
+ routines until it reads a complete deflate stream and writes out all of the
+ uncompressed data, or until it encounters an error. The function's
+ parameters and return types are defined above in the in_func and out_func
+ typedefs. inflateBack() will call in(in_desc, &buf) which should return the
+ number of bytes of provided input, and a pointer to that input in buf. If
+ there is no input available, in() must return zero -- buf is ignored in that
+ case -- and inflateBack() will return a buffer error. inflateBack() will
+ call out(out_desc, buf, len) to write the uncompressed data buf[0..len-1].
+ out() should return zero on success, or non-zero on failure. If out()
+ returns non-zero, inflateBack() will return with an error. Neither in() nor
+ out() are permitted to change the contents of the window provided to
+ inflateBackInit(), which is also the buffer that out() uses to write from.
+ The length written by out() will be at most the window size. Any non-zero
+ amount of input may be provided by in().
+
+ For convenience, inflateBack() can be provided input on the first call by
+ setting strm->next_in and strm->avail_in. If that input is exhausted, then
+ in() will be called. Therefore strm->next_in must be initialized before
+ calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called
+ immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in
+ must also be initialized, and then if strm->avail_in is not zero, input will
+ initially be taken from strm->next_in[0 .. strm->avail_in - 1].
+
+ The in_desc and out_desc parameters of inflateBack() are passed as the
+ first parameter of in() and out() respectively when they are called. These
+ descriptors can be optionally used to pass any information that the caller-
+ supplied in() and out() functions need to do their job.
+
+ On return, inflateBack() will set strm->next_in and strm->avail_in to
+ pass back any unused input that was provided by the last in() call. The
+ return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR
+ if in() or out() returned an error, Z_DATA_ERROR if there was a format error
+ in the deflate stream (in which case strm->msg is set to indicate the nature
+ of the error), or Z_STREAM_ERROR if the stream was not properly initialized.
+ In the case of Z_BUF_ERROR, an input or output error can be distinguished
+ using strm->next_in which will be Z_NULL only if in() returned an error. If
+ strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning
+ non-zero. (in() will always be called before out(), so strm->next_in is
+ assured to be defined if out() returns non-zero.) Note that inflateBack()
+ cannot return Z_OK.
+*/
+
+ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm));
+/*
+ All memory allocated by inflateBackInit() is freed.
+
+ inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream
+ state was inconsistent.
+*/
+
+ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void));
+/* Return flags indicating compile-time options.
+
+ Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other:
+ 1.0: size of uInt
+ 3.2: size of uLong
+ 5.4: size of voidpf (pointer)
+ 7.6: size of z_off_t
+
+ Compiler, assembler, and debug options:
+ 8: ZLIB_DEBUG
+ 9: ASMV or ASMINF -- use ASM code
+ 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention
+ 11: 0 (reserved)
+
+ One-time table building (smaller code, but not thread-safe if true):
+ 12: BUILDFIXED -- build static block decoding tables when needed
+ 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed
+ 14,15: 0 (reserved)
+
+ Library content (indicates missing functionality):
+ 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking
+ deflate code when not needed)
+ 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect
+ and decode gzip streams (to avoid linking crc code)
+ 18-19: 0 (reserved)
+
+ Operation variations (changes in library functionality):
+ 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate
+ 21: FASTEST -- deflate algorithm with only one, lowest compression level
+ 22,23: 0 (reserved)
+
+ The sprintf variant used by gzprintf (zero is best):
+ 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format
+ 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure!
+ 26: 0 = returns value, 1 = void -- 1 means inferred string length returned
+
+ Remainder:
+ 27-31: 0 (reserved)
+ */
+
+#ifndef Z_SOLO
+
+ /* utility functions */
+
+/*
+ The following utility functions are implemented on top of the basic
+ stream-oriented functions. To simplify the interface, some default options
+ are assumed (compression level and memory usage, standard memory allocation
+ functions). The source code of these utility functions can be modified if
+ you need special options.
+*/
+
+ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen));
+/*
+ Compresses the source buffer into the destination buffer. sourceLen is
+ the byte length of the source buffer. Upon entry, destLen is the total size
+ of the destination buffer, which must be at least the value returned by
+ compressBound(sourceLen). Upon exit, destLen is the actual size of the
+ compressed data. compress() is equivalent to compress2() with a level
+ parameter of Z_DEFAULT_COMPRESSION.
+
+ compress returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if there was not enough room in the output
+ buffer.
+*/
+
+ZEXTERN int ZEXPORT compress2 OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen,
+ int level));
+/*
+ Compresses the source buffer into the destination buffer. The level
+ parameter has the same meaning as in deflateInit. sourceLen is the byte
+ length of the source buffer. Upon entry, destLen is the total size of the
+ destination buffer, which must be at least the value returned by
+ compressBound(sourceLen). Upon exit, destLen is the actual size of the
+ compressed data.
+
+ compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_BUF_ERROR if there was not enough room in the output buffer,
+ Z_STREAM_ERROR if the level parameter is invalid.
+*/
+
+ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen));
+/*
+ compressBound() returns an upper bound on the compressed size after
+ compress() or compress2() on sourceLen bytes. It would be used before a
+ compress() or compress2() call to allocate the destination buffer.
+*/
+
+ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen));
+/*
+ Decompresses the source buffer into the destination buffer. sourceLen is
+ the byte length of the source buffer. Upon entry, destLen is the total size
+ of the destination buffer, which must be large enough to hold the entire
+ uncompressed data. (The size of the uncompressed data must have been saved
+ previously by the compressor and transmitted to the decompressor by some
+ mechanism outside the scope of this compression library.) Upon exit, destLen
+ is the actual size of the uncompressed data.
+
+ uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if there was not enough room in the output
+ buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In
+ the case where there is not enough room, uncompress() will fill the output
+ buffer with the uncompressed data up to that point.
+*/
+
+ZEXTERN int ZEXPORT uncompress2 OF((Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen));
+/*
+ Same as uncompress, except that sourceLen is a pointer, where the
+ length of the source is *sourceLen. On return, *sourceLen is the number of
+ source bytes consumed.
+*/
+
+ /* gzip file access functions */
+
+/*
+ This library supports reading and writing files in gzip (.gz) format with
+ an interface similar to that of stdio, using the functions that start with
+ "gz". The gzip format is different from the zlib format. gzip is a gzip
+ wrapper, documented in RFC 1952, wrapped around a deflate stream.
+*/
+
+typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */
+
+/*
+ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode));
+
+ Opens a gzip (.gz) file for reading or writing. The mode parameter is as
+ in fopen ("rb" or "wb") but can also include a compression level ("wb9") or
+ a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only
+ compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F'
+ for fixed code compression as in "wb9F". (See the description of
+ deflateInit2 for more information about the strategy parameter.) 'T' will
+ request transparent writing or appending with no compression and not using
+ the gzip format.
+
+ "a" can be used instead of "w" to request that the gzip stream that will
+ be written be appended to the file. "+" will result in an error, since
+ reading and writing to the same gzip file is not supported. The addition of
+ "x" when writing will create the file exclusively, which fails if the file
+ already exists. On systems that support it, the addition of "e" when
+ reading or writing will set the flag to close the file on an execve() call.
+
+ These functions, as well as gzip, will read and decode a sequence of gzip
+ streams in a file. The append function of gzopen() can be used to create
+ such a file. (Also see gzflush() for another way to do this.) When
+ appending, gzopen does not test whether the file begins with a gzip stream,
+ nor does it look for the end of the gzip streams to begin appending. gzopen
+ will simply append a gzip stream to the existing file.
+
+ gzopen can be used to read a file which is not in gzip format; in this
+ case gzread will directly read from the file without decompression. When
+ reading, this will be detected automatically by looking for the magic two-
+ byte gzip header.
+
+ gzopen returns NULL if the file could not be opened, if there was
+ insufficient memory to allocate the gzFile state, or if an invalid mode was
+ specified (an 'r', 'w', or 'a' was not provided, or '+' was provided).
+ errno can be checked to determine if the reason gzopen failed was that the
+ file could not be opened.
+*/
+
+ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode));
+/*
+ gzdopen associates a gzFile with the file descriptor fd. File descriptors
+ are obtained from calls like open, dup, creat, pipe or fileno (if the file
+ has been previously opened with fopen). The mode parameter is as in gzopen.
+
+ The next call of gzclose on the returned gzFile will also close the file
+ descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor
+ fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd,
+ mode);. The duplicated descriptor should be saved to avoid a leak, since
+ gzdopen does not close fd if it fails. If you are using fileno() to get the
+ file descriptor from a FILE *, then you will have to use dup() to avoid
+ double-close()ing the file descriptor. Both gzclose() and fclose() will
+ close the associated file descriptor, so they need to have different file
+ descriptors.
+
+ gzdopen returns NULL if there was insufficient memory to allocate the
+ gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not
+ provided, or '+' was provided), or if fd is -1. The file descriptor is not
+ used until the next gz* read, write, seek, or close operation, so gzdopen
+ will not detect if fd is invalid (unless fd is -1).
+*/
+
+ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size));
+/*
+ Set the internal buffer size used by this library's functions. The
+ default buffer size is 8192 bytes. This function must be called after
+ gzopen() or gzdopen(), and before any other calls that read or write the
+ file. The buffer memory allocation is always deferred to the first read or
+ write. Three times that size in buffer space is allocated. A larger buffer
+ size of, for example, 64K or 128K bytes will noticeably increase the speed
+ of decompression (reading).
+
+ The new buffer size also affects the maximum length for gzprintf().
+
+ gzbuffer() returns 0 on success, or -1 on failure, such as being called
+ too late.
+*/
+
+ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy));
+/*
+ Dynamically update the compression level or strategy. See the description
+ of deflateInit2 for the meaning of these parameters. Previously provided
+ data is flushed before the parameter change.
+
+ gzsetparams returns Z_OK if success, Z_STREAM_ERROR if the file was not
+ opened for writing, Z_ERRNO if there is an error writing the flushed data,
+ or Z_MEM_ERROR if there is a memory allocation error.
+*/
+
+ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len));
+/*
+ Reads the given number of uncompressed bytes from the compressed file. If
+ the input file is not in gzip format, gzread copies the given number of
+ bytes into the buffer directly from the file.
+
+ After reaching the end of a gzip stream in the input, gzread will continue
+ to read, looking for another gzip stream. Any number of gzip streams may be
+ concatenated in the input file, and will all be decompressed by gzread().
+ If something other than a gzip stream is encountered after a gzip stream,
+ that remaining trailing garbage is ignored (and no error is returned).
+
+ gzread can be used to read a gzip file that is being concurrently written.
+ Upon reaching the end of the input, gzread will return with the available
+ data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then
+ gzclearerr can be used to clear the end of file indicator in order to permit
+ gzread to be tried again. Z_OK indicates that a gzip stream was completed
+ on the last gzread. Z_BUF_ERROR indicates that the input file ended in the
+ middle of a gzip stream. Note that gzread does not return -1 in the event
+ of an incomplete gzip stream. This error is deferred until gzclose(), which
+ will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip
+ stream. Alternatively, gzerror can be used before gzclose to detect this
+ case.
+
+ gzread returns the number of uncompressed bytes actually read, less than
+ len for end of file, or -1 for error. If len is too large to fit in an int,
+ then nothing is read, -1 is returned, and the error state is set to
+ Z_STREAM_ERROR.
+*/
+
+ZEXTERN z_size_t ZEXPORT gzfread OF((voidp buf, z_size_t size, z_size_t nitems,
+ gzFile file));
+/*
+ Read up to nitems items of size size from file to buf, otherwise operating
+ as gzread() does. This duplicates the interface of stdio's fread(), with
+ size_t request and return types. If the library defines size_t, then
+ z_size_t is identical to size_t. If not, then z_size_t is an unsigned
+ integer type that can contain a pointer.
+
+ gzfread() returns the number of full items read of size size, or zero if
+ the end of the file was reached and a full item could not be read, or if
+ there was an error. gzerror() must be consulted if zero is returned in
+ order to determine if there was an error. If the multiplication of size and
+ nitems overflows, i.e. the product does not fit in a z_size_t, then nothing
+ is read, zero is returned, and the error state is set to Z_STREAM_ERROR.
+
+ In the event that the end of file is reached and only a partial item is
+ available at the end, i.e. the remaining uncompressed data length is not a
+ multiple of size, then the final partial item is nevertheless read into buf
+ and the end-of-file flag is set. The length of the partial item read is not
+ provided, but could be inferred from the result of gztell(). This behavior
+ is the same as the behavior of fread() implementations in common libraries,
+ but it prevents the direct use of gzfread() to read a concurrently written
+ file, resetting and retrying on end-of-file, when size is not 1.
+*/
+
+ZEXTERN int ZEXPORT gzwrite OF((gzFile file,
+ voidpc buf, unsigned len));
+/*
+ Writes the given number of uncompressed bytes into the compressed file.
+ gzwrite returns the number of uncompressed bytes written or 0 in case of
+ error.
+*/
+
+ZEXTERN z_size_t ZEXPORT gzfwrite OF((voidpc buf, z_size_t size,
+ z_size_t nitems, gzFile file));
+/*
+ gzfwrite() writes nitems items of size size from buf to file, duplicating
+ the interface of stdio's fwrite(), with size_t request and return types. If
+ the library defines size_t, then z_size_t is identical to size_t. If not,
+ then z_size_t is an unsigned integer type that can contain a pointer.
+
+ gzfwrite() returns the number of full items written of size size, or zero
+ if there was an error. If the multiplication of size and nitems overflows,
+ i.e. the product does not fit in a z_size_t, then nothing is written, zero
+ is returned, and the error state is set to Z_STREAM_ERROR.
+*/
+
+ZEXTERN int ZEXPORTVA gzprintf Z_ARG((gzFile file, const char *format, ...));
+/*
+ Converts, formats, and writes the arguments to the compressed file under
+ control of the format string, as in fprintf. gzprintf returns the number of
+ uncompressed bytes actually written, or a negative zlib error code in case
+ of error. The number of uncompressed bytes written is limited to 8191, or
+ one less than the buffer size given to gzbuffer(). The caller should assure
+ that this limit is not exceeded. If it is exceeded, then gzprintf() will
+ return an error (0) with nothing written. In this case, there may also be a
+ buffer overflow with unpredictable consequences, which is possible only if
+ zlib was compiled with the insecure functions sprintf() or vsprintf()
+ because the secure snprintf() or vsnprintf() functions were not available.
+ This can be determined using zlibCompileFlags().
+*/
+
+ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s));
+/*
+ Writes the given null-terminated string to the compressed file, excluding
+ the terminating null character.
+
+ gzputs returns the number of characters written, or -1 in case of error.
+*/
+
+ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len));
+/*
+ Reads bytes from the compressed file until len-1 characters are read, or a
+ newline character is read and transferred to buf, or an end-of-file
+ condition is encountered. If any characters are read or if len == 1, the
+ string is terminated with a null character. If no characters are read due
+ to an end-of-file or len < 1, then the buffer is left untouched.
+
+ gzgets returns buf which is a null-terminated string, or it returns NULL
+ for end-of-file or in case of error. If there was an error, the contents at
+ buf are indeterminate.
+*/
+
+ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c));
+/*
+ Writes c, converted to an unsigned char, into the compressed file. gzputc
+ returns the value that was written, or -1 in case of error.
+*/
+
+ZEXTERN int ZEXPORT gzgetc OF((gzFile file));
+/*
+ Reads one byte from the compressed file. gzgetc returns this byte or -1
+ in case of end of file or error. This is implemented as a macro for speed.
+ As such, it does not do all of the checking the other functions do. I.e.
+ it does not check to see if file is NULL, nor whether the structure file
+ points to has been clobbered or not.
+*/
+
+ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file));
+/*
+ Push one character back onto the stream to be read as the first character
+ on the next read. At least one character of push-back is allowed.
+ gzungetc() returns the character pushed, or -1 on failure. gzungetc() will
+ fail if c is -1, and may fail if a character has been pushed but not read
+ yet. If gzungetc is used immediately after gzopen or gzdopen, at least the
+ output buffer size of pushed characters is allowed. (See gzbuffer above.)
+ The pushed character will be discarded if the stream is repositioned with
+ gzseek() or gzrewind().
+*/
+
+ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush));
+/*
+ Flushes all pending output into the compressed file. The parameter flush
+ is as in the deflate() function. The return value is the zlib error number
+ (see function gzerror below). gzflush is only permitted when writing.
+
+ If the flush parameter is Z_FINISH, the remaining data is written and the
+ gzip stream is completed in the output. If gzwrite() is called again, a new
+ gzip stream will be started in the output. gzread() is able to read such
+ concatenated gzip streams.
+
+ gzflush should be called only when strictly necessary because it will
+ degrade compression if called too often.
+*/
+
+/*
+ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file,
+ z_off_t offset, int whence));
+
+ Sets the starting position for the next gzread or gzwrite on the given
+ compressed file. The offset represents a number of bytes in the
+ uncompressed data stream. The whence parameter is defined as in lseek(2);
+ the value SEEK_END is not supported.
+
+ If the file is opened for reading, this function is emulated but can be
+ extremely slow. If the file is opened for writing, only forward seeks are
+ supported; gzseek then compresses a sequence of zeroes up to the new
+ starting position.
+
+ gzseek returns the resulting offset location as measured in bytes from
+ the beginning of the uncompressed stream, or -1 in case of error, in
+ particular if the file is opened for writing and the new starting position
+ would be before the current position.
+*/
+
+ZEXTERN int ZEXPORT gzrewind OF((gzFile file));
+/*
+ Rewinds the given file. This function is supported only for reading.
+
+ gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET)
+*/
+
+/*
+ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file));
+
+ Returns the starting position for the next gzread or gzwrite on the given
+ compressed file. This position represents a number of bytes in the
+ uncompressed data stream, and is zero when starting, even if appending or
+ reading a gzip stream from the middle of a file using gzdopen().
+
+ gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR)
+*/
+
+/*
+ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file));
+
+ Returns the current offset in the file being read or written. This offset
+ includes the count of bytes that precede the gzip stream, for example when
+ appending or when using gzdopen() for reading. When reading, the offset
+ does not include as yet unused buffered input. This information can be used
+ for a progress indicator. On error, gzoffset() returns -1.
+*/
+
+ZEXTERN int ZEXPORT gzeof OF((gzFile file));
+/*
+ Returns true (1) if the end-of-file indicator has been set while reading,
+ false (0) otherwise. Note that the end-of-file indicator is set only if the
+ read tried to go past the end of the input, but came up short. Therefore,
+ just like feof(), gzeof() may return false even if there is no more data to
+ read, in the event that the last read request was for the exact number of
+ bytes remaining in the input file. This will happen if the input file size
+ is an exact multiple of the buffer size.
+
+ If gzeof() returns true, then the read functions will return no more data,
+ unless the end-of-file indicator is reset by gzclearerr() and the input file
+ has grown since the previous end of file was detected.
+*/
+
+ZEXTERN int ZEXPORT gzdirect OF((gzFile file));
+/*
+ Returns true (1) if file is being copied directly while reading, or false
+ (0) if file is a gzip stream being decompressed.
+
+ If the input file is empty, gzdirect() will return true, since the input
+ does not contain a gzip stream.
+
+ If gzdirect() is used immediately after gzopen() or gzdopen() it will
+ cause buffers to be allocated to allow reading the file to determine if it
+ is a gzip file. Therefore if gzbuffer() is used, it should be called before
+ gzdirect().
+
+ When writing, gzdirect() returns true (1) if transparent writing was
+ requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note:
+ gzdirect() is not needed when writing. Transparent writing must be
+ explicitly requested, so the application already knows the answer. When
+ linking statically, using gzdirect() will include all of the zlib code for
+ gzip file reading and decompression, which may not be desired.)
+*/
+
+ZEXTERN int ZEXPORT gzclose OF((gzFile file));
+/*
+ Flushes all pending output if necessary, closes the compressed file and
+ deallocates the (de)compression state. Note that once file is closed, you
+ cannot call gzerror with file, since its structures have been deallocated.
+ gzclose must not be called more than once on the same file, just as free
+ must not be called more than once on the same allocation.
+
+ gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a
+ file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the
+ last read ended in the middle of a gzip stream, or Z_OK on success.
+*/
+
+ZEXTERN int ZEXPORT gzclose_r OF((gzFile file));
+ZEXTERN int ZEXPORT gzclose_w OF((gzFile file));
+/*
+ Same as gzclose(), but gzclose_r() is only for use when reading, and
+ gzclose_w() is only for use when writing or appending. The advantage to
+ using these instead of gzclose() is that they avoid linking in zlib
+ compression or decompression code that is not used when only reading or only
+ writing respectively. If gzclose() is used, then both compression and
+ decompression code will be included the application when linking to a static
+ zlib library.
+*/
+
+ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum));
+/*
+ Returns the error message for the last error which occurred on the given
+ compressed file. errnum is set to zlib error number. If an error occurred
+ in the file system and not in the compression library, errnum is set to
+ Z_ERRNO and the application may consult errno to get the exact error code.
+
+ The application must not modify the returned string. Future calls to
+ this function may invalidate the previously returned string. If file is
+ closed, then the string previously returned by gzerror will no longer be
+ available.
+
+ gzerror() should be used to distinguish errors from end-of-file for those
+ functions above that do not distinguish those cases in their return values.
+*/
+
+ZEXTERN void ZEXPORT gzclearerr OF((gzFile file));
+/*
+ Clears the error and end-of-file flags for file. This is analogous to the
+ clearerr() function in stdio. This is useful for continuing to read a gzip
+ file that is being written concurrently.
+*/
+
+#endif /* !Z_SOLO */
+
+ /* checksum functions */
+
+/*
+ These functions are not related to compression but are exported
+ anyway because they might be useful in applications using the compression
+ library.
+*/
+
+ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len));
+/*
+ Update a running Adler-32 checksum with the bytes buf[0..len-1] and
+ return the updated checksum. If buf is Z_NULL, this function returns the
+ required initial value for the checksum.
+
+ An Adler-32 checksum is almost as reliable as a CRC-32 but can be computed
+ much faster.
+
+ Usage example:
+
+ uLong adler = adler32(0L, Z_NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ adler = adler32(adler, buffer, length);
+ }
+ if (adler != original_adler) error();
+*/
+
+ZEXTERN uLong ZEXPORT adler32_z OF((uLong adler, const Bytef *buf,
+ z_size_t len));
+/*
+ Same as adler32(), but with a size_t length.
+*/
+
+/*
+ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2,
+ z_off_t len2));
+
+ Combine two Adler-32 checksums into one. For two sequences of bytes, seq1
+ and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for
+ each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of
+ seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note
+ that the z_off_t type (like off_t) is a signed integer. If len2 is
+ negative, the result has no meaning or utility.
+*/
+
+ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len));
+/*
+ Update a running CRC-32 with the bytes buf[0..len-1] and return the
+ updated CRC-32. If buf is Z_NULL, this function returns the required
+ initial value for the crc. Pre- and post-conditioning (one's complement) is
+ performed within this function so it shouldn't be done by the application.
+
+ Usage example:
+
+ uLong crc = crc32(0L, Z_NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ crc = crc32(crc, buffer, length);
+ }
+ if (crc != original_crc) error();
+*/
+
+ZEXTERN uLong ZEXPORT crc32_z OF((uLong adler, const Bytef *buf,
+ z_size_t len));
+/*
+ Same as crc32(), but with a size_t length.
+*/
+
+/*
+ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2));
+
+ Combine two CRC-32 check values into one. For two sequences of bytes,
+ seq1 and seq2 with lengths len1 and len2, CRC-32 check values were
+ calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32
+ check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and
+ len2.
+*/
+
+
+ /* various hacks, don't look :) */
+
+/* deflateInit and inflateInit are macros to allow checking the zlib version
+ * and the compiler's view of z_stream:
+ */
+ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level,
+ const char *version, int stream_size));
+ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm,
+ const char *version, int stream_size));
+ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int level, int method,
+ int windowBits, int memLevel,
+ int strategy, const char *version,
+ int stream_size));
+ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int windowBits,
+ const char *version, int stream_size));
+ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits,
+ unsigned char FAR *window,
+ const char *version,
+ int stream_size));
+#ifdef Z_PREFIX_SET
+# define z_deflateInit(strm, level) \
+ deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream))
+# define z_inflateInit(strm) \
+ inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream))
+# define z_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
+ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
+ (strategy), ZLIB_VERSION, (int)sizeof(z_stream))
+# define z_inflateInit2(strm, windowBits) \
+ inflateInit2_((strm), (windowBits), ZLIB_VERSION, \
+ (int)sizeof(z_stream))
+# define z_inflateBackInit(strm, windowBits, window) \
+ inflateBackInit_((strm), (windowBits), (window), \
+ ZLIB_VERSION, (int)sizeof(z_stream))
+#else
+# define deflateInit(strm, level) \
+ deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream))
+# define inflateInit(strm) \
+ inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream))
+# define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
+ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
+ (strategy), ZLIB_VERSION, (int)sizeof(z_stream))
+# define inflateInit2(strm, windowBits) \
+ inflateInit2_((strm), (windowBits), ZLIB_VERSION, \
+ (int)sizeof(z_stream))
+# define inflateBackInit(strm, windowBits, window) \
+ inflateBackInit_((strm), (windowBits), (window), \
+ ZLIB_VERSION, (int)sizeof(z_stream))
+#endif
+
+#ifndef Z_SOLO
+
+/* gzgetc() macro and its supporting function and exposed data structure. Note
+ * that the real internal state is much larger than the exposed structure.
+ * This abbreviated structure exposes just enough for the gzgetc() macro. The
+ * user should not mess with these exposed elements, since their names or
+ * behavior could change in the future, perhaps even capriciously. They can
+ * only be used by the gzgetc() macro. You have been warned.
+ */
+struct gzFile_s {
+ unsigned have;
+ unsigned char *next;
+ z_off64_t pos;
+};
+ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); /* backward compatibility */
+#ifdef Z_PREFIX_SET
+# undef z_gzgetc
+# define z_gzgetc(g) \
+ ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g))
+#else
+# define gzgetc(g) \
+ ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g))
+#endif
+
+/* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or
+ * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if
+ * both are true, the application gets the *64 functions, and the regular
+ * functions are changed to 64 bits) -- in case these are set on systems
+ * without large file support, _LFS64_LARGEFILE must also be true
+ */
+#ifdef Z_LARGE64
+ ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *));
+ ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int));
+ ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile));
+ ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile));
+ ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t));
+ ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t));
+#endif
+
+#if !defined(ZLIB_INTERNAL) && defined(Z_WANT64)
+# ifdef Z_PREFIX_SET
+# define z_gzopen z_gzopen64
+# define z_gzseek z_gzseek64
+# define z_gztell z_gztell64
+# define z_gzoffset z_gzoffset64
+# define z_adler32_combine z_adler32_combine64
+# define z_crc32_combine z_crc32_combine64
+# else
+# define gzopen gzopen64
+# define gzseek gzseek64
+# define gztell gztell64
+# define gzoffset gzoffset64
+# define adler32_combine adler32_combine64
+# define crc32_combine crc32_combine64
+# endif
+# ifndef Z_LARGE64
+ ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *));
+ ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int));
+ ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile));
+ ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile));
+ ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t));
+ ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t));
+# endif
+#else
+ ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *));
+ ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int));
+ ZEXTERN z_off_t ZEXPORT gztell OF((gzFile));
+ ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile));
+ ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t));
+ ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t));
+#endif
+
+#else /* Z_SOLO */
+
+ ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t));
+ ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t));
+
+#endif /* !Z_SOLO */
+
+/* undocumented functions */
+ZEXTERN const char * ZEXPORT zError OF((int));
+ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp));
+ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table OF((void));
+ZEXTERN int ZEXPORT inflateUndermine OF((z_streamp, int));
+ZEXTERN int ZEXPORT inflateValidate OF((z_streamp, int));
+ZEXTERN unsigned long ZEXPORT inflateCodesUsed OF ((z_streamp));
+ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp));
+ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp));
+#if (defined(_WIN32) || defined(__CYGWIN__)) && !defined(Z_SOLO)
+ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path,
+ const char *mode));
+#endif
+#if defined(STDC) || defined(Z_HAVE_STDARG_H)
+# ifndef Z_SOLO
+ZEXTERN int ZEXPORTVA gzvprintf Z_ARG((gzFile file,
+ const char *format,
+ va_list va));
+# endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ZLIB_H */
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zlib.pc.in b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zlib.pc.in
new file mode 100644
index 00000000..7e5acf9c
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zlib.pc.in
@@ -0,0 +1,13 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+sharedlibdir=@sharedlibdir@
+includedir=@includedir@
+
+Name: zlib
+Description: zlib compression library
+Version: @VERSION@
+
+Requires:
+Libs: -L${libdir} -L${sharedlibdir} -lz
+Cflags: -I${includedir}
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zutil.c b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zutil.c
new file mode 100644
index 00000000..a76c6b0c
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zutil.c
@@ -0,0 +1,325 @@
+/* zutil.c -- target dependent utility functions for the compression library
+ * Copyright (C) 1995-2017 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#include "zutil.h"
+#ifndef Z_SOLO
+# include "gzguts.h"
+#endif
+
+z_const char * const z_errmsg[10] = {
+ (z_const char *)"need dictionary", /* Z_NEED_DICT 2 */
+ (z_const char *)"stream end", /* Z_STREAM_END 1 */
+ (z_const char *)"", /* Z_OK 0 */
+ (z_const char *)"file error", /* Z_ERRNO (-1) */
+ (z_const char *)"stream error", /* Z_STREAM_ERROR (-2) */
+ (z_const char *)"data error", /* Z_DATA_ERROR (-3) */
+ (z_const char *)"insufficient memory", /* Z_MEM_ERROR (-4) */
+ (z_const char *)"buffer error", /* Z_BUF_ERROR (-5) */
+ (z_const char *)"incompatible version",/* Z_VERSION_ERROR (-6) */
+ (z_const char *)""
+};
+
+
+const char * ZEXPORT zlibVersion()
+{
+ return ZLIB_VERSION;
+}
+
+uLong ZEXPORT zlibCompileFlags()
+{
+ uLong flags;
+
+ flags = 0;
+ switch ((int)(sizeof(uInt))) {
+ case 2: break;
+ case 4: flags += 1; break;
+ case 8: flags += 2; break;
+ default: flags += 3;
+ }
+ switch ((int)(sizeof(uLong))) {
+ case 2: break;
+ case 4: flags += 1 << 2; break;
+ case 8: flags += 2 << 2; break;
+ default: flags += 3 << 2;
+ }
+ switch ((int)(sizeof(voidpf))) {
+ case 2: break;
+ case 4: flags += 1 << 4; break;
+ case 8: flags += 2 << 4; break;
+ default: flags += 3 << 4;
+ }
+ switch ((int)(sizeof(z_off_t))) {
+ case 2: break;
+ case 4: flags += 1 << 6; break;
+ case 8: flags += 2 << 6; break;
+ default: flags += 3 << 6;
+ }
+#ifdef ZLIB_DEBUG
+ flags += 1 << 8;
+#endif
+#if defined(ASMV) || defined(ASMINF)
+ flags += 1 << 9;
+#endif
+#ifdef ZLIB_WINAPI
+ flags += 1 << 10;
+#endif
+#ifdef BUILDFIXED
+ flags += 1 << 12;
+#endif
+#ifdef DYNAMIC_CRC_TABLE
+ flags += 1 << 13;
+#endif
+#ifdef NO_GZCOMPRESS
+ flags += 1L << 16;
+#endif
+#ifdef NO_GZIP
+ flags += 1L << 17;
+#endif
+#ifdef PKZIP_BUG_WORKAROUND
+ flags += 1L << 20;
+#endif
+#ifdef FASTEST
+ flags += 1L << 21;
+#endif
+#if defined(STDC) || defined(Z_HAVE_STDARG_H)
+# ifdef NO_vsnprintf
+ flags += 1L << 25;
+# ifdef HAS_vsprintf_void
+ flags += 1L << 26;
+# endif
+# else
+# ifdef HAS_vsnprintf_void
+ flags += 1L << 26;
+# endif
+# endif
+#else
+ flags += 1L << 24;
+# ifdef NO_snprintf
+ flags += 1L << 25;
+# ifdef HAS_sprintf_void
+ flags += 1L << 26;
+# endif
+# else
+# ifdef HAS_snprintf_void
+ flags += 1L << 26;
+# endif
+# endif
+#endif
+ return flags;
+}
+
+#ifdef ZLIB_DEBUG
+#include <stdlib.h>
+# ifndef verbose
+# define verbose 0
+# endif
+int ZLIB_INTERNAL z_verbose = verbose;
+
+void ZLIB_INTERNAL z_error (m)
+ char *m;
+{
+ fprintf(stderr, "%s\n", m);
+ exit(1);
+}
+#endif
+
+/* exported to allow conversion of error code to string for compress() and
+ * uncompress()
+ */
+const char * ZEXPORT zError(err)
+ int err;
+{
+ return ERR_MSG(err);
+}
+
+#if defined(_WIN32_WCE)
+ /* The Microsoft C Run-Time Library for Windows CE doesn't have
+ * errno. We define it as a global variable to simplify porting.
+ * Its value is always 0 and should not be used.
+ */
+ int errno = 0;
+#endif
+
+#ifndef HAVE_MEMCPY
+
+void ZLIB_INTERNAL zmemcpy(dest, source, len)
+ Bytef* dest;
+ const Bytef* source;
+ uInt len;
+{
+ if (len == 0) return;
+ do {
+ *dest++ = *source++; /* ??? to be unrolled */
+ } while (--len != 0);
+}
+
+int ZLIB_INTERNAL zmemcmp(s1, s2, len)
+ const Bytef* s1;
+ const Bytef* s2;
+ uInt len;
+{
+ uInt j;
+
+ for (j = 0; j < len; j++) {
+ if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1;
+ }
+ return 0;
+}
+
+void ZLIB_INTERNAL zmemzero(dest, len)
+ Bytef* dest;
+ uInt len;
+{
+ if (len == 0) return;
+ do {
+ *dest++ = 0; /* ??? to be unrolled */
+ } while (--len != 0);
+}
+#endif
+
+#ifndef Z_SOLO
+
+#ifdef SYS16BIT
+
+#ifdef __TURBOC__
+/* Turbo C in 16-bit mode */
+
+# define MY_ZCALLOC
+
+/* Turbo C malloc() does not allow dynamic allocation of 64K bytes
+ * and farmalloc(64K) returns a pointer with an offset of 8, so we
+ * must fix the pointer. Warning: the pointer must be put back to its
+ * original form in order to free it, use zcfree().
+ */
+
+#define MAX_PTR 10
+/* 10*64K = 640K */
+
+local int next_ptr = 0;
+
+typedef struct ptr_table_s {
+ voidpf org_ptr;
+ voidpf new_ptr;
+} ptr_table;
+
+local ptr_table table[MAX_PTR];
+/* This table is used to remember the original form of pointers
+ * to large buffers (64K). Such pointers are normalized with a zero offset.
+ * Since MSDOS is not a preemptive multitasking OS, this table is not
+ * protected from concurrent access. This hack doesn't work anyway on
+ * a protected system like OS/2. Use Microsoft C instead.
+ */
+
+voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size)
+{
+ voidpf buf;
+ ulg bsize = (ulg)items*size;
+
+ (void)opaque;
+
+ /* If we allocate less than 65520 bytes, we assume that farmalloc
+ * will return a usable pointer which doesn't have to be normalized.
+ */
+ if (bsize < 65520L) {
+ buf = farmalloc(bsize);
+ if (*(ush*)&buf != 0) return buf;
+ } else {
+ buf = farmalloc(bsize + 16L);
+ }
+ if (buf == NULL || next_ptr >= MAX_PTR) return NULL;
+ table[next_ptr].org_ptr = buf;
+
+ /* Normalize the pointer to seg:0 */
+ *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4;
+ *(ush*)&buf = 0;
+ table[next_ptr++].new_ptr = buf;
+ return buf;
+}
+
+void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr)
+{
+ int n;
+
+ (void)opaque;
+
+ if (*(ush*)&ptr != 0) { /* object < 64K */
+ farfree(ptr);
+ return;
+ }
+ /* Find the original pointer */
+ for (n = 0; n < next_ptr; n++) {
+ if (ptr != table[n].new_ptr) continue;
+
+ farfree(table[n].org_ptr);
+ while (++n < next_ptr) {
+ table[n-1] = table[n];
+ }
+ next_ptr--;
+ return;
+ }
+ Assert(0, "zcfree: ptr not found");
+}
+
+#endif /* __TURBOC__ */
+
+
+#ifdef M_I86
+/* Microsoft C in 16-bit mode */
+
+# define MY_ZCALLOC
+
+#if (!defined(_MSC_VER) || (_MSC_VER <= 600))
+# define _halloc halloc
+# define _hfree hfree
+#endif
+
+voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size)
+{
+ (void)opaque;
+ return _halloc((long)items, size);
+}
+
+void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr)
+{
+ (void)opaque;
+ _hfree(ptr);
+}
+
+#endif /* M_I86 */
+
+#endif /* SYS16BIT */
+
+
+#ifndef MY_ZCALLOC /* Any system without a special alloc function */
+
+#ifndef STDC
+extern voidp malloc OF((uInt size));
+extern voidp calloc OF((uInt items, uInt size));
+extern void free OF((voidpf ptr));
+#endif
+
+voidpf ZLIB_INTERNAL zcalloc (opaque, items, size)
+ voidpf opaque;
+ unsigned items;
+ unsigned size;
+{
+ (void)opaque;
+ return sizeof(uInt) > 2 ? (voidpf)malloc(items * size) :
+ (voidpf)calloc(items, size);
+}
+
+void ZLIB_INTERNAL zcfree (opaque, ptr)
+ voidpf opaque;
+ voidpf ptr;
+{
+ (void)opaque;
+ free(ptr);
+}
+
+#endif /* MY_ZCALLOC */
+
+#endif /* !Z_SOLO */
diff --git a/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zutil.h b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zutil.h
new file mode 100644
index 00000000..b079ea6a
--- /dev/null
+++ b/mongodb-1.4.2/src/libmongoc/src/zlib-1.2.11/zutil.h
@@ -0,0 +1,271 @@
+/* zutil.h -- internal interface and configuration of the compression library
+ * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* @(#) $Id$ */
+
+#ifndef ZUTIL_H
+#define ZUTIL_H
+
+#ifdef HAVE_HIDDEN
+# define ZLIB_INTERNAL __attribute__((visibility ("hidden")))
+#else
+# define ZLIB_INTERNAL
+#endif
+
+#include "zlib.h"
+
+#if defined(STDC) && !defined(Z_SOLO)
+# if !(defined(_WIN32_WCE) && defined(_MSC_VER))
+# include <stddef.h>
+# endif
+# include <string.h>
+# include <stdlib.h>
+#endif
+
+#ifdef Z_SOLO
+ typedef long ptrdiff_t; /* guess -- will be caught if guess is wrong */
+#endif
+
+#ifndef local
+# define local static
+#endif
+/* since "static" is used to mean two completely different things in C, we
+ define "local" for the non-static meaning of "static", for readability
+ (compile with -Dlocal if your debugger can't find static symbols) */
+
+typedef unsigned char uch;
+typedef uch FAR uchf;
+typedef unsigned short ush;
+typedef ush FAR ushf;
+typedef unsigned long ulg;
+
+extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
+/* (size given to avoid silly warnings with Visual C++) */
+
+#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
+
+#define ERR_RETURN(strm,err) \
+ return (strm->msg = ERR_MSG(err), (err))
+/* To be used only when the state is known to be valid */
+
+ /* common constants */
+
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+/* default memLevel */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
+
+ /* target dependencies */
+
+#if defined(MSDOS) || (defined(WINDOWS) && !defined(WIN32))
+# define OS_CODE 0x00
+# ifndef Z_SOLO
+# if defined(__TURBOC__) || defined(__BORLANDC__)
+# if (__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__))
+ /* Allow compilation with ANSI keywords only enabled */
+ void _Cdecl farfree( void *block );
+ void *_Cdecl farmalloc( unsigned long nbytes );
+# else
+# include <alloc.h>
+# endif
+# else /* MSC or DJGPP */
+# include <malloc.h>
+# endif
+# endif
+#endif
+
+#ifdef AMIGA
+# define OS_CODE 1
+#endif
+
+#if defined(VAXC) || defined(VMS)
+# define OS_CODE 2
+# define F_OPEN(name, mode) \
+ fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
+#endif
+
+#ifdef __370__
+# if __TARGET_LIB__ < 0x20000000
+# define OS_CODE 4
+# elif __TARGET_LIB__ < 0x40000000
+# define OS_CODE 11
+# else
+# define OS_CODE 8
+# endif
+#endif
+
+#if defined(ATARI) || defined(atarist)
+# define OS_CODE 5
+#endif
+
+#ifdef OS2
+# define OS_CODE 6
+# if defined(M_I86) && !defined(Z_SOLO)
+# include <malloc.h>
+# endif
+#endif
+
+#if defined(MACOS) || defined(TARGET_OS_MAC)
+# define OS_CODE 7
+# ifndef Z_SOLO
+# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os
+# include <unix.h> /* for fdopen */
+# else
+# ifndef fdopen
+# define fdopen(fd,mode) NULL /* No fdopen() */
+# endif
+# endif
+# endif
+#endif
+
+#ifdef __acorn
+# define OS_CODE 13
+#endif
+
+#if defined(WIN32) && !defined(__CYGWIN__)
+# define OS_CODE 10
+#endif
+
+#ifdef _BEOS_
+# define OS_CODE 16
+#endif
+
+#ifdef __TOS_OS400__
+# define OS_CODE 18
+#endif
+
+#ifdef __APPLE__
+# define OS_CODE 19
+#endif
+
+#if defined(_BEOS_) || defined(RISCOS)
+# define fdopen(fd,mode) NULL /* No fdopen() */
+#endif
+
+#if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX
+# if defined(_WIN32_WCE)
+# define fdopen(fd,mode) NULL /* No fdopen() */
+# ifndef _PTRDIFF_T_DEFINED
+ typedef int ptrdiff_t;
+# define _PTRDIFF_T_DEFINED
+# endif
+# else
+# define fdopen(fd,type) _fdopen(fd,type)
+# endif
+#endif
+
+#if defined(__BORLANDC__) && !defined(MSDOS)
+ #pragma warn -8004
+ #pragma warn -8008
+ #pragma warn -8066
+#endif
+
+/* provide prototypes for these when building zlib without LFS */
+#if !defined(_WIN32) && \
+ (!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0)
+ ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t));
+ ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t));
+#endif
+
+ /* common defaults */
+
+#ifndef OS_CODE
+# define OS_CODE 3 /* assume Unix */
+#endif
+
+#ifndef F_OPEN
+# define F_OPEN(name, mode) fopen((name), (mode))
+#endif
+
+ /* functions */
+
+#if defined(pyr) || defined(Z_SOLO)
+# define NO_MEMCPY
+#endif
+#if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__)
+ /* Use our own functions for small and medium model with MSC <= 5.0.
+ * You may have to use the same strategy for Borland C (untested).
+ * The __SC__ check is for Symantec.
+ */
+# define NO_MEMCPY
+#endif
+#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY)
+# define HAVE_MEMCPY
+#endif
+#ifdef HAVE_MEMCPY
+# ifdef SMALL_MEDIUM /* MSDOS small or medium model */
+# define zmemcpy _fmemcpy
+# define zmemcmp _fmemcmp
+# define zmemzero(dest, len) _fmemset(dest, 0, len)
+# else
+# define zmemcpy memcpy
+# define zmemcmp memcmp
+# define zmemzero(dest, len) memset(dest, 0, len)
+# endif
+#else
+ void ZLIB_INTERNAL zmemcpy OF((Bytef* dest, const Bytef* source, uInt len));
+ int ZLIB_INTERNAL zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len));
+ void ZLIB_INTERNAL zmemzero OF((Bytef* dest, uInt len));
+#endif
+
+/* Diagnostic functions */
+#ifdef ZLIB_DEBUG
+# include <stdio.h>
+ extern int ZLIB_INTERNAL z_verbose;
+ extern void ZLIB_INTERNAL z_error OF((char *m));
+# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
+# define Trace(x) {if (z_verbose>=0) fprintf x ;}
+# define Tracev(x) {if (z_verbose>0) fprintf x ;}
+# define Tracevv(x) {if (z_verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (z_verbose>0 && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (z_verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+#ifndef Z_SOLO
+ voidpf ZLIB_INTERNAL zcalloc OF((voidpf opaque, unsigned items,
+ unsigned size));
+ void ZLIB_INTERNAL zcfree OF((voidpf opaque, voidpf ptr));
+#endif
+
+#define ZALLOC(strm, items, size) \
+ (*((strm)->zalloc))((strm)->opaque, (items), (size))
+#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr))
+#define TRY_FREE(s, p) {if (p) ZFREE(s, p);}
+
+/* Reverse the bytes in a 32-bit value */
+#define ZSWAP32(q) ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
+ (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
+
+#endif /* ZUTIL_H */
diff --git a/mongodb-1.3.4/tests/apm/bug0950-001.phpt b/mongodb-1.4.2/tests/apm/bug0950-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/apm/bug0950-001.phpt
rename to mongodb-1.4.2/tests/apm/bug0950-001.phpt
diff --git a/mongodb-1.3.4/tests/apm/bug0950-002.phpt b/mongodb-1.4.2/tests/apm/bug0950-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/apm/bug0950-002.phpt
rename to mongodb-1.4.2/tests/apm/bug0950-002.phpt
diff --git a/mongodb-1.3.4/tests/apm/monitoring-addSubscriber-001.phpt b/mongodb-1.4.2/tests/apm/monitoring-addSubscriber-001.phpt
similarity index 66%
rename from mongodb-1.3.4/tests/apm/monitoring-addSubscriber-001.phpt
rename to mongodb-1.4.2/tests/apm/monitoring-addSubscriber-001.phpt
index 44ab4228..bfe25fcd 100644
--- a/mongodb-1.3.4/tests/apm/monitoring-addSubscriber-001.phpt
+++ b/mongodb-1.4.2/tests/apm/monitoring-addSubscriber-001.phpt
@@ -1,42 +1,42 @@
--TEST--
MongoDB\Driver\Monitoring\addSubscriber(): Adding one subscriber
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$m = new MongoDB\Driver\Manager(STANDALONE);
class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
{
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "- started: ", $event->getCommandName(), "\n";
- }
-
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- }
-
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- }
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "- started: ", $event->getCommandName(), "\n";
+ }
+
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ }
+
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ }
}
CLEANUP( STANDALONE );
$query = new MongoDB\Driver\Query( [] );
$subscriber = new MySubscriber;
echo "Before addSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
echo "After addSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
?>
--EXPECT--
Before addSubscriber
After addSubscriber
- started: find
diff --git a/mongodb-1.3.4/tests/apm/monitoring-addSubscriber-002.phpt b/mongodb-1.4.2/tests/apm/monitoring-addSubscriber-002.phpt
similarity index 66%
rename from mongodb-1.3.4/tests/apm/monitoring-addSubscriber-002.phpt
rename to mongodb-1.4.2/tests/apm/monitoring-addSubscriber-002.phpt
index bc8fb9c6..9b22bec5 100644
--- a/mongodb-1.3.4/tests/apm/monitoring-addSubscriber-002.phpt
+++ b/mongodb-1.4.2/tests/apm/monitoring-addSubscriber-002.phpt
@@ -1,58 +1,58 @@
--TEST--
MongoDB\Driver\Monitoring\addSubscriber(): Adding two subscribers
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$m = new MongoDB\Driver\Manager(STANDALONE);
class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
{
- private $instanceName;
+ private $instanceName;
- public function __construct( $instanceName )
- {
- $this->instanceName = $instanceName;
- }
+ public function __construct( $instanceName )
+ {
+ $this->instanceName = $instanceName;
+ }
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "- ({$this->instanceName}) - started: ", $event->getCommandName(), "\n";
- }
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "- ({$this->instanceName}) - started: ", $event->getCommandName(), "\n";
+ }
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- }
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ }
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- }
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ }
}
CLEANUP( STANDALONE );
$query = new MongoDB\Driver\Query( [] );
$subscriber1 = new MySubscriber( "ONE" );
$subscriber2 = new MySubscriber( "TWO" );
echo "Before addSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber1 );
echo "After addSubscriber (ONE)\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber2 );
echo "After addSubscriber (TWO)\n";
$cursor = $m->executeQuery( "demo.test", $query );
?>
--EXPECT--
Before addSubscriber
After addSubscriber (ONE)
- (ONE) - started: find
After addSubscriber (TWO)
- (ONE) - started: find
- (TWO) - started: find
diff --git a/mongodb-1.3.4/tests/apm/monitoring-addSubscriber-003.phpt b/mongodb-1.4.2/tests/apm/monitoring-addSubscriber-003.phpt
similarity index 70%
rename from mongodb-1.3.4/tests/apm/monitoring-addSubscriber-003.phpt
rename to mongodb-1.4.2/tests/apm/monitoring-addSubscriber-003.phpt
index 8de63af9..d4c71d6a 100644
--- a/mongodb-1.3.4/tests/apm/monitoring-addSubscriber-003.phpt
+++ b/mongodb-1.4.2/tests/apm/monitoring-addSubscriber-003.phpt
@@ -1,48 +1,48 @@
--TEST--
MongoDB\Driver\Monitoring\addSubscriber(): Adding one subscriber multiple times
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$m = new MongoDB\Driver\Manager(STANDALONE);
class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
{
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "- started: ", $event->getCommandName(), "\n";
- }
-
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- }
-
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- }
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "- started: ", $event->getCommandName(), "\n";
+ }
+
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ }
+
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ }
}
$query = new MongoDB\Driver\Query( [] );
$subscriber = new MySubscriber();
echo "Before addSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
echo "After addSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
echo "After addSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
?>
--EXPECT--
Before addSubscriber
After addSubscriber
- started: find
After addSubscriber
- started: find
diff --git a/mongodb-1.3.4/tests/apm/monitoring-addSubscriber-004.phpt b/mongodb-1.4.2/tests/apm/monitoring-addSubscriber-004.phpt
similarity index 71%
rename from mongodb-1.3.4/tests/apm/monitoring-addSubscriber-004.phpt
rename to mongodb-1.4.2/tests/apm/monitoring-addSubscriber-004.phpt
index 07876c1b..ff13fee0 100644
--- a/mongodb-1.3.4/tests/apm/monitoring-addSubscriber-004.phpt
+++ b/mongodb-1.4.2/tests/apm/monitoring-addSubscriber-004.phpt
@@ -1,68 +1,68 @@
--TEST--
MongoDB\Driver\Monitoring\addSubscriber(): Adding three subscribers
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$m = new MongoDB\Driver\Manager(STANDALONE);
class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
{
- private $instanceName;
+ private $instanceName;
- public function __construct( $instanceName )
- {
- $this->instanceName = $instanceName;
- }
+ public function __construct( $instanceName )
+ {
+ $this->instanceName = $instanceName;
+ }
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "- ({$this->instanceName}) - started: ", $event->getCommandName(), "\n";
- }
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "- ({$this->instanceName}) - started: ", $event->getCommandName(), "\n";
+ }
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- }
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ }
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- }
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ }
}
CLEANUP( STANDALONE );
$query = new MongoDB\Driver\Query( [] );
$subscriber1 = new MySubscriber( "ONE" );
$subscriber2 = new MySubscriber( "TWO" );
$subscriber3 = new MySubscriber( "THR" );
echo "Before addSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber1 );
echo "After addSubscriber (ONE)\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber2 );
echo "After addSubscriber (TWO)\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber3 );
echo "After addSubscriber (THR)\n";
$cursor = $m->executeQuery( "demo.test", $query );
?>
--EXPECT--
Before addSubscriber
After addSubscriber (ONE)
- (ONE) - started: find
After addSubscriber (TWO)
- (ONE) - started: find
- (TWO) - started: find
After addSubscriber (THR)
- (ONE) - started: find
- (TWO) - started: find
- (THR) - started: find
diff --git a/mongodb-1.4.2/tests/apm/monitoring-commandFailed-001.phpt b/mongodb-1.4.2/tests/apm/monitoring-commandFailed-001.phpt
new file mode 100644
index 00000000..6c064de8
--- /dev/null
+++ b/mongodb-1.4.2/tests/apm/monitoring-commandFailed-001.phpt
@@ -0,0 +1,56 @@
+--TEST--
+MongoDB\Driver\Monitoring\CommandFailedEvent
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$m = new MongoDB\Driver\Manager(STANDALONE);
+
+class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "started: ", $event->getCommandName(), "\n";
+ }
+
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ }
+
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ echo "failed: ", $event->getCommandName(), "\n";
+ echo "- getError() returns an object: ", is_object( $event->getError() ) ? 'yes' : 'no', "\n";
+ echo "- getError() returns an MongoDB\Driver\Exception\Exception object: ", $event->getError() instanceof MongoDB\Driver\Exception\Exception ? 'yes' : 'no', "\n";
+ echo "- getDurationMicros() returns an integer: ", is_integer( $event->getDurationMicros() ) ? 'yes' : 'no', "\n";
+ echo "- getDurationMicros() returns > 0: ", $event->getDurationMicros() > 0 ? 'yes' : 'no', "\n";
+ echo "- getCommandName() returns a string: ", is_string( $event->getCommandName() ) ? 'yes' : 'no', "\n";
+ echo "- getCommandName() returns '", $event->getCommandName(), "'\n";
+ echo "- getServer() returns an object: ", is_object( $event->getServer() ) ? 'yes' : 'no', "\n";
+ echo "- getServer() returns a Server object: ", $event->getServer() instanceof MongoDB\Driver\Server ? 'yes' : 'no', "\n";
+ echo "- getOperationId() returns a string: ", is_string( $event->getOperationId() ) ? 'yes' : 'no', "\n";
+ echo "- getRequestId() returns a string: ", is_string( $event->getRequestId() ) ? 'yes' : 'no', "\n";
+ }
+}
+
+$subscriber = new MySubscriber;
+
+MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
+
+CLEANUP( STANDALONE );
+?>
+--EXPECT--
+started: drop
+failed: drop
+- getError() returns an object: yes
+- getError() returns an MongoDB\Driver\Exception\Exception object: yes
+- getDurationMicros() returns an integer: yes
+- getDurationMicros() returns > 0: yes
+- getCommandName() returns a string: yes
+- getCommandName() returns 'drop'
+- getServer() returns an object: yes
+- getServer() returns a Server object: yes
+- getOperationId() returns a string: yes
+- getRequestId() returns a string: yes
diff --git a/mongodb-1.4.2/tests/apm/monitoring-commandFailed-002.phpt b/mongodb-1.4.2/tests/apm/monitoring-commandFailed-002.phpt
new file mode 100644
index 00000000..4bd6b896
--- /dev/null
+++ b/mongodb-1.4.2/tests/apm/monitoring-commandFailed-002.phpt
@@ -0,0 +1,43 @@
+--TEST--
+MongoDB\Driver\Monitoring\CommandFailedEvent: requestId and operationId match
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$m = new MongoDB\Driver\Manager(STANDALONE);
+
+class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "started: ", $event->getCommandName(), "\n";
+ $this->startRequestId = $event->getRequestId();
+ $this->startOperationId = $event->getOperationId();
+ }
+
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ }
+
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ echo "failed: ", $event->getCommandName(), "\n";
+ echo "- requestId matches: ", $this->startRequestId == $event->getRequestId() ? 'yes' : 'no', " \n";
+ echo "- operationId matches: ", $this->startOperationId == $event->getOperationId() ? 'yes' : 'no', " \n";
+ }
+}
+
+$query = new MongoDB\Driver\Query( [] );
+$subscriber = new MySubscriber;
+
+MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
+
+CLEANUP( STANDALONE );
+?>
+--EXPECT--
+started: drop
+failed: drop
+- requestId matches: yes
+- operationId matches: yes
diff --git a/mongodb-1.4.2/tests/apm/monitoring-commandStarted-001.phpt b/mongodb-1.4.2/tests/apm/monitoring-commandStarted-001.phpt
new file mode 100644
index 00000000..19cc202c
--- /dev/null
+++ b/mongodb-1.4.2/tests/apm/monitoring-commandStarted-001.phpt
@@ -0,0 +1,55 @@
+--TEST--
+MongoDB\Driver\Monitoring\CommandStartedEvent
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$m = new MongoDB\Driver\Manager(STANDALONE);
+
+class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "started: ", $event->getCommandName(), "\n";
+ echo "- getCommand() returns an object: ", is_object( $event->getCommand() ) ? 'yes' : 'no', "\n";
+ echo "- getCommand() returns a stdClass object: ", $event->getCommand() instanceof stdClass ? 'yes' : 'no', "\n";
+ echo "- getDatabaseName() returns a string: ", is_string( $event->getDatabaseName() ) ? 'yes' : 'no', "\n";
+ echo "- getDatabaseName() returns '", $event->getDatabaseName(), "'\n";
+ echo "- getCommandName() returns a string: ", is_string( $event->getCommandName() ) ? 'yes' : 'no', "\n";
+ echo "- getCommandName() returns '", $event->getCommandName(), "'\n";
+ echo "- getServer() returns an object: ", is_object( $event->getServer() ) ? 'yes' : 'no', "\n";
+ echo "- getServer() returns a Server object: ", $event->getServer() instanceof MongoDB\Driver\Server ? 'yes' : 'no', "\n";
+ echo "- getOperationId() returns a string: ", is_string( $event->getOperationId() ) ? 'yes' : 'no', "\n";
+ echo "- getRequestId() returns a string: ", is_string( $event->getRequestId() ) ? 'yes' : 'no', "\n";
+ }
+
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ }
+
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ }
+}
+
+$query = new MongoDB\Driver\Query( [] );
+$subscriber = new MySubscriber;
+
+MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
+
+$cursor = $m->executeQuery( "demo.test", $query );
+?>
+--EXPECT--
+started: find
+- getCommand() returns an object: yes
+- getCommand() returns a stdClass object: yes
+- getDatabaseName() returns a string: yes
+- getDatabaseName() returns 'demo'
+- getCommandName() returns a string: yes
+- getCommandName() returns 'find'
+- getServer() returns an object: yes
+- getServer() returns a Server object: yes
+- getOperationId() returns a string: yes
+- getRequestId() returns a string: yes
diff --git a/mongodb-1.4.2/tests/apm/monitoring-commandSucceeded-001.phpt b/mongodb-1.4.2/tests/apm/monitoring-commandSucceeded-001.phpt
new file mode 100644
index 00000000..e4ff5eaa
--- /dev/null
+++ b/mongodb-1.4.2/tests/apm/monitoring-commandSucceeded-001.phpt
@@ -0,0 +1,57 @@
+--TEST--
+MongoDB\Driver\Monitoring\CommandSucceededEvent
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$m = new MongoDB\Driver\Manager(STANDALONE);
+
+class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "started: ", $event->getCommandName(), "\n";
+ }
+
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ echo "succeeded: ", $event->getCommandName(), "\n";
+ echo "- getReply() returns an object: ", is_object( $event->getReply() ) ? 'yes' : 'no', "\n";
+ echo "- getReply() returns a stdClass object: ", $event->getReply() instanceof stdClass ? 'yes' : 'no', "\n";
+ echo "- getDurationMicros() returns an integer: ", is_integer( $event->getDurationMicros() ) ? 'yes' : 'no', "\n";
+ echo "- getDurationMicros() returns > 0: ", $event->getDurationMicros() > 0 ? 'yes' : 'no', "\n";
+ echo "- getCommandName() returns a string: ", is_string( $event->getCommandName() ) ? 'yes' : 'no', "\n";
+ echo "- getCommandName() returns '", $event->getCommandName(), "'\n";
+ echo "- getServer() returns an object: ", is_object( $event->getServer() ) ? 'yes' : 'no', "\n";
+ echo "- getServer() returns a Server object: ", $event->getServer() instanceof MongoDB\Driver\Server ? 'yes' : 'no', "\n";
+ echo "- getOperationId() returns a string: ", is_string( $event->getOperationId() ) ? 'yes' : 'no', "\n";
+ echo "- getRequestId() returns a string: ", is_string( $event->getRequestId() ) ? 'yes' : 'no', "\n";
+ }
+
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ }
+}
+
+$query = new MongoDB\Driver\Query( [] );
+$subscriber = new MySubscriber;
+
+MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
+
+$cursor = $m->executeQuery( "demo.test", $query );
+?>
+--EXPECT--
+started: find
+succeeded: find
+- getReply() returns an object: yes
+- getReply() returns a stdClass object: yes
+- getDurationMicros() returns an integer: yes
+- getDurationMicros() returns > 0: yes
+- getCommandName() returns a string: yes
+- getCommandName() returns 'find'
+- getServer() returns an object: yes
+- getServer() returns a Server object: yes
+- getOperationId() returns a string: yes
+- getRequestId() returns a string: yes
diff --git a/mongodb-1.4.2/tests/apm/monitoring-commandSucceeded-002.phpt b/mongodb-1.4.2/tests/apm/monitoring-commandSucceeded-002.phpt
new file mode 100644
index 00000000..30185181
--- /dev/null
+++ b/mongodb-1.4.2/tests/apm/monitoring-commandSucceeded-002.phpt
@@ -0,0 +1,43 @@
+--TEST--
+MongoDB\Driver\Monitoring\CommandSucceededEvent: requestId and operationId match
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$m = new MongoDB\Driver\Manager(STANDALONE);
+
+class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "started: ", $event->getCommandName(), "\n";
+ $this->startRequestId = $event->getRequestId();
+ $this->startOperationId = $event->getOperationId();
+ }
+
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ echo "succeeded: ", $event->getCommandName(), "\n";
+ echo "- requestId matches: ", $this->startRequestId == $event->getRequestId() ? 'yes' : 'no', " \n";
+ echo "- operationId matches: ", $this->startOperationId == $event->getOperationId() ? 'yes' : 'no', " \n";
+ }
+
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ }
+}
+
+$query = new MongoDB\Driver\Query( [] );
+$subscriber = new MySubscriber;
+
+MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
+
+$cursor = $m->executeQuery( "demo.test", $query );
+?>
+--EXPECT--
+started: find
+succeeded: find
+- requestId matches: yes
+- operationId matches: yes
diff --git a/mongodb-1.3.4/tests/apm/monitoring-removeSubscriber-001.phpt b/mongodb-1.4.2/tests/apm/monitoring-removeSubscriber-001.phpt
similarity index 70%
rename from mongodb-1.3.4/tests/apm/monitoring-removeSubscriber-001.phpt
rename to mongodb-1.4.2/tests/apm/monitoring-removeSubscriber-001.phpt
index 27959cc7..bce2589a 100644
--- a/mongodb-1.3.4/tests/apm/monitoring-removeSubscriber-001.phpt
+++ b/mongodb-1.4.2/tests/apm/monitoring-removeSubscriber-001.phpt
@@ -1,47 +1,47 @@
--TEST--
MongoDB\Driver\Monitoring\removeSubscriber(): Removing the only subscriber
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$m = new MongoDB\Driver\Manager(STANDALONE);
class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
{
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "- started: ", $event->getCommandName(), "\n";
- }
-
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- }
-
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- }
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "- started: ", $event->getCommandName(), "\n";
+ }
+
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ }
+
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ }
}
$query = new MongoDB\Driver\Query( [] );
$subscriber = new MySubscriber;
echo "Before addSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber );
echo "After addSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\removeSubscriber( $subscriber );
echo "After removeSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
?>
--EXPECT--
Before addSubscriber
After addSubscriber
- started: find
After removeSubscriber
diff --git a/mongodb-1.3.4/tests/apm/monitoring-removeSubscriber-002.phpt b/mongodb-1.4.2/tests/apm/monitoring-removeSubscriber-002.phpt
similarity index 69%
rename from mongodb-1.3.4/tests/apm/monitoring-removeSubscriber-002.phpt
rename to mongodb-1.4.2/tests/apm/monitoring-removeSubscriber-002.phpt
index 9e625faf..8d0547e6 100644
--- a/mongodb-1.3.4/tests/apm/monitoring-removeSubscriber-002.phpt
+++ b/mongodb-1.4.2/tests/apm/monitoring-removeSubscriber-002.phpt
@@ -1,64 +1,64 @@
--TEST--
MongoDB\Driver\Monitoring\removeSubscriber(): Removing one of multiple subscribers
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE) ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$m = new MongoDB\Driver\Manager(STANDALONE);
class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
{
- private $instanceName;
+ private $instanceName;
- public function __construct( $instanceName )
- {
- $this->instanceName = $instanceName;
- }
+ public function __construct( $instanceName )
+ {
+ $this->instanceName = $instanceName;
+ }
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "- ({$this->instanceName}) - started: ", $event->getCommandName(), "\n";
- }
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "- ({$this->instanceName}) - started: ", $event->getCommandName(), "\n";
+ }
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- }
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ }
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- }
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ }
}
$query = new MongoDB\Driver\Query( [] );
$subscriber1 = new MySubscriber( "ONE" );
$subscriber2 = new MySubscriber( "TWO" );
echo "Before addSubscriber\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber1 );
echo "After addSubscriber (ONE)\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\addSubscriber( $subscriber2 );
echo "After addSubscriber (TWO)\n";
$cursor = $m->executeQuery( "demo.test", $query );
MongoDB\Driver\Monitoring\removeSubscriber( $subscriber2 );
echo "After removeSubscriber (TWO)\n";
$cursor = $m->executeQuery( "demo.test", $query );
?>
--EXPECT--
Before addSubscriber
After addSubscriber (ONE)
- (ONE) - started: find
After addSubscriber (TWO)
- (ONE) - started: find
- (TWO) - started: find
After removeSubscriber (TWO)
- (ONE) - started: find
diff --git a/mongodb-1.3.4/tests/apm/overview.phpt b/mongodb-1.4.2/tests/apm/overview.phpt
similarity index 87%
rename from mongodb-1.3.4/tests/apm/overview.phpt
rename to mongodb-1.4.2/tests/apm/overview.phpt
index d510eb08..97d6dbc4 100644
--- a/mongodb-1.3.4/tests/apm/overview.phpt
+++ b/mongodb-1.4.2/tests/apm/overview.phpt
@@ -1,208 +1,205 @@
--TEST--
PHPC-349: APM Specification
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$m = new MongoDB\Driver\Manager(STANDALONE);
class MySubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
{
- public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
- {
- echo "started:\n";
- var_dump( $event );
- }
+ public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event )
+ {
+ echo "started:\n";
+ var_dump( $event );
+ }
- public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
- {
- echo "succeeded:\n";
- var_dump( $event );
- }
+ public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event )
+ {
+ echo "succeeded:\n";
+ var_dump( $event );
+ }
- public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
- {
- echo "failed:\n";
- var_dump( $event );
- }
+ public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event )
+ {
+ echo "failed:\n";
+ var_dump( $event );
+ }
}
MongoDB\Driver\Monitoring\addSubscriber( new MySubscriber() );
CLEANUP(STANDALONE);
$d = 12345678;
$bw = new MongoDB\Driver\BulkWrite( [ 'ordered' => false ] );
$_id = $bw->insert( [ 'decimal' => $d ] );
$r = $m->executeBulkWrite( DATABASE_NAME . '.' . COLLECTION_NAME, $bw );
$query = new MongoDB\Driver\Query( [] );
$cursor = $m->executeQuery( DATABASE_NAME . '.' . COLLECTION_NAME, $query );
var_dump( $cursor->toArray() );
?>
--EXPECTF--
started:
object(MongoDB\Driver\Monitoring\CommandStartedEvent)#%d (%d) {
["command"]=>
object(stdClass)#%d (%d) {
["drop"]=>
- string(12) "apm_overview"
+ string(12) "apm_overview"%A
}
["commandName"]=>
string(4) "drop"
["databaseName"]=>
string(6) "phongo"
["operationId"]=>
string(%d) "%s"
["requestId"]=>
string(%d) "%s"
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
failed:
object(MongoDB\Driver\Monitoring\CommandFailedEvent)#%d (%d) {
["commandName"]=>
string(4) "drop"
["durationMicros"]=>
int(%d)
["error"]=>
object(MongoDB\Driver\Exception\RuntimeException)#%d (%d) {
["message":protected]=>
string(12) "ns not found"
["string":"Exception":private]=>
string(0) ""
["code":protected]=>
int(26)
["file":protected]=>
string(%d) "%stests/%s"
["line":protected]=>
int(%d)
["trace":"Exception":private]=>
%a
["previous":"Exception":private]=>
NULL
}
["operationId"]=>
string(%d) "%s"
["requestId"]=>
string(%d) "%s"
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
started:
object(MongoDB\Driver\Monitoring\CommandStartedEvent)#%d (%d) {
["command"]=>
object(stdClass)#%d (%d) {
["insert"]=>
string(12) "apm_overview"
- ["writeConcern"]=>
- object(stdClass)#%d (%d) {
- }
["ordered"]=>
- bool(false)
+ bool(false)%A
["documents"]=>
- array(%d) {
+ array(1) {
[0]=>
object(stdClass)#%d (%d) {
["decimal"]=>
int(12345678)
["_id"]=>
object(MongoDB\BSON\ObjectId)#%d (%d) {
["oid"]=>
string(24) "%s"
}
}
}
}
["commandName"]=>
string(6) "insert"
["databaseName"]=>
string(6) "phongo"
["operationId"]=>
string(%d) "%s"
["requestId"]=>
string(%d) "%s"
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
succeeded:
object(MongoDB\Driver\Monitoring\CommandSucceededEvent)#%d (%d) {
["commandName"]=>
string(6) "insert"
["durationMicros"]=>
int(%d)
["operationId"]=>
string(%d) "%s"
["reply"]=>
object(stdClass)#%d (%d) {
%a
}
["requestId"]=>
string(%d) "%s"
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
started:
object(MongoDB\Driver\Monitoring\CommandStartedEvent)#%d (%d) {
["command"]=>
object(stdClass)#%d (%d) {
["find"]=>
string(12) "apm_overview"
["filter"]=>
object(stdClass)#%d (%d) {
- }
+ }%A
}
["commandName"]=>
string(4) "find"
["databaseName"]=>
string(6) "phongo"
["operationId"]=>
string(%d) "%s"
["requestId"]=>
string(%d) "%s"
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
succeeded:
object(MongoDB\Driver\Monitoring\CommandSucceededEvent)#%d (%d) {
["commandName"]=>
string(4) "find"
["durationMicros"]=>
int(%d)
["operationId"]=>
string(%d) "%s"
["reply"]=>
object(stdClass)#%d (%d) {
%a
}
["requestId"]=>
string(%d) "%s"
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
array(%d) {
[0]=>
object(stdClass)#%d (%d) {
["_id"]=>
object(MongoDB\BSON\ObjectId)#%d (%d) {
["oid"]=>
string(24) "%s"
}
["decimal"]=>
int(12345678)
}
}
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/array-decodeError-001.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt
copy to mongodb-1.4.2/tests/bson-corpus/array-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/array-decodeError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/array-decodeError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/array-decodeError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/array-decodeError-003.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/array-decodeError-003.phpt
copy to mongodb-1.4.2/tests/bson-corpus/array-decodeError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/array-valid-001.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/array-valid-001.phpt
copy to mongodb-1.4.2/tests/bson-corpus/array-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/array-valid-002.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/array-valid-002.phpt
copy to mongodb-1.4.2/tests/bson-corpus/array-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/array-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/array-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/array-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/array-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/array-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/array-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-decodeError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-decodeError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-decodeError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-decodeError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-decodeError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-decodeError-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-decodeError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-decodeError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-decodeError-004.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-decodeError-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-decodeError-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-decodeError-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-decodeError-005.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-decodeError-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-decodeError-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-decodeError-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-006.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-007.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-008.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-009.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-010.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/binary-valid-011.phpt b/mongodb-1.4.2/tests/bson-corpus/binary-valid-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/binary-valid-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/binary-valid-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/boolean-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/boolean-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/boolean-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/boolean-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/boolean-decodeError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/boolean-decodeError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/boolean-decodeError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/boolean-decodeError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/boolean-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/boolean-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/boolean-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/boolean-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/boolean-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/boolean-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/boolean-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/boolean-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/code-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-decodeError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/code-decodeError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code-decodeError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code-decodeError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-decodeError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/code-decodeError-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code-decodeError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code-decodeError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-decodeError-004.phpt b/mongodb-1.4.2/tests/bson-corpus/code-decodeError-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code-decodeError-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code-decodeError-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-decodeError-005.phpt b/mongodb-1.4.2/tests/bson-corpus/code-decodeError-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code-decodeError-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code-decodeError-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-decodeError-006.phpt b/mongodb-1.4.2/tests/bson-corpus/code-decodeError-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code-decodeError-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code-decodeError-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-decodeError-007.phpt b/mongodb-1.4.2/tests/bson-corpus/code-decodeError-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code-decodeError-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code-decodeError-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/code-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/code-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/code-valid-003.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/code-valid-003.phpt
copy to mongodb-1.4.2/tests/bson-corpus/code-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/code-valid-004.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/code-valid-004.phpt
copy to mongodb-1.4.2/tests/bson-corpus/code-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/code-valid-005.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/code-valid-005.phpt
copy to mongodb-1.4.2/tests/bson-corpus/code-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-valid-006.phpt b/mongodb-1.4.2/tests/bson-corpus/code-valid-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code-valid-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code-valid-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-003.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-003.phpt
copy to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-004.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-005.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-006.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-007.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-008.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-009.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-010.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-011.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-decodeError-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/code_w_scope-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/code_w_scope-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/datetime-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/datetime-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/datetime-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/datetime-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/datetime-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/datetime-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/datetime-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/datetime-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/datetime-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/datetime-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/datetime-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/datetime-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/datetime-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/datetime-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/datetime-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/datetime-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/datetime-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/datetime-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/datetime-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/datetime-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-001.phpt
similarity index 75%
copy from mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt
copy to mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-001.phpt
index 8de4e238..85651d78 100644
--- a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-001.phpt
@@ -1,23 +1,23 @@
--TEST--
-Array: Array length too long: eats outer terminator
+DBPointer type (deprecated): String with negative length
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$bson = hex2bin('140000000461000D0000001030000A0000000000');
+$bson = hex2bin('1A0000000C6100FFFFFFFF620056E1FC72E0C917E9C471416100');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-002.phpt
similarity index 75%
copy from mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt
copy to mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-002.phpt
index 8de4e238..ddb485e2 100644
--- a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-002.phpt
@@ -1,23 +1,23 @@
--TEST--
-Array: Array length too long: eats outer terminator
+DBPointer type (deprecated): String with zero length
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$bson = hex2bin('140000000461000D0000001030000A0000000000');
+$bson = hex2bin('1A0000000C610000000000620056E1FC72E0C917E9C471416100');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-003.phpt
similarity index 75%
copy from mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt
copy to mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-003.phpt
index 8de4e238..27a930ac 100644
--- a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-003.phpt
@@ -1,23 +1,23 @@
--TEST--
-Array: Array length too long: eats outer terminator
+DBPointer type (deprecated): String not null terminated
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$bson = hex2bin('140000000461000D0000001030000A0000000000');
+$bson = hex2bin('1A0000000C610002000000626256E1FC72E0C917E9C471416100');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-004.phpt
similarity index 73%
rename from mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-004.phpt
index 851b3bc6..73a5608a 100644
--- a/mongodb-1.3.4/tests/bson-corpus/code_w_scope-decodeError-003.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-004.phpt
@@ -1,23 +1,23 @@
--TEST--
-Javascript Code with Scope: field length too short (less than minimum size)
+DBPointer type (deprecated): short OID (less than minimum length for field)
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$bson = hex2bin('160000000F61000D0000000100000000050000000000');
+$bson = hex2bin('160000000C61000300000061620056E1FC72E0C91700');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-005.phpt
similarity index 72%
rename from mongodb-1.3.4/tests/bson-corpus/array-decodeError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-005.phpt
index f35c5f4c..1f6ca2f4 100644
--- a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-003.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-005.phpt
@@ -1,23 +1,23 @@
--TEST--
-Array: Invalid Array: bad string length in field
+DBPointer type (deprecated): short OID (greater than minimum, but truncated)
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$bson = hex2bin('1A00000004666F6F00100000000230000500000062617A000000');
+$bson = hex2bin('1A0000000C61000300000061620056E1FC72E0C917E9C4716100');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-006.phpt
similarity index 75%
rename from mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-006.phpt
index 8de4e238..ebe71ec5 100644
--- a/mongodb-1.3.4/tests/bson-corpus/array-decodeError-001.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/dbpointer-decodeError-006.phpt
@@ -1,23 +1,23 @@
--TEST--
-Array: Array length too long: eats outer terminator
+DBPointer type (deprecated): String with bad UTF-8
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$bson = hex2bin('140000000461000D0000001030000A0000000000');
+$bson = hex2bin('1A0000000C610002000000E90056E1FC72E0C917E9C471416100');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.4.2/tests/bson-corpus/dbpointer-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/dbpointer-valid-001.phpt
new file mode 100644
index 00000000..5eddc55c
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson-corpus/dbpointer-valid-001.phpt
@@ -0,0 +1,33 @@
+--TEST--
+DBPointer type (deprecated): DBpointer
+--DESCRIPTION--
+Generated by scripts/convert-bson-corpus-tests.php
+
+DO NOT EDIT THIS FILE
+--FILE--
+<?php
+
+require_once __DIR__ . '/../utils/tools.php';
+
+$canonicalBson = hex2bin('1A0000000C610002000000620056E1FC72E0C917E9C471416100');
+$convertedBson = hex2bin('2a00000003610022000000022472656600020000006200072469640056e1fc72e0c917e9c47141610000');
+$canonicalExtJson = '{"a": {"$dbPointer": {"$ref": "b", "$id": {"$oid": "56e1fc72e0c917e9c4714161"}}}}';
+$convertedExtJson = '{"a": {"$ref": "b", "$id": {"$oid": "56e1fc72e0c917e9c4714161"}}}';
+
+// Canonical BSON -> Native -> Canonical BSON
+echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
+
+// Canonical BSON -> Canonical extJSON
+echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
+
+// Canonical extJSON -> Canonical BSON
+echo bin2hex(fromJSON($canonicalExtJson)), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+1a0000000c610002000000620056e1fc72e0c917e9c471416100
+{"a":{"$dbPointer":{"$ref":"b","$id":{"$oid":"56e1fc72e0c917e9c4714161"}}}}
+1a0000000c610002000000620056e1fc72e0c917e9c471416100
+===DONE===
\ No newline at end of file
diff --git a/mongodb-1.4.2/tests/bson-corpus/dbpointer-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/dbpointer-valid-002.phpt
new file mode 100644
index 00000000..195037c4
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson-corpus/dbpointer-valid-002.phpt
@@ -0,0 +1,38 @@
+--TEST--
+DBPointer type (deprecated): DBpointer with opposite key order
+--DESCRIPTION--
+Generated by scripts/convert-bson-corpus-tests.php
+
+DO NOT EDIT THIS FILE
+--FILE--
+<?php
+
+require_once __DIR__ . '/../utils/tools.php';
+
+$canonicalBson = hex2bin('1A0000000C610002000000620056E1FC72E0C917E9C471416100');
+$convertedBson = hex2bin('2a00000003610022000000022472656600020000006200072469640056e1fc72e0c917e9c47141610000');
+$canonicalExtJson = '{"a": {"$dbPointer": {"$ref": "b", "$id": {"$oid": "56e1fc72e0c917e9c4714161"}}}}';
+$degenerateExtJson = '{"a": {"$dbPointer": {"$id": {"$oid": "56e1fc72e0c917e9c4714161"}, "$ref": "b"}}}';
+$convertedExtJson = '{"a": {"$ref": "b", "$id": {"$oid": "56e1fc72e0c917e9c4714161"}}}';
+
+// Canonical BSON -> Native -> Canonical BSON
+echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
+
+// Canonical BSON -> Canonical extJSON
+echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
+
+// Canonical extJSON -> Canonical BSON
+echo bin2hex(fromJSON($canonicalExtJson)), "\n";
+
+// Degenerate extJSON -> Canonical BSON
+echo bin2hex(fromJSON($degenerateExtJson)), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+1a0000000c610002000000620056e1fc72e0c917e9c471416100
+{"a":{"$dbPointer":{"$ref":"b","$id":{"$oid":"56e1fc72e0c917e9c4714161"}}}}
+1a0000000c610002000000620056e1fc72e0c917e9c471416100
+1a0000000c610002000000620056e1fc72e0c917e9c471416100
+===DONE===
\ No newline at end of file
diff --git a/mongodb-1.4.2/tests/bson-corpus/dbpointer-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/dbpointer-valid-003.phpt
new file mode 100644
index 00000000..88492616
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson-corpus/dbpointer-valid-003.phpt
@@ -0,0 +1,33 @@
+--TEST--
+DBPointer type (deprecated): With two-byte UTF-8
+--DESCRIPTION--
+Generated by scripts/convert-bson-corpus-tests.php
+
+DO NOT EDIT THIS FILE
+--FILE--
+<?php
+
+require_once __DIR__ . '/../utils/tools.php';
+
+$canonicalBson = hex2bin('1B0000000C610003000000C3A90056E1FC72E0C917E9C471416100');
+$convertedBson = hex2bin('2B0000000361002300000002247265660003000000C3A900072469640056E1FC72E0C917E9C47141610000');
+$canonicalExtJson = '{"a": {"$dbPointer": {"$ref": "é", "$id": {"$oid": "56e1fc72e0c917e9c4714161"}}}}';
+$convertedExtJson = '{"a": {"$ref": "é", "$id": {"$oid": "56e1fc72e0c917e9c4714161"}}}';
+
+// Canonical BSON -> Native -> Canonical BSON
+echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
+
+// Canonical BSON -> Canonical extJSON
+echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
+
+// Canonical extJSON -> Canonical BSON
+echo bin2hex(fromJSON($canonicalExtJson)), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+1b0000000c610003000000c3a90056e1fc72e0c917e9c471416100
+{"a":{"$dbPointer":{"$ref":"\u00e9","$id":{"$oid":"56e1fc72e0c917e9c4714161"}}}}
+1b0000000c610003000000c3a90056e1fc72e0c917e9c471416100
+===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/dbref-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/dbref-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/dbref-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/dbref-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/dbref-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/dbref-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/dbref-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/dbref-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/dbref-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/dbref-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/dbref-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/dbref-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/dbref-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/dbref-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/dbref-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/dbref-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/dbref-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/dbref-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/dbref-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/dbref-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-006.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-007.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-008.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-009.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-010.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-011.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-012.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-013.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-013.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-013.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-013.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-014.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-014.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-014.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-014.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-015.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-015.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-015.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-015.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-016.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-016.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-016.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-016.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-017.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-017.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-017.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-017.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-018.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-018.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-018.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-018.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-019.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-019.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-019.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-019.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-020.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-020.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-020.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-020.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-021.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-021.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-021.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-021.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-022.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-022.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-022.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-022.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-023.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-023.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-023.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-023.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-024.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-024.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-024.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-024.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-025.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-025.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-025.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-025.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-026.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-026.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-026.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-026.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-027.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-027.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-027.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-027.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-028.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-028.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-028.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-028.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-029.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-029.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-029.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-029.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-030.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-030.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-030.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-030.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-031.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-031.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-031.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-031.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-032.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-032.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-032.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-032.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-033.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-033.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-033.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-033.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-034.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-034.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-034.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-034.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-035.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-035.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-035.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-035.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-036.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-036.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-036.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-036.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-037.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-037.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-037.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-037.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-038.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-038.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-038.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-038.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-039.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-039.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-039.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-039.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-040.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-040.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-040.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-040.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-041.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-041.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-041.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-041.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-042.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-042.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-042.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-042.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-043.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-043.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-043.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-043.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-044.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-044.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-044.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-044.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-045.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-045.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-045.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-045.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-046.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-046.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-046.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-046.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-047.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-047.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-047.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-047.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-048.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-048.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-048.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-048.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-049.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-049.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-049.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-049.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-050.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-050.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-050.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-050.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-051.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-051.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-051.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-051.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-052.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-052.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-052.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-052.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-053.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-053.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-053.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-053.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-054.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-054.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-054.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-054.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-055.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-055.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-055.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-055.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-056.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-056.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-1-valid-056.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-1-valid-056.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-006.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-007.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-008.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-009.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-010.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-011.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-012.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-013.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-013.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-013.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-013.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-014.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-014.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-014.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-014.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-015.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-015.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-015.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-015.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-016.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-016.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-016.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-016.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-017.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-017.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-017.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-017.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-018.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-018.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-018.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-018.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-019.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-019.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-019.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-019.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-020.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-020.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-020.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-020.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-021.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-021.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-021.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-021.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-022.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-022.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-022.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-022.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-023.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-023.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-023.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-023.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-024.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-024.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-024.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-024.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-025.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-025.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-025.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-025.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-026.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-026.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-026.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-026.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-027.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-027.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-027.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-027.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-028.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-028.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-028.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-028.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-029.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-029.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-029.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-029.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-030.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-030.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-030.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-030.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-031.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-031.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-031.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-031.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-032.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-032.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-032.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-032.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-033.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-033.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-033.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-033.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-034.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-034.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-034.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-034.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-035.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-035.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-035.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-035.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-036.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-036.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-036.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-036.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-037.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-037.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-037.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-037.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-038.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-038.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-038.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-038.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-039.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-039.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-039.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-039.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-040.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-040.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-040.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-040.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-041.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-041.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-041.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-041.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-042.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-042.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-042.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-042.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-043.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-043.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-043.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-043.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-044.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-044.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-044.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-044.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-045.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-045.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-045.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-045.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-046.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-046.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-046.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-046.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-047.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-047.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-047.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-047.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-048.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-048.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-048.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-048.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-049.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-049.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-049.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-049.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-050.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-050.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-050.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-050.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-051.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-051.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-051.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-051.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-052.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-052.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-052.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-052.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-053.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-053.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-053.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-053.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-054.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-054.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-054.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-054.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-055.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-055.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-055.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-055.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-056.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-056.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-056.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-056.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-057.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-057.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-057.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-057.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-058.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-058.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-058.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-058.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-059.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-059.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-059.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-059.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-060.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-060.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-060.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-060.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-061.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-061.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-061.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-061.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-062.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-062.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-062.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-062.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-063.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-063.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-063.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-063.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-064.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-064.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-064.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-064.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-065.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-065.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-065.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-065.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-066.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-066.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-066.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-066.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-067.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-067.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-067.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-067.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-068.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-068.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-068.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-068.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-069.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-069.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-069.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-069.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-070.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-070.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-070.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-070.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-071.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-071.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-071.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-071.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-072.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-072.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-072.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-072.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-073.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-073.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-073.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-073.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-074.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-074.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-074.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-074.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-075.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-075.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-075.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-075.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-076.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-076.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-076.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-076.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-077.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-077.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-077.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-077.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-078.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-078.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-078.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-078.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-079.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-079.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-079.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-079.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-080.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-080.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-080.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-080.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-081.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-081.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-081.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-081.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-082.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-082.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-082.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-082.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-083.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-083.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-083.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-083.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-084.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-084.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-084.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-084.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-085.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-085.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-085.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-085.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-086.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-086.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-086.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-086.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-087.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-087.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-087.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-087.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-088.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-088.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-088.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-088.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-089.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-089.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-089.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-089.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-090.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-090.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-090.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-090.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-091.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-091.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-091.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-091.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-092.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-092.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-092.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-092.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-093.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-093.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-093.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-093.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-094.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-094.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-094.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-094.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-095.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-095.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-095.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-095.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-096.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-096.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-096.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-096.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-097.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-097.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-097.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-097.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-098.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-098.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-098.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-098.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-099.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-099.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-099.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-099.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-100.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-100.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-100.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-100.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-101.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-101.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-101.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-101.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-102.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-102.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-102.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-102.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-103.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-103.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-103.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-103.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-104.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-104.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-104.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-104.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-105.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-105.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-105.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-105.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-106.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-106.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-106.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-106.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-107.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-107.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-107.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-107.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-108.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-108.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-108.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-108.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-109.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-109.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-109.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-109.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-110.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-110.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-110.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-110.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-111.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-111.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-111.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-111.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-112.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-112.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-112.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-112.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-113.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-113.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-113.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-113.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-114.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-114.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-114.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-114.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-115.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-115.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-115.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-115.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-116.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-116.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-116.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-116.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-117.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-117.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-117.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-117.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-118.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-118.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-118.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-118.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-119.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-119.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-119.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-119.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-120.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-120.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-120.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-120.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-121.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-121.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-121.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-121.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-122.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-122.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-122.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-122.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-123.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-123.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-123.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-123.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-124.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-124.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-124.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-124.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-125.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-125.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-125.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-125.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-126.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-126.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-126.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-126.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-127.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-127.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-127.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-127.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-128.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-128.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-128.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-128.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-129.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-129.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-129.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-129.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-130.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-130.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-130.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-130.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-131.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-131.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-131.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-131.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-132.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-132.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-132.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-132.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-133.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-133.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-133.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-133.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-134.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-134.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-134.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-134.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-135.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-135.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-135.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-135.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-136.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-136.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-136.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-136.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-137.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-137.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-137.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-137.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-138.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-138.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-138.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-138.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-139.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-139.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-139.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-139.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-140.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-140.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-140.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-140.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-141.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-141.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-141.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-141.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-142.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-142.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-142.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-142.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-143.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-143.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-143.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-143.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-144.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-144.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-144.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-144.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-145.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-145.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-145.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-145.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-146.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-146.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-146.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-146.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-147.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-147.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-147.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-147.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-148.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-148.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-148.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-148.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-149.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-149.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-149.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-149.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-150.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-150.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-150.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-150.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-151.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-151.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-151.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-151.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-152.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-152.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-152.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-152.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-153.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-153.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-153.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-153.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-154.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-154.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-154.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-154.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-155.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-155.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-155.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-155.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-156.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-156.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-156.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-156.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-157.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-157.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-2-valid-157.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-2-valid-157.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-006.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-007.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-008.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-009.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-010.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-011.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-012.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-013.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-013.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-013.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-013.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-014.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-014.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-014.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-014.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-015.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-015.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-015.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-015.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-016.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-016.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-016.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-016.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-017.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-017.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-017.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-017.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-018.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-018.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-018.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-018.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-019.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-019.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-019.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-019.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-020.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-020.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-020.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-020.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-021.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-021.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-021.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-021.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-022.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-022.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-022.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-022.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-023.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-023.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-023.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-023.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-024.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-024.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-024.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-024.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-025.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-025.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-025.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-025.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-026.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-026.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-026.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-026.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-027.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-027.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-027.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-027.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-028.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-028.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-028.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-028.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-029.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-029.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-029.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-029.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-030.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-030.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-030.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-030.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-031.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-031.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-031.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-031.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-032.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-032.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-032.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-032.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-033.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-033.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-033.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-033.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-034.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-034.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-034.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-034.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-035.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-035.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-035.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-035.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-036.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-036.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-036.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-036.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-037.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-037.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-037.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-037.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-038.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-038.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-038.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-038.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-039.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-039.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-039.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-039.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-040.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-040.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-040.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-040.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-041.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-041.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-041.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-041.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-042.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-042.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-042.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-042.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-043.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-043.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-043.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-043.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-044.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-044.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-044.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-044.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-045.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-045.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-045.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-045.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-046.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-046.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-046.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-046.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-047.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-047.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-047.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-047.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-048.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-048.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-048.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-048.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-049.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-049.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-049.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-049.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-050.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-050.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-050.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-050.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-051.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-051.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-051.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-051.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-052.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-052.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-052.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-052.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-053.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-053.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-053.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-053.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-054.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-054.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-054.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-054.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-055.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-055.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-055.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-055.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-056.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-056.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-056.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-056.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-057.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-057.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-057.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-057.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-058.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-058.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-058.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-058.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-059.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-059.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-059.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-059.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-060.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-060.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-060.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-060.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-061.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-061.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-061.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-061.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-062.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-062.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-062.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-062.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-063.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-063.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-063.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-063.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-064.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-064.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-064.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-064.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-065.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-065.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-065.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-065.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-066.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-066.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-066.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-066.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-067.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-067.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-067.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-067.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-068.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-068.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-068.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-068.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-069.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-069.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-069.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-069.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-070.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-070.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-070.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-070.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-071.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-071.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-071.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-071.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-072.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-072.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-072.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-072.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-073.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-073.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-073.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-073.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-074.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-074.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-074.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-074.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-075.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-075.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-075.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-075.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-076.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-076.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-076.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-076.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-077.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-077.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-077.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-077.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-078.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-078.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-078.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-078.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-079.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-079.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-079.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-079.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-080.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-080.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-080.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-080.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-081.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-081.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-081.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-081.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-082.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-082.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-082.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-082.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-083.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-083.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-083.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-083.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-084.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-084.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-084.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-084.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-085.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-085.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-085.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-085.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-086.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-086.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-086.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-086.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-087.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-087.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-087.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-087.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-088.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-088.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-088.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-088.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-089.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-089.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-089.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-089.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-090.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-090.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-090.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-090.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-091.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-091.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-091.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-091.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-092.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-092.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-092.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-092.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-093.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-093.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-093.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-093.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-094.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-094.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-094.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-094.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-095.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-095.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-095.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-095.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-096.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-096.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-096.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-096.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-097.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-097.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-097.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-097.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-098.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-098.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-098.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-098.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-099.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-099.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-099.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-099.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-100.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-100.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-100.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-100.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-101.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-101.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-101.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-101.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-102.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-102.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-102.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-102.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-103.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-103.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-103.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-103.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-104.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-104.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-104.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-104.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-105.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-105.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-105.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-105.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-106.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-106.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-106.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-106.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-107.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-107.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-107.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-107.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-108.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-108.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-108.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-108.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-109.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-109.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-109.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-109.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-110.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-110.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-110.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-110.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-111.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-111.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-111.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-111.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-112.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-112.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-112.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-112.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-113.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-113.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-113.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-113.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-114.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-114.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-114.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-114.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-115.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-115.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-115.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-115.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-116.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-116.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-116.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-116.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-117.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-117.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-117.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-117.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-118.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-118.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-118.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-118.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-119.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-119.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-119.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-119.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-120.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-120.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-120.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-120.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-121.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-121.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-121.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-121.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-122.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-122.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-122.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-122.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-123.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-123.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-123.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-123.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-124.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-124.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-124.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-124.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-125.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-125.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-125.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-125.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-126.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-126.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-126.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-126.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-127.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-127.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-127.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-127.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-128.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-128.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-128.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-128.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-129.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-129.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-129.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-129.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-130.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-130.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-130.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-130.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-131.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-131.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-131.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-131.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-132.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-132.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-132.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-132.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-133.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-133.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-133.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-133.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-134.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-134.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-134.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-134.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-135.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-135.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-135.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-135.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-136.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-136.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-136.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-136.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-137.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-137.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-137.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-137.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-138.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-138.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-138.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-138.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-139.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-139.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-139.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-139.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-140.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-140.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-140.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-140.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-141.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-141.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-141.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-141.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-142.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-142.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-142.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-142.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-143.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-143.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-143.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-143.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-144.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-144.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-144.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-144.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-145.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-145.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-145.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-145.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-146.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-146.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-146.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-146.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-147.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-147.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-147.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-147.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-148.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-148.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-148.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-148.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-149.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-149.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-149.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-149.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-150.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-150.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-150.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-150.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-151.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-151.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-151.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-151.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-152.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-152.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-152.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-152.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-153.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-153.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-153.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-153.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-154.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-154.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-154.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-154.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-155.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-155.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-155.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-155.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-156.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-156.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-156.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-156.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-157.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-157.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-157.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-157.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-158.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-158.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-158.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-158.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-159.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-159.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-159.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-159.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-160.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-160.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-160.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-160.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-161.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-161.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-161.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-161.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-162.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-162.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-162.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-162.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-163.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-163.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-163.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-163.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-164.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-164.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-164.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-164.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-165.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-165.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-165.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-165.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-166.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-166.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-166.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-166.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-167.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-167.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-167.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-167.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-168.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-168.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-168.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-168.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-169.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-169.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-169.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-169.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-170.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-170.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-170.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-170.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-171.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-171.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-171.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-171.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-172.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-172.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-172.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-172.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-173.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-173.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-173.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-173.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-174.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-174.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-174.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-174.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-175.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-175.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-175.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-175.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-176.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-176.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-176.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-176.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-177.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-177.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-177.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-177.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-178.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-178.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-178.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-178.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-179.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-179.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-179.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-179.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-180.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-180.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-180.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-180.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-181.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-181.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-181.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-181.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-182.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-182.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-182.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-182.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-183.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-183.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-183.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-183.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-184.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-184.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-184.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-184.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-185.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-185.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-185.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-185.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-186.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-186.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-186.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-186.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-187.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-187.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-187.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-187.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-188.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-188.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-188.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-188.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-189.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-189.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-189.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-189.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-190.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-190.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-190.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-190.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-191.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-191.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-191.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-191.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-192.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-192.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-192.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-192.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-193.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-193.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-193.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-193.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-194.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-194.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-194.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-194.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-195.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-195.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-195.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-195.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-196.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-196.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-196.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-196.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-197.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-197.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-197.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-197.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-198.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-198.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-198.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-198.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-199.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-199.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-199.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-199.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-200.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-200.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-200.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-200.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-201.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-201.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-201.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-201.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-202.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-202.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-202.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-202.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-203.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-203.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-203.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-203.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-204.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-204.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-204.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-204.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-205.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-205.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-205.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-205.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-206.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-206.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-206.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-206.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-207.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-207.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-207.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-207.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-208.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-208.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-208.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-208.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-209.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-209.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-209.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-209.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-210.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-210.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-210.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-210.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-211.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-211.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-211.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-211.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-212.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-212.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-212.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-212.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-213.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-213.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-213.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-213.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-214.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-214.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-214.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-214.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-215.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-215.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-215.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-215.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-216.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-216.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-216.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-216.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-217.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-217.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-217.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-217.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-218.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-218.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-218.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-218.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-219.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-219.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-219.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-219.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-220.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-220.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-220.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-220.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-221.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-221.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-221.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-221.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-222.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-222.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-222.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-222.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-223.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-223.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-223.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-223.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-224.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-224.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-224.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-224.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-225.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-225.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-225.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-225.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-226.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-226.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-226.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-226.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-227.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-227.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-227.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-227.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-228.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-228.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-228.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-228.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-229.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-229.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-229.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-229.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-230.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-230.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-230.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-230.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-231.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-231.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-231.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-231.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-232.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-232.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-232.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-232.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-233.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-233.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-233.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-233.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-234.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-234.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-234.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-234.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-235.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-235.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-235.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-235.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-236.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-236.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-236.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-236.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-237.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-237.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-237.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-237.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-238.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-238.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-238.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-238.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-239.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-239.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-239.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-239.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-240.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-240.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-240.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-240.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-241.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-241.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-241.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-241.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-242.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-242.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-242.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-242.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-243.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-243.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-243.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-243.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-244.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-244.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-244.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-244.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-245.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-245.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-245.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-245.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-246.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-246.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-246.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-246.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-247.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-247.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-247.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-247.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-248.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-248.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-248.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-248.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-249.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-249.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-249.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-249.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-250.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-250.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-250.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-250.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-251.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-251.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-251.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-251.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-252.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-252.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-252.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-252.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-253.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-253.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-253.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-253.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-254.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-254.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-254.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-254.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-255.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-255.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-255.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-255.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-256.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-256.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-256.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-256.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-257.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-257.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-257.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-257.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-258.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-258.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-258.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-258.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-259.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-259.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-259.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-259.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-260.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-260.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-260.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-260.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-261.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-261.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-261.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-261.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-262.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-262.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-262.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-262.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-263.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-263.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-263.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-263.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-264.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-264.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-264.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-264.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-265.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-265.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-265.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-265.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-266.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-266.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-266.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-266.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-267.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-267.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-267.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-267.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-268.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-268.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-268.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-268.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-269.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-269.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-269.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-269.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-270.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-270.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-270.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-270.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-271.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-271.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-271.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-271.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-272.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-272.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-272.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-272.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-273.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-273.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-273.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-273.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-274.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-274.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-274.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-274.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-275.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-275.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-275.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-275.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-276.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-276.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-276.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-276.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-277.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-277.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-277.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-277.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-278.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-278.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-278.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-278.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-279.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-279.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-279.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-279.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-280.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-280.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-280.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-280.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-281.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-281.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-281.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-281.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-282.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-282.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-282.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-282.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-283.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-283.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-283.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-283.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-284.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-284.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-284.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-284.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-285.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-285.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-285.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-285.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-286.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-286.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-286.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-286.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-287.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-287.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-287.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-287.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-288.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-288.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-288.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-288.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-289.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-289.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-289.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-289.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-290.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-290.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-290.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-290.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-291.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-291.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-291.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-291.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-292.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-292.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-292.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-292.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-293.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-293.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-293.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-293.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-294.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-294.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-294.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-294.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-295.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-295.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-295.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-295.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-296.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-296.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-296.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-296.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-297.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-297.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-297.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-297.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-298.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-298.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-298.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-298.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-299.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-299.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-299.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-299.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-300.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-300.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-300.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-300.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-301.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-301.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-301.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-301.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-302.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-302.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-302.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-302.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-303.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-303.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-303.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-303.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-304.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-304.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-304.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-304.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-305.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-305.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-305.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-305.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-306.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-306.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-306.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-306.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-307.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-307.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-307.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-307.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-308.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-308.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-3-valid-308.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-3-valid-308.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-004.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-005.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-006.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-007.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-008.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-009.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-010.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-011.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-012.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-013.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-013.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-013.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-013.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-014.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-014.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-014.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-014.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-015.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-015.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-015.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-015.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-016.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-016.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-016.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-016.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-017.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-017.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-017.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-017.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-018.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-018.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-018.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-018.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-019.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-019.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-019.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-019.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-020.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-020.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-parseError-020.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-parseError-020.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-006.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-007.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-008.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-009.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-010.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-011.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-012.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-013.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-013.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-4-valid-013.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-4-valid-013.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-006.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-007.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-008.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-009.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-010.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-011.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-012.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-013.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-013.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-013.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-013.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-014.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-014.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-014.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-014.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-015.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-015.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-015.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-015.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-016.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-016.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-016.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-016.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-017.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-017.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-017.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-017.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-018.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-018.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-018.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-018.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-019.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-019.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-019.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-019.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-020.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-020.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-020.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-020.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-021.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-021.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-021.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-021.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-022.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-022.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-022.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-022.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-023.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-023.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-023.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-023.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-024.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-024.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-024.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-024.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-025.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-025.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-025.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-025.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-026.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-026.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-026.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-026.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-027.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-027.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-027.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-027.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-028.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-028.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-028.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-028.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-029.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-029.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-029.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-029.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-030.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-030.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-030.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-030.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-031.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-031.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-031.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-031.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-032.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-032.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-032.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-032.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-033.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-033.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-033.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-033.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-034.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-034.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-034.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-034.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-035.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-035.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-035.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-035.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-036.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-036.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-036.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-036.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-037.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-037.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-037.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-037.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-038.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-038.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-038.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-038.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-039.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-039.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-039.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-039.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-040.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-040.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-040.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-040.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-041.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-041.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-041.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-041.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-042.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-042.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-042.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-042.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-043.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-043.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-043.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-043.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-044.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-044.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-044.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-044.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-045.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-045.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-045.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-045.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-046.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-046.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-046.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-046.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-047.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-047.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-047.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-047.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-048.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-048.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-048.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-048.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-049.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-049.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-049.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-049.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-050.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-050.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-050.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-050.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-051.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-051.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-051.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-051.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-052.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-052.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-052.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-052.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-053.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-053.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-053.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-053.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-054.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-054.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-054.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-054.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-055.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-055.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-055.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-055.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-056.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-056.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-056.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-056.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-057.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-057.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-057.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-057.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-058.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-058.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-058.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-058.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-059.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-059.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-059.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-059.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-060.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-060.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-060.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-060.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-061.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-061.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-061.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-061.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-062.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-062.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-062.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-062.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-063.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-063.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-063.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-063.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-064.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-064.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-064.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-064.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-065.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-065.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-065.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-065.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-066.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-066.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-066.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-066.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-067.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-067.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-5-valid-067.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-5-valid-067.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-004.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-005.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-006.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-007.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-008.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-009.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-010.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-011.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-012.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-013.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-013.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-013.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-013.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-014.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-014.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-014.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-014.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-015.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-015.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-015.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-015.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-016.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-016.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-016.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-016.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-017.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-017.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-017.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-017.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-018.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-018.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-018.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-018.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-019.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-019.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-019.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-019.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-020.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-020.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-020.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-020.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-021.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-021.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-021.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-021.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-022.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-022.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-022.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-022.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-023.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-023.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-023.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-023.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-024.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-024.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-024.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-024.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-025.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-025.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-025.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-025.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-026.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-026.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-026.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-026.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-027.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-027.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-027.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-027.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-028.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-028.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-028.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-028.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-029.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-029.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-029.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-029.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-030.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-030.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-030.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-030.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-031.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-031.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-6-parseError-031.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-6-parseError-031.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-004.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-005.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-006.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-007.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-008.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-009.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-010.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-011.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-012.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-013.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-013.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-013.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-013.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-014.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-014.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-014.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-014.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-015.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-015.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-015.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-015.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-016.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-016.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-016.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-016.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-017.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-017.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-017.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-017.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-018.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-018.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-018.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-018.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-019.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-019.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-019.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-019.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-020.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-020.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-020.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-020.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-021.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-021.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-021.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-021.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-022.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-022.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-022.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-022.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-023.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-023.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-023.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-023.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-024.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-024.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-024.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-024.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-025.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-025.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-025.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-025.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-026.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-026.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-026.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-026.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-027.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-027.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-027.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-027.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-028.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-028.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-028.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-028.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-029.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-029.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-029.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-029.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-030.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-030.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-030.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-030.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-031.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-031.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-031.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-031.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-032.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-032.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-032.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-032.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-033.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-033.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-033.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-033.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-034.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-034.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-034.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-034.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-035.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-035.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-035.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-035.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-036.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-036.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-036.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-036.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-037.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-037.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-037.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-037.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-038.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-038.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-038.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-038.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-039.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-039.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-039.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-039.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-040.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-040.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-040.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-040.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-041.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-041.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-041.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-041.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-042.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-042.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-042.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-042.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-043.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-043.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-043.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-043.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-044.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-044.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-044.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-044.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-045.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-045.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-045.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-045.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-046.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-046.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-046.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-046.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-047.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-047.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-047.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-047.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-048.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-048.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-048.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-048.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-049.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-049.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-049.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-049.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-050.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-050.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-050.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-050.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-051.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-051.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-051.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-051.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-052.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-052.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-052.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-052.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-053.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-053.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-053.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-053.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-054.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-054.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-054.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-054.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-055.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-055.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-055.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-055.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-056.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-056.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-056.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-056.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-057.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-057.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-057.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-057.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-058.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-058.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-058.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-058.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-059.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-059.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-059.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-059.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-060.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-060.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-060.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-060.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-061.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-061.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-061.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-061.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-062.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-062.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-062.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-062.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-063.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-063.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-063.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-063.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-064.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-064.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-064.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-064.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-065.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-065.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-065.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-065.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-066.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-066.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-066.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-066.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-067.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-067.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-067.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-067.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-068.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-068.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-068.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-068.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-069.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-069.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-069.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-069.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-070.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-070.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-070.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-070.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-071.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-071.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-071.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-071.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-072.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-072.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-072.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-072.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-073.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-073.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-073.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-073.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-074.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-074.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-074.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-074.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-075.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-075.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-075.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-075.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-076.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-076.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-076.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-076.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-077.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-077.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-077.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-077.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-078.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-078.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-078.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-078.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-079.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-079.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-079.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-079.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-080.phpt b/mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-080.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/decimal128-7-parseError-080.phpt
rename to mongodb-1.4.2/tests/bson-corpus/decimal128-7-parseError-080.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/document-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/document-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/document-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/document-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/document-decodeError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/document-decodeError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/document-decodeError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/document-decodeError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/document-decodeError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/document-decodeError-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/document-decodeError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/document-decodeError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/document-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/document-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/document-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/document-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/document-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/document-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/document-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/document-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/document-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/document-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/document-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/document-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/double-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-005.phpt
similarity index 95%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-005.phpt
index 11acef72..904698a4 100644
--- a/mongodb-1.3.4/tests/bson-corpus/double-valid-005.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/double-valid-005.phpt
@@ -1,42 +1,42 @@
--TEST--
Double type: 1.23456789012345677E+18
--XFAIL--
Variation in double's string representation (SPEC-850)
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$canonicalBson = hex2bin('1000000001640081E97DF41022B14300');
$canonicalExtJson = '{"d" : {"$numberDouble": "1.23456789012345677E+18"}}';
$relaxedExtJson = '{"d" : 1.23456789012345677E+18}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical BSON -> Relaxed extJSON
echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
// Relaxed extJSON -> BSON -> Relaxed extJSON
echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
1000000001640081e97df41022b14300
{"d":{"$numberDouble":"1.23456789012345677E+18"}}
-{"d":1.2345678901235e+18}
+{"d":1.2345678901234568e+18}
1000000001640081e97df41022b14300
-{"d":1.2345678901235e+18}
+{"d":1.2345678901234568e+18}
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-006.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-006.phpt
similarity index 95%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-006.phpt
index 7829874f..cf99055a 100644
--- a/mongodb-1.3.4/tests/bson-corpus/double-valid-006.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/double-valid-006.phpt
@@ -1,42 +1,42 @@
--TEST--
Double type: -1.23456789012345677E+18
--XFAIL--
Variation in double's string representation (SPEC-850)
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$canonicalBson = hex2bin('1000000001640081E97DF41022B1C300');
$canonicalExtJson = '{"d" : {"$numberDouble": "-1.23456789012345677E+18"}}';
$relaxedExtJson = '{"d" : -1.23456789012345677E+18}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical BSON -> Relaxed extJSON
echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
// Relaxed extJSON -> BSON -> Relaxed extJSON
echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
1000000001640081e97df41022b1c300
{"d":{"$numberDouble":"-1.23456789012345677E+18"}}
-{"d":-1.2345678901235e+18}
+{"d":-1.2345678901234568e+18}
1000000001640081e97df41022b1c300
-{"d":-1.2345678901235e+18}
+{"d":-1.2345678901234568e+18}
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-007.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-008.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-009.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-010.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-011.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/double-valid-012.phpt b/mongodb-1.4.2/tests/bson-corpus/double-valid-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/double-valid-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/double-valid-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/int32-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/int32-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/int32-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int32-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/int32-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/int32-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/int32-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int32-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/int32-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/int32-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/int32-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int32-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/int32-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/int32-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/int32-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int32-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/int32-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/int32-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/int32-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int32-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/int32-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/int32-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/int32-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int32-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/int64-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/int64-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/int64-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int64-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/int64-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/int64-valid-001.phpt
similarity index 91%
rename from mongodb-1.3.4/tests/bson-corpus/int64-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int64-valid-001.phpt
index cae95fa9..4f63a6d7 100644
--- a/mongodb-1.3.4/tests/bson-corpus/int64-valid-001.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/int64-valid-001.phpt
@@ -1,40 +1,42 @@
--TEST--
Int64 type: MinValue
+--SKIPIF--
+<?php if (PHP_INT_SIZE !== 8) { die("skip Can't represent 64-bit ints on a 32-bit platform"); } ?>
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$canonicalBson = hex2bin('10000000126100000000000000008000');
$canonicalExtJson = '{"a" : {"$numberLong" : "-9223372036854775808"}}';
$relaxedExtJson = '{"a" : -9223372036854775808}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical BSON -> Relaxed extJSON
echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
// Relaxed extJSON -> BSON -> Relaxed extJSON
echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
10000000126100000000000000008000
{"a":{"$numberLong":"-9223372036854775808"}}
{"a":-9223372036854775808}
10000000126100000000000000008000
{"a":-9223372036854775808}
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/int64-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/int64-valid-002.phpt
similarity index 91%
rename from mongodb-1.3.4/tests/bson-corpus/int64-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int64-valid-002.phpt
index 7d4468a3..394af40b 100644
--- a/mongodb-1.3.4/tests/bson-corpus/int64-valid-002.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/int64-valid-002.phpt
@@ -1,40 +1,42 @@
--TEST--
Int64 type: MaxValue
+--SKIPIF--
+<?php if (PHP_INT_SIZE !== 8) { die("skip Can't represent 64-bit ints on a 32-bit platform"); } ?>
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$canonicalBson = hex2bin('10000000126100FFFFFFFFFFFFFF7F00');
$canonicalExtJson = '{"a" : {"$numberLong" : "9223372036854775807"}}';
$relaxedExtJson = '{"a" : 9223372036854775807}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical BSON -> Relaxed extJSON
echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
// Relaxed extJSON -> BSON -> Relaxed extJSON
echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
10000000126100ffffffffffffff7f00
{"a":{"$numberLong":"9223372036854775807"}}
{"a":9223372036854775807}
10000000126100ffffffffffffff7f00
{"a":9223372036854775807}
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/int64-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/int64-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/int64-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int64-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/int64-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/int64-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/int64-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int64-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/int64-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/int64-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/int64-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/int64-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/maxkey-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/maxkey-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/maxkey-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/maxkey-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/minkey-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/minkey-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/minkey-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/minkey-valid-001.phpt
diff --git a/mongodb-1.4.2/tests/bson-corpus/multi-type-deprecated-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/multi-type-deprecated-valid-001.phpt
new file mode 100644
index 00000000..80c8d56b
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson-corpus/multi-type-deprecated-valid-001.phpt
@@ -0,0 +1,35 @@
+--TEST--
+Multiple types within the same document: All BSON types
+--XFAIL--
+PHP encodes integers as 32-bit if range allows
+--DESCRIPTION--
+Generated by scripts/convert-bson-corpus-tests.php
+
+DO NOT EDIT THIS FILE
+--FILE--
+<?php
+
+require_once __DIR__ . '/../utils/tools.php';
+
+$canonicalBson = hex2bin('38020000075F69640057E193D7A9CC81B4027498B50E53796D626F6C000700000073796D626F6C0002537472696E670007000000737472696E670010496E743332002A00000012496E743634002A0000000000000001446F75626C6500000000000000F0BF0542696E617279001000000003A34C38F7C3ABEDC8A37814A992AB8DB60542696E61727955736572446566696E656400050000008001020304050D436F6465000E00000066756E6374696F6E2829207B7D000F436F64655769746853636F7065001B0000000E00000066756E6374696F6E2829207B7D00050000000003537562646F63756D656E74001200000002666F6F0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696D657374616D7000010000002A0000000B5265676578007061747465726E0000094461746574696D6545706F6368000000000000000000094461746574696D65506F73697469766500FFFFFF7F00000000094461746574696D654E656761746976650000000080FFFFFFFF085472756500010846616C736500000C4442506F696E746572000B000000636F6C6C656374696F6E0057E193D7A9CC81B4027498B1034442526566003D0000000224726566000B000000636F6C6C656374696F6E00072469640057FD71E96E32AB4225B723FB02246462000900000064617461626173650000FF4D696E6B6579007F4D61786B6579000A4E756C6C0006556E646566696E65640000');
+$convertedBson = hex2bin('48020000075f69640057e193d7a9cc81b4027498b50253796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c73650000034442506f696e746572002b0000000224726566000b000000636f6c6c656374696f6e00072469640057e193d7a9cc81b4027498b100034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c000a556e646566696e65640000');
+$canonicalExtJson = '{"_id": {"$oid": "57e193d7a9cc81b4027498b5"}, "Symbol": {"$symbol": "symbol"}, "String": "string", "Int32": {"$numberInt": "42"}, "Int64": {"$numberLong": "42"}, "Double": {"$numberDouble": "-1.0"}, "Binary": { "$binary" : {"base64": "o0w498Or7cijeBSpkquNtg==", "subType": "03"}}, "BinaryUserDefined": { "$binary" : {"base64": "AQIDBAU=", "subType": "80"}}, "Code": {"$code": "function() {}"}, "CodeWithScope": {"$code": "function() {}", "$scope": {}}, "Subdocument": {"foo": "bar"}, "Array": [{"$numberInt": "1"}, {"$numberInt": "2"}, {"$numberInt": "3"}, {"$numberInt": "4"}, {"$numberInt": "5"}], "Timestamp": {"$timestamp": {"t": 42, "i": 1}}, "Regex": {"$regularExpression": {"pattern": "pattern", "options": ""}}, "DatetimeEpoch": {"$date": {"$numberLong": "0"}}, "DatetimePositive": {"$date": {"$numberLong": "2147483647"}}, "DatetimeNegative": {"$date": {"$numberLong": "-2147483648"}}, "True": true, "False": false, "DBPointer": {"$dbPointer": {"$ref": "collection", "$id": {"$oid": "57e193d7a9cc81b4027498b1"}}}, "DBRef": {"$ref": "collection", "$id": {"$oid": "57fd71e96e32ab4225b723fb"}, "$db": "database"}, "Minkey": {"$minKey": 1}, "Maxkey": {"$maxKey": 1}, "Null": null, "Undefined": {"$undefined": true}}';
+$convertedExtJson = '{"_id": {"$oid": "57e193d7a9cc81b4027498b5"}, "Symbol": "symbol", "String": "string", "Int32": {"$numberInt": "42"}, "Int64": {"$numberLong": "42"}, "Double": {"$numberDouble": "-1.0"}, "Binary": { "$binary" : {"base64": "o0w498Or7cijeBSpkquNtg==", "subType": "03"}}, "BinaryUserDefined": { "$binary" : {"base64": "AQIDBAU=", "subType": "80"}}, "Code": {"$code": "function() {}"}, "CodeWithScope": {"$code": "function() {}", "$scope": {}}, "Subdocument": {"foo": "bar"}, "Array": [{"$numberInt": "1"}, {"$numberInt": "2"}, {"$numberInt": "3"}, {"$numberInt": "4"}, {"$numberInt": "5"}], "Timestamp": {"$timestamp": {"t": 42, "i": 1}}, "Regex": {"$regularExpression": {"pattern": "pattern", "options": ""}}, "DatetimeEpoch": {"$date": {"$numberLong": "0"}}, "DatetimePositive": {"$date": {"$numberLong": "2147483647"}}, "DatetimeNegative": {"$date": {"$numberLong": "-2147483648"}}, "True": true, "False": false, "DBPointer": {"$ref": "collection", "$id": {"$oid": "57e193d7a9cc81b4027498b1"}}, "DBRef": {"$ref": "collection", "$id": {"$oid": "57fd71e96e32ab4225b723fb"}, "$db": "database"}, "Minkey": {"$minKey": 1}, "Maxkey": {"$maxKey": 1}, "Null": null, "Undefined": null}';
+
+// Canonical BSON -> Native -> Canonical BSON
+echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
+
+// Canonical BSON -> Canonical extJSON
+echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
+
+// Canonical extJSON -> Canonical BSON
+echo bin2hex(fromJSON($canonicalExtJson)), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+38020000075f69640057e193d7a9cc81b4027498b50e53796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c736500000c4442506f696e746572000b000000636f6c6c656374696f6e0057e193d7a9cc81b4027498b1034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c0006556e646566696e65640000
+{"_id":{"$oid":"57e193d7a9cc81b4027498b5"},"Symbol":{"$symbol":"symbol"},"String":"string","Int32":{"$numberInt":"42"},"Int64":{"$numberLong":"42"},"Double":{"$numberDouble":"-1.0"},"Binary":{"$binary":{"base64":"o0w498Or7cijeBSpkquNtg==","subType":"03"}},"BinaryUserDefined":{"$binary":{"base64":"AQIDBAU=","subType":"80"}},"Code":{"$code":"function() {}"},"CodeWithScope":{"$code":"function() {}","$scope":{}},"Subdocument":{"foo":"bar"},"Array":[{"$numberInt":"1"},{"$numberInt":"2"},{"$numberInt":"3"},{"$numberInt":"4"},{"$numberInt":"5"}],"Timestamp":{"$timestamp":{"t":42,"i":1}},"Regex":{"$regularExpression":{"pattern":"pattern","options":""}},"DatetimeEpoch":{"$date":{"$numberLong":"0"}},"DatetimePositive":{"$date":{"$numberLong":"2147483647"}},"DatetimeNegative":{"$date":{"$numberLong":"-2147483648"}},"True":true,"False":false,"DBPointer":{"$dbPointer":{"$ref":"collection","$id":{"$oid":"57e193d7a9cc81b4027498b1"}}},"DBRef":{"$ref":"collection","$id":{"$oid":"57fd71e96e32ab4225b723fb"},"$db":"database"},"Minkey":{"$minKey":1},"Maxkey":{"$maxKey":1},"Null":null,"Undefined":{"$undefined":true}}
+38020000075f69640057e193d7a9cc81b4027498b50e53796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c736500000c4442506f696e746572000b000000636f6c6c656374696f6e0057e193d7a9cc81b4027498b1034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c0006556e646566696e65640000
+===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/multi-type-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/multi-type-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/multi-type-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/multi-type-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/null-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/null-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/null-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/null-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/oid-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/oid-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/oid-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/oid-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/oid-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/oid-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/oid-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/oid-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/oid-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/oid-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/oid-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/oid-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/oid-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/oid-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/oid-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/oid-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-decodeError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-decodeError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-decodeError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-decodeError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-valid-006.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-valid-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-valid-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-valid-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-valid-007.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-valid-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-valid-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-valid-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-valid-008.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-valid-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-valid-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-valid-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/regex-valid-009.phpt b/mongodb-1.4.2/tests/bson-corpus/regex-valid-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/regex-valid-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/regex-valid-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/string-decodeError-001.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/string-decodeError-001.phpt
copy to mongodb-1.4.2/tests/bson-corpus/string-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/string-decodeError-002.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/string-decodeError-002.phpt
copy to mongodb-1.4.2/tests/bson-corpus/string-decodeError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/string-decodeError-003.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/string-decodeError-003.phpt
copy to mongodb-1.4.2/tests/bson-corpus/string-decodeError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-004.phpt b/mongodb-1.4.2/tests/bson-corpus/string-decodeError-004.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/string-decodeError-004.phpt
copy to mongodb-1.4.2/tests/bson-corpus/string-decodeError-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-005.phpt b/mongodb-1.4.2/tests/bson-corpus/string-decodeError-005.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/string-decodeError-005.phpt
copy to mongodb-1.4.2/tests/bson-corpus/string-decodeError-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-006.phpt b/mongodb-1.4.2/tests/bson-corpus/string-decodeError-006.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/string-decodeError-006.phpt
copy to mongodb-1.4.2/tests/bson-corpus/string-decodeError-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-007.phpt b/mongodb-1.4.2/tests/bson-corpus/string-decodeError-007.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson-corpus/string-decodeError-007.phpt
copy to mongodb-1.4.2/tests/bson-corpus/string-decodeError-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/string-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/string-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/string-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/string-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/string-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/string-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/string-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/string-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/string-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/string-valid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/string-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/string-valid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/string-valid-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/string-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/string-valid-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-valid-006.phpt b/mongodb-1.4.2/tests/bson-corpus/string-valid-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/string-valid-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/string-valid-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-valid-007.phpt b/mongodb-1.4.2/tests/bson-corpus/string-valid-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/string-valid-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/string-valid-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-001.phpt
similarity index 89%
rename from mongodb-1.3.4/tests/bson-corpus/string-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-001.phpt
index c92efe2f..70127320 100644
--- a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-001.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-001.phpt
@@ -1,23 +1,23 @@
--TEST--
-String: bad string length: 0 (but no 0x00 either)
+Symbol: bad symbol length: 0 (but no 0x00 either)
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$bson = hex2bin('0C0000000261000000000000');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-002.phpt
similarity index 93%
rename from mongodb-1.3.4/tests/bson-corpus/string-decodeError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-002.phpt
index b02eee16..95d694d5 100644
--- a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-002.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-002.phpt
@@ -1,23 +1,23 @@
--TEST--
-String: bad string length: -1
+Symbol: bad symbol length: -1
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$bson = hex2bin('0C000000026100FFFFFFFF00');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-003.phpt
similarity index 91%
rename from mongodb-1.3.4/tests/bson-corpus/string-decodeError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-003.phpt
index 7f33e848..09ae1349 100644
--- a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-003.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-003.phpt
@@ -1,23 +1,23 @@
--TEST--
-String: bad string length: eats terminator
+Symbol: bad symbol length: eats terminator
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$bson = hex2bin('10000000026100050000006200620000');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-004.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-004.phpt
similarity index 88%
rename from mongodb-1.3.4/tests/bson-corpus/string-decodeError-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-004.phpt
index 0b3585c4..733650d6 100644
--- a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-004.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-004.phpt
@@ -1,23 +1,23 @@
--TEST--
-String: bad string length: longer than rest of document
+Symbol: bad symbol length: longer than rest of document
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$bson = hex2bin('120000000200FFFFFF00666F6F6261720000');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-005.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-005.phpt
similarity index 92%
rename from mongodb-1.3.4/tests/bson-corpus/string-decodeError-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-005.phpt
index a324fcf1..f5946d26 100644
--- a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-005.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-005.phpt
@@ -1,23 +1,23 @@
--TEST--
-String: string is not null-terminated
+Symbol: symbol is not null-terminated
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$bson = hex2bin('1000000002610004000000616263FF00');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-006.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-006.phpt
similarity index 92%
rename from mongodb-1.3.4/tests/bson-corpus/string-decodeError-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-006.phpt
index 7ce2c466..ce724674 100644
--- a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-006.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-006.phpt
@@ -1,23 +1,23 @@
--TEST--
-String: empty string, but extra null
+Symbol: empty symbol, but extra null
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$bson = hex2bin('0E00000002610001000000000000');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-007.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-007.phpt
similarity index 95%
rename from mongodb-1.3.4/tests/bson-corpus/string-decodeError-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-007.phpt
index a8e14d31..eab8e5f9 100644
--- a/mongodb-1.3.4/tests/bson-corpus/string-decodeError-007.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-decodeError-007.phpt
@@ -1,23 +1,23 @@
--TEST--
-String: invalid UTF-8
+Symbol: invalid UTF-8
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$bson = hex2bin('0E00000002610002000000E90000');
throws(function() use ($bson) {
var_dump(toPHP($bson));
}, 'MongoDB\Driver\Exception\UnexpectedValueException');
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\UnexpectedValueException
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-001.phpt
similarity index 64%
copy from mongodb-1.3.4/tests/bson-corpus/array-valid-001.phpt
copy to mongodb-1.4.2/tests/bson-corpus/symbol-valid-001.phpt
index 0ba7b165..b01486d5 100644
--- a/mongodb-1.3.4/tests/bson-corpus/array-valid-001.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-001.phpt
@@ -1,31 +1,33 @@
--TEST--
-Array: Empty
+Symbol: Empty string
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$canonicalBson = hex2bin('0D000000046100050000000000');
-$canonicalExtJson = '{"a" : []}';
+$canonicalBson = hex2bin('0D0000000E6100010000000000');
+$convertedBson = hex2bin('0D000000026100010000000000');
+$canonicalExtJson = '{"a": {"$symbol": ""}}';
+$convertedExtJson = '{"a": ""}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-0d000000046100050000000000
-{"a":[]}
-0d000000046100050000000000
+0d0000000e6100010000000000
+{"a":{"$symbol":""}}
+0d0000000e6100010000000000
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-002.phpt
similarity index 63%
rename from mongodb-1.3.4/tests/bson-corpus/array-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-valid-002.phpt
index 6e4099c4..b46e07df 100644
--- a/mongodb-1.3.4/tests/bson-corpus/array-valid-002.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-002.phpt
@@ -1,31 +1,33 @@
--TEST--
-Array: Single Element Array
+Symbol: Single character
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$canonicalBson = hex2bin('140000000461000C0000001030000A0000000000');
-$canonicalExtJson = '{"a" : [{"$numberInt": "10"}]}';
+$canonicalBson = hex2bin('0E0000000E610002000000620000');
+$convertedBson = hex2bin('0E00000002610002000000620000');
+$canonicalExtJson = '{"a": {"$symbol": "b"}}';
+$convertedExtJson = '{"a": "b"}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-140000000461000c0000001030000a0000000000
-{"a":[{"$numberInt":"10"}]}
-140000000461000c0000001030000a0000000000
+0e0000000e610002000000620000
+{"a":{"$symbol":"b"}}
+0e0000000e610002000000620000
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-003.phpt
similarity index 56%
copy from mongodb-1.3.4/tests/bson-corpus/code-valid-003.phpt
copy to mongodb-1.4.2/tests/bson-corpus/symbol-valid-003.phpt
index 40a49151..34ee9f7b 100644
--- a/mongodb-1.3.4/tests/bson-corpus/code-valid-003.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-003.phpt
@@ -1,31 +1,33 @@
--TEST--
-Javascript Code: Multi-character
+Symbol: Multi-character
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$canonicalBson = hex2bin('190000000D61000D0000006162616261626162616261620000');
-$canonicalExtJson = '{"a" : {"$code" : "abababababab"}}';
+$canonicalBson = hex2bin('190000000E61000D0000006162616261626162616261620000');
+$convertedBson = hex2bin('190000000261000D0000006162616261626162616261620000');
+$canonicalExtJson = '{"a": {"$symbol": "abababababab"}}';
+$convertedExtJson = '{"a": "abababababab"}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-190000000d61000d0000006162616261626162616261620000
-{"a":{"$code":"abababababab"}}
-190000000d61000d0000006162616261626162616261620000
+190000000e61000d0000006162616261626162616261620000
+{"a":{"$symbol":"abababababab"}}
+190000000e61000d0000006162616261626162616261620000
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-valid-004.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-004.phpt
similarity index 55%
rename from mongodb-1.3.4/tests/bson-corpus/code-valid-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-valid-004.phpt
index 6c88de79..e95d9a3c 100644
--- a/mongodb-1.3.4/tests/bson-corpus/code-valid-004.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-004.phpt
@@ -1,31 +1,33 @@
--TEST--
-Javascript Code: two-byte UTF-8 (é)
+Symbol: two-byte UTF-8 (é)
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$canonicalBson = hex2bin('190000000261000D000000C3A9C3A9C3A9C3A9C3A9C3A90000');
-$canonicalExtJson = '{"a" : "\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9"}';
+$canonicalBson = hex2bin('190000000E61000D000000C3A9C3A9C3A9C3A9C3A9C3A90000');
+$convertedBson = hex2bin('190000000261000D000000C3A9C3A9C3A9C3A9C3A9C3A90000');
+$canonicalExtJson = '{"a": {"$symbol": "éééééé"}}';
+$convertedExtJson = '{"a": "éééééé"}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-190000000261000d000000c3a9c3a9c3a9c3a9c3a9c3a90000
-{"a":"\u00e9\u00e9\u00e9\u00e9\u00e9\u00e9"}
-190000000261000d000000c3a9c3a9c3a9c3a9c3a9c3a90000
+190000000e61000d000000c3a9c3a9c3a9c3a9c3a9c3a90000
+{"a":{"$symbol":"\u00e9\u00e9\u00e9\u00e9\u00e9\u00e9"}}
+190000000e61000d000000c3a9c3a9c3a9c3a9c3a9c3a90000
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-valid-005.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-005.phpt
similarity index 55%
rename from mongodb-1.3.4/tests/bson-corpus/code-valid-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-valid-005.phpt
index b4ab4fad..e1d0c57f 100644
--- a/mongodb-1.3.4/tests/bson-corpus/code-valid-005.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-005.phpt
@@ -1,31 +1,33 @@
--TEST--
-Javascript Code: three-byte UTF-8 (☆)
+Symbol: three-byte UTF-8 (☆)
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$canonicalBson = hex2bin('190000000261000D000000E29886E29886E29886E298860000');
-$canonicalExtJson = '{"a" : "\\u2606\\u2606\\u2606\\u2606"}';
+$canonicalBson = hex2bin('190000000E61000D000000E29886E29886E29886E298860000');
+$convertedBson = hex2bin('190000000261000D000000E29886E29886E29886E298860000');
+$canonicalExtJson = '{"a": {"$symbol": "☆☆☆☆"}}';
+$convertedExtJson = '{"a": "☆☆☆☆"}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-190000000261000d000000e29886e29886e29886e298860000
-{"a":"\u2606\u2606\u2606\u2606"}
-190000000261000d000000e29886e29886e29886e298860000
+190000000e61000d000000e29886e29886e29886e298860000
+{"a":{"$symbol":"\u2606\u2606\u2606\u2606"}}
+190000000e61000d000000e29886e29886e29886e298860000
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/code-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-006.phpt
similarity index 54%
rename from mongodb-1.3.4/tests/bson-corpus/code-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/symbol-valid-006.phpt
index 40a49151..c11c2919 100644
--- a/mongodb-1.3.4/tests/bson-corpus/code-valid-003.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/symbol-valid-006.phpt
@@ -1,31 +1,33 @@
--TEST--
-Javascript Code: Multi-character
+Symbol: Embedded nulls
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$canonicalBson = hex2bin('190000000D61000D0000006162616261626162616261620000');
-$canonicalExtJson = '{"a" : {"$code" : "abababababab"}}';
+$canonicalBson = hex2bin('190000000E61000D0000006162006261620062616261620000');
+$convertedBson = hex2bin('190000000261000D0000006162006261620062616261620000');
+$canonicalExtJson = '{"a": {"$symbol": "ab\\u0000bab\\u0000babab"}}';
+$convertedExtJson = '{"a": "ab\\u0000bab\\u0000babab"}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-190000000d61000d0000006162616261626162616261620000
-{"a":{"$code":"abababababab"}}
-190000000d61000d0000006162616261626162616261620000
+190000000e61000d0000006162006261620062616261620000
+{"a":{"$symbol":"ab\u0000bab\u0000babab"}}
+190000000e61000d0000006162006261620062616261620000
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson-corpus/timestamp-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/timestamp-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/timestamp-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/timestamp-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/timestamp-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/timestamp-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/timestamp-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/timestamp-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/timestamp-valid-002.phpt b/mongodb-1.4.2/tests/bson-corpus/timestamp-valid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/timestamp-valid-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/timestamp-valid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/timestamp-valid-003.phpt b/mongodb-1.4.2/tests/bson-corpus/timestamp-valid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/timestamp-valid-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/timestamp-valid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-004.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-005.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-006.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-007.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-008.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-009.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-010.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-011.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-012.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-013.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-013.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-013.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-013.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-decodeError-014.phpt b/mongodb-1.4.2/tests/bson-corpus/top-decodeError-014.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-decodeError-014.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-decodeError-014.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-001.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-002.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-002.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-002.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-003.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-003.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-003.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-004.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-004.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-004.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-005.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-005.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-005.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-006.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-006.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-006.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-007.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-007.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-007.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-008.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-008.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-008.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-009.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-009.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-009.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-010.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-010.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-010.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-011.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-011.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-011.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-012.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-012.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-012.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-012.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-013.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-013.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-013.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-013.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-014.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-014.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-014.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-014.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-015.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-015.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-015.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-015.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-016.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-016.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-016.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-016.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-017.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-017.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-017.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-017.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-018.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-018.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-018.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-018.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-019.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-019.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-019.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-019.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-020.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-020.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-020.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-020.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-021.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-021.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-021.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-021.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-022.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-022.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-022.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-022.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-023.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-023.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-023.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-023.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-024.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-024.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-024.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-024.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-025.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-025.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-025.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-025.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-026.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-026.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-026.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-026.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-027.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-027.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-027.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-027.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-028.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-028.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-028.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-028.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-029.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-029.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-029.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-029.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-030.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-030.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-030.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-030.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-031.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-031.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-031.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-031.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-032.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-032.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-032.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-032.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-033.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-033.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-033.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-033.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-034.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-034.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-034.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-034.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-035.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-035.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-035.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-035.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-036.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-036.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-036.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-036.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-037.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-037.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-037.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-037.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-038.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-038.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-038.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-038.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-039.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-039.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-039.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-039.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-040.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-040.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-040.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-040.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-parseError-041.phpt b/mongodb-1.4.2/tests/bson-corpus/top-parseError-041.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-parseError-041.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-parseError-041.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/top-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/top-valid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson-corpus/top-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/top-valid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson-corpus/array-valid-001.phpt b/mongodb-1.4.2/tests/bson-corpus/undefined-valid-001.phpt
similarity index 64%
rename from mongodb-1.3.4/tests/bson-corpus/array-valid-001.phpt
rename to mongodb-1.4.2/tests/bson-corpus/undefined-valid-001.phpt
index 0ba7b165..68e2df39 100644
--- a/mongodb-1.3.4/tests/bson-corpus/array-valid-001.phpt
+++ b/mongodb-1.4.2/tests/bson-corpus/undefined-valid-001.phpt
@@ -1,31 +1,33 @@
--TEST--
-Array: Empty
+Undefined type (deprecated): Undefined
--DESCRIPTION--
Generated by scripts/convert-bson-corpus-tests.php
DO NOT EDIT THIS FILE
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$canonicalBson = hex2bin('0D000000046100050000000000');
-$canonicalExtJson = '{"a" : []}';
+$canonicalBson = hex2bin('0800000006610000');
+$convertedBson = hex2bin('080000000A610000');
+$canonicalExtJson = '{"a" : {"$undefined" : true}}';
+$convertedExtJson = '{"a" : null}';
// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";
// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";
// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-0d000000046100050000000000
-{"a":[]}
-0d000000046100050000000000
+0800000006610000
+{"a":{"$undefined":true}}
+0800000006610000
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary-compare-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-compare-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-compare-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-compare-002.phpt b/mongodb-1.4.2/tests/bson/bson-binary-compare-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-compare-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-compare-002.phpt
diff --git a/mongodb-1.4.2/tests/bson/bson-binary-get_properties-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary-get_properties-001.phpt
new file mode 100644
index 00000000..776b378e
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-binary-get_properties-001.phpt
@@ -0,0 +1,20 @@
+--TEST--
+MongoDB\BSON\Binary get_properties handler (get_object_vars)
+--FILE--
+<?php
+
+$binary = new MongoDB\BSON\Binary('foobar', MongoDB\BSON\Binary::TYPE_GENERIC);
+
+var_dump(get_object_vars($binary));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+array(2) {
+ ["data"]=>
+ string(6) "foobar"
+ ["type"]=>
+ int(0)
+}
+===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-tostring-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary-get_properties-002.phpt
similarity index 50%
copy from mongodb-1.3.4/tests/bson/bson-binary-tostring-001.phpt
copy to mongodb-1.4.2/tests/bson/bson-binary-get_properties-002.phpt
index 1488bfc9..2a6e7bbe 100644
--- a/mongodb-1.3.4/tests/bson/bson-binary-tostring-001.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-binary-get_properties-002.phpt
@@ -1,14 +1,21 @@
--TEST--
-MongoDB\BSON\Binary::__toString()
+MongoDB\BSON\Binary get_properties handler (foreach)
--FILE--
<?php
$binary = new MongoDB\BSON\Binary('foobar', MongoDB\BSON\Binary::TYPE_GENERIC);
-var_dump((string) $binary);
+
+foreach ($binary as $key => $value) {
+ var_dump($key);
+ var_dump($value);
+}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
+string(4) "data"
string(6) "foobar"
+string(4) "type"
+int(0)
===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary-jsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-jsonserialize-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-jsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-binary-jsonserialize-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-jsonserialize-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-jsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary-serialization-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-serialization-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-serialization-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-serialization_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary-serialization_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-serialization_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-serialization_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-serialization_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-binary-serialization_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-serialization_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-serialization_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-serialization_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-binary-serialization_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-serialization_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-serialization_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-set_state-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary-set_state-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-set_state-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-set_state-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-set_state_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary-set_state_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-set_state_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-set_state_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-set_state_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-binary-set_state_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-set_state_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-set_state_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-set_state_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-binary-set_state_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-set_state_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-set_state_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary-tostring-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary-tostring-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary-tostring-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary-tostring-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-binary_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-binary_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-binary_error-003.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson/bson-binary_error-003.phpt
copy to mongodb-1.4.2/tests/bson/bson-binary_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binary_error-004.phpt b/mongodb-1.4.2/tests/bson/bson-binary_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binary_error-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-binary_error-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-binaryinterface-001.phpt b/mongodb-1.4.2/tests/bson/bson-binaryinterface-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-binaryinterface-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-binaryinterface-001.phpt
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer-001.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-001.phpt
new file mode 100644
index 00000000..31d843b5
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-001.phpt
@@ -0,0 +1,32 @@
+--TEST--
+MongoDB\BSON\DBPointer #001
+--FILE--
+<?php
+
+require_once __DIR__ . '/../utils/tools.php';
+
+$tests = array(
+ MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.test", "$id" : { "$oid" : "5a2e78accd485d55b405ac12" } }} }')),
+);
+
+foreach($tests as $n => $test) {
+ echo "Test#{$n}", "\n";
+ $s = fromPHP($test);
+ $testagain = toPHP($s);
+ var_dump($test->dbref instanceof MongoDB\BSON\DBPointer);
+ var_dump($testagain->dbref instanceof MongoDB\BSON\DBPointer);
+ var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain)));
+ var_dump((object)$test == (object)$testagain);
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Test#0
+bool(true)
+bool(true)
+string(76) "{ "dbref" : { "$ref" : "phongo.test", "$id" : "5a2e78accd485d55b405ac12" } }"
+string(76) "{ "dbref" : { "$ref" : "phongo.test", "$id" : "5a2e78accd485d55b405ac12" } }"
+bool(true)
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer-002.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-002.phpt
new file mode 100644
index 00000000..065a776a
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-002.phpt
@@ -0,0 +1,19 @@
+--TEST--
+MongoDB\BSON\DBPointer debug handler
+--FILE--
+<?php
+
+$dbptr = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.test", "$id" : { "$oid" : "5a2e78accd485d55b405ac12" } }} }'));
+var_dump($dbptr->dbref);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+object(MongoDB\BSON\DBPointer)#1 (2) {
+ ["ref"]=>
+ string(11) "phongo.test"
+ ["id"]=>
+ string(24) "5a2e78accd485d55b405ac12"
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-compare-001.phpt
new file mode 100644
index 00000000..5eb1425a
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-compare-001.phpt
@@ -0,0 +1,27 @@
+--TEST--
+MongoDB\BSON\DBPointer comparisons
+--FILE--
+<?php
+
+$jsonTest0 = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.test", "$id" : { "$oid" : "5a2e78accd485d55b4050000" } }} }'));
+$jsonTest1a = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.test", "$id" : { "$oid" : "5a2e78accd485d55b4051111" } }} }'));
+$jsonTest1b = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.test", "$id" : { "$oid" : "5a2e78accd485d55b4051111" } }} }'));
+$jsonTest2 = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.test", "$id" : { "$oid" : "5a2e78accd485d55b4052222" } }} }'));
+$jsonAAAA = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.aaaa", "$id" : { "$oid" : "5a2e78accd485d55b4051111" } }} }'));
+$jsonZZZZ = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.zzzz", "$id" : { "$oid" : "5a2e78accd485d55b4051111" } }} }'));
+
+var_dump($jsonTest1a == $jsonTest1b);
+var_dump($jsonTest0 < $jsonTest1b);
+var_dump($jsonTest2 > $jsonTest1b);
+var_dump($jsonAAAA < $jsonTest1b);
+var_dump($jsonZZZZ > $jsonTest1b);
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+bool(true)
+bool(true)
+bool(true)
+bool(true)
+bool(true)
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer-get_properties-001.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-get_properties-001.phpt
new file mode 100644
index 00000000..c790222d
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-get_properties-001.phpt
@@ -0,0 +1,21 @@
+--TEST--
+MongoDB\BSON\DBPointer get_properties handler (get_object_vars)
+--FILE--
+<?php
+
+$document = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{"dbptr":{"$dbPointer":{"$ref":"phongo.test","$id":{"$oid":"5a2e78accd485d55b405ac12"}}}}'));
+$dbptr = $document->dbptr;
+
+var_dump(get_object_vars($dbptr));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+array(2) {
+ ["ref"]=>
+ string(11) "phongo.test"
+ ["id"]=>
+ string(24) "5a2e78accd485d55b405ac12"
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer-get_properties-002.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-get_properties-002.phpt
new file mode 100644
index 00000000..fad60d95
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-get_properties-002.phpt
@@ -0,0 +1,22 @@
+--TEST--
+MongoDB\BSON\DBPointer get_properties handler (foreach)
+--FILE--
+<?php
+
+$document = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{"dbptr":{"$dbPointer":{"$ref":"phongo.test","$id":{"$oid":"5a2e78accd485d55b405ac12"}}}}'));
+$dbptr = $document->dbptr;
+
+foreach ($dbptr as $key => $value) {
+ var_dump($key);
+ var_dump($value);
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+string(3) "ref"
+string(11) "phongo.test"
+string(2) "id"
+string(24) "5a2e78accd485d55b405ac12"
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-jsonserialize-001.phpt
new file mode 100644
index 00000000..c10679e5
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-jsonserialize-001.phpt
@@ -0,0 +1,25 @@
+--TEST--
+MongoDB\BSON\DBPointer::jsonSerialize() return value
+--FILE--
+<?php
+
+$dbref = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.test", "$id" : { "$oid" : "5a2e78accd485d55b4050000" } }} }'))->dbref;
+var_dump($dbref->jsonSerialize());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+array(1) {
+ ["$dbPointer"]=>
+ array(2) {
+ ["$ref"]=>
+ string(11) "phongo.test"
+ ["$id"]=>
+ array(1) {
+ ["$oid"]=>
+ string(24) "5a2e78accd485d55b4050000"
+ }
+ }
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer-jsonserialize-003.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-jsonserialize-003.phpt
new file mode 100644
index 00000000..0c6f3ea6
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-jsonserialize-003.phpt
@@ -0,0 +1,30 @@
+--TEST--
+MongoDB\BSON\DBPointer::jsonSerialize() with json_encode()
+--FILE--
+<?php
+
+require_once __DIR__ . '/../utils/tools.php';
+
+$doc = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "foo": {"$dbPointer": {"$ref": "phongo.test", "$id" : { "$oid" : "5a2e78accd485d55b4050000" } }} }'));
+$json = json_encode($doc);
+
+echo toJSON(fromPHP($doc)), "\n";
+echo $json, "\n";
+var_dump(toPHP(fromJSON($json)));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+{ "foo" : { "$ref" : "phongo.test", "$id" : "5a2e78accd485d55b4050000" } }
+{"foo":{"$dbPointer":{"$ref":"phongo.test","$id":{"$oid":"5a2e78accd485d55b4050000"}}}}
+object(stdClass)#%d (%d) {
+ ["foo"]=>
+ object(MongoDB\BSON\DBPointer)#%d (%d) {
+ ["ref"]=>
+ string(11) "phongo.test"
+ ["id"]=>
+ string(24) "5a2e78accd485d55b4050000"
+ }
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-serialization-001.phpt
new file mode 100644
index 00000000..304ebafd
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-serialization-001.phpt
@@ -0,0 +1,31 @@
+--TEST--
+MongoDB\BSON\DBPointer serialization
+--FILE--
+<?php
+
+$test = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.test", "$id" : { "$oid" : "5a2e78accd485d55b4050000" } }} }'))->dbref;
+
+var_dump($test);
+var_dump($s = serialize($test));
+var_dump(unserialize($s));
+echo "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+object(MongoDB\BSON\DBPointer)#1 (2) {
+ ["ref"]=>
+ string(11) "phongo.test"
+ ["id"]=>
+ string(24) "5a2e78accd485d55b4050000"
+}
+string(111) "C:22:"MongoDB\BSON\DBPointer":76:{a:2:{s:3:"ref";s:11:"phongo.test";s:2:"id";s:24:"5a2e78accd485d55b4050000";}}"
+object(MongoDB\BSON\DBPointer)#2 (2) {
+ ["ref"]=>
+ string(11) "phongo.test"
+ ["id"]=>
+ string(24) "5a2e78accd485d55b4050000"
+}
+
+===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-binary_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-serialization_error-001.phpt
similarity index 50%
rename from mongodb-1.3.4/tests/bson/bson-binary_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-dbpointer-serialization_error-001.phpt
index e0f0e30b..324b5a64 100644
--- a/mongodb-1.3.4/tests/bson/bson-binary_error-003.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-serialization_error-001.phpt
@@ -1,24 +1,25 @@
--TEST--
-MongoDB\BSON\Binary constructor requires unsigned 8-bit integer for type
+MongoDB\BSON\DBPointer unserialization requires "ref" and "id" string fields
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
+
echo throws(function() {
- new MongoDB\BSON\Binary('foo', -1);
+ unserialize('C:22:"MongoDB\BSON\DBPointer":35:{a:1:{s:3:"ref";s:11:"phongo.test";}}');
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
echo throws(function() {
- new MongoDB\BSON\Binary('foo', 256);
+ unserialize('C:22:"MongoDB\BSON\DBPointer":34:{a:1:{s:2:"id";s:11:"phongo.test";}}');
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Expected type to be an unsigned 8-bit integer, -1 given
+MongoDB\BSON\DBPointer initialization requires "ref" and "id" string fields
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Expected type to be an unsigned 8-bit integer, 256 given
+MongoDB\BSON\DBPointer initialization requires "ref" and "id" string fields
===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer-serialization_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-serialization_error-002.phpt
new file mode 100644
index 00000000..c741e4a2
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-serialization_error-002.phpt
@@ -0,0 +1,25 @@
+--TEST--
+MongoDB\BSON\DBPointer unserialization requires "id" string field to be valid
+--FILE--
+<?php
+
+require_once __DIR__ . '/../utils/tools.php';
+
+
+echo throws(function() {
+ unserialize('C:22:"MongoDB\BSON\DBPointer":76:{a:2:{s:3:"ref";s:11:"phongo.test";s:2:"id";s:24:"QQQQ78accd485d55b4050000";}}');
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() {
+ unserialize('C:22:"MongoDB\BSON\DBPointer":75:{a:2:{s:3:"ref";s:11:"phongo.test";s:2:"id";s:23:"52e78accd485d55b4050000";}}');
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Error parsing ObjectId string: QQQQ78accd485d55b4050000
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Error parsing ObjectId string: 52e78accd485d55b4050000
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer-tostring-001.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer-tostring-001.phpt
new file mode 100644
index 00000000..019150a8
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer-tostring-001.phpt
@@ -0,0 +1,14 @@
+--TEST--
+MongoDB\BSON\DBPointer::__toString()
+--FILE--
+<?php
+
+$dbref = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "dbref": {"$dbPointer": {"$ref": "phongo.test", "$id" : { "$oid" : "5a2e78accd485d55b4050000" } }} }'))->dbref;
+var_dump((string) $dbref);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+string(38) "[phongo.test/5a2e78accd485d55b4050000]"
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-dbpointer_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-dbpointer_error-002.phpt
new file mode 100644
index 00000000..c563c44f
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-dbpointer_error-002.phpt
@@ -0,0 +1,12 @@
+--TEST--
+MongoDB\BSON\DBPointer cannot be extended
+--FILE--
+<?php
+
+class MyDBPointer extends MongoDB\BSON\DBPointer {}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Fatal error: Class MyDBPointer may not inherit from final class (MongoDB\BSON\DBPointer) in %s on line %d
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-001.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-002.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-003.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-004.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-get_properties-001.phpt
similarity index 51%
copy from mongodb-1.3.4/tests/bson/bson-decimal128-jsonserialize-001.phpt
copy to mongodb-1.4.2/tests/bson/bson-decimal128-get_properties-001.phpt
index d08037be..3155a94d 100644
--- a/mongodb-1.3.4/tests/bson/bson-decimal128-jsonserialize-001.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-decimal128-get_properties-001.phpt
@@ -1,19 +1,20 @@
--TEST--
-MongoDB\BSON\Decimal128::jsonSerialize() return value
+MongoDB\BSON\Decimal128 get_properties handler (get_object_vars)
--SKIPIF--
<?php if (!class_exists('MongoDB\BSON\Decimal128')) { die('skip MongoDB\BSON\Decimal128 is not available'); } ?>
--FILE--
<?php
-$decimal = new MongoDB\BSON\Decimal128('12389719287312');
-var_dump($decimal->jsonSerialize());
+$decimal = new MongoDB\BSON\Decimal128('1234.5678');
+
+var_dump(get_object_vars($decimal));
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
array(1) {
- ["$numberDecimal"]=>
- string(14) "12389719287312"
+ ["dec"]=>
+ string(9) "1234.5678"
}
===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-decimal128-get_properties-002.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-get_properties-002.phpt
new file mode 100644
index 00000000..e1f7ce9e
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-decimal128-get_properties-002.phpt
@@ -0,0 +1,21 @@
+--TEST--
+MongoDB\BSON\Decimal128 get_properties handler (foreach)
+--SKIPIF--
+<?php if (!class_exists('MongoDB\BSON\Decimal128')) { die('skip MongoDB\BSON\Decimal128 is not available'); } ?>
+--FILE--
+<?php
+
+$decimal = new MongoDB\BSON\Decimal128('1234.5678');
+
+foreach ($decimal as $key => $value) {
+ var_dump($key);
+ var_dump($value);
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+string(3) "dec"
+string(9) "1234.5678"
+===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-jsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-jsonserialize-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-jsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-jsonserialize-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-jsonserialize-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-jsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-serialization-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-serialization-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-serialization-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-serialization_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-serialization_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-serialization_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-serialization_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-serialization_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-serialization_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-serialization_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-serialization_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-set_state-001.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-set_state-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-set_state-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-set_state-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-set_state_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-set_state_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-set_state_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-set_state_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128-set_state_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128-set_state_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128-set_state_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128-set_state_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decimal128interface-001.phpt b/mongodb-1.4.2/tests/bson/bson-decimal128interface-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decimal128interface-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-decimal128interface-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decode-001.phpt b/mongodb-1.4.2/tests/bson/bson-decode-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decode-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-decode-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-decode-002.phpt b/mongodb-1.4.2/tests/bson/bson-decode-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-decode-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-decode-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-encode-001.phpt b/mongodb-1.4.2/tests/bson/bson-encode-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-encode-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-encode-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-encode-002.phpt b/mongodb-1.4.2/tests/bson/bson-encode-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-encode-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-encode-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-encode-003.phpt b/mongodb-1.4.2/tests/bson/bson-encode-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-encode-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-encode-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-encode-004.phpt b/mongodb-1.4.2/tests/bson/bson-encode-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-encode-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-encode-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-encode-005.phpt b/mongodb-1.4.2/tests/bson/bson-encode-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-encode-005.phpt
rename to mongodb-1.4.2/tests/bson/bson-encode-005.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromJSON-001.phpt b/mongodb-1.4.2/tests/bson/bson-fromJSON-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromJSON-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromJSON-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromJSON-002.phpt b/mongodb-1.4.2/tests/bson/bson-fromJSON-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromJSON-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromJSON-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromJSON_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-fromJSON_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromJSON_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromJSON_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP-001.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP-002.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP-003.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP-005.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP-005.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP-005.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP-006.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP-006.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP-006.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP_error-004.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP_error-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP_error-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP_error-005.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP_error-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP_error-005.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP_error-005.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP_error-006.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP_error-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP_error-006.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP_error-006.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-fromPHP_error-007.phpt b/mongodb-1.4.2/tests/bson/bson-fromPHP_error-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-fromPHP_error-007.phpt
rename to mongodb-1.4.2/tests/bson/bson-fromPHP_error-007.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-generate-document-id.phpt b/mongodb-1.4.2/tests/bson/bson-generate-document-id.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-generate-document-id.phpt
rename to mongodb-1.4.2/tests/bson/bson-generate-document-id.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-002.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-compare-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-compare-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-compare-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-compare-002.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-compare-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-compare-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-compare-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-getCode-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-getCode-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-getCode-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-getCode-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-getScope-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-getScope-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-getScope-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-getScope-001.phpt
diff --git a/mongodb-1.4.2/tests/bson/bson-javascript-get_properties-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-get_properties-001.phpt
new file mode 100644
index 00000000..f37c22a5
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-javascript-get_properties-001.phpt
@@ -0,0 +1,34 @@
+--TEST--
+MongoDB\BSON\Javascript get_properties handler (get_object_vars)
+--FILE--
+<?php
+
+$tests = [
+ new MongoDB\BSON\Javascript('function foo(bar) { return bar; }'),
+ new MongoDB\BSON\Javascript('function foo() { return bar; }', ['bar' => 42]),
+];
+
+foreach ($tests as $test) {
+ var_dump(get_object_vars($test));
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+array(2) {
+ ["code"]=>
+ string(33) "function foo(bar) { return bar; }"
+ ["scope"]=>
+ NULL
+}
+array(2) {
+ ["code"]=>
+ string(30) "function foo() { return bar; }"
+ ["scope"]=>
+ object(stdClass)#%d (%d) {
+ ["bar"]=>
+ int(42)
+ }
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-javascript-get_properties-002.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-get_properties-002.phpt
new file mode 100644
index 00000000..84e5196e
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-javascript-get_properties-002.phpt
@@ -0,0 +1,33 @@
+--TEST--
+MongoDB\BSON\Javascript get_properties handler (foreach)
+--FILE--
+<?php
+
+$tests = [
+ new MongoDB\BSON\Javascript('function foo(bar) { return bar; }'),
+ new MongoDB\BSON\Javascript('function foo() { return bar; }', ['bar' => 42]),
+];
+
+foreach ($tests as $test) {
+ foreach ($test as $key => $value) {
+ var_dump($key);
+ var_dump($value);
+ }
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+string(4) "code"
+string(33) "function foo(bar) { return bar; }"
+string(5) "scope"
+NULL
+string(4) "code"
+string(30) "function foo() { return bar; }"
+string(5) "scope"
+object(stdClass)#%d (%d) {
+ ["bar"]=>
+ int(42)
+}
+===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-jsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-jsonserialize-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-jsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-jsonserialize-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-jsonserialize-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-jsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-jsonserialize-003.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-jsonserialize-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-jsonserialize-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-jsonserialize-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-jsonserialize-004.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-jsonserialize-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-jsonserialize-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-jsonserialize-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-serialization-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-serialization-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-serialization-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-serialization_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-serialization_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-serialization_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-serialization_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-serialization_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-serialization_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-serialization_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-serialization_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-serialization_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-serialization_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-serialization_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-serialization_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-set_state-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-set_state-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-set_state-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-set_state-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-set_state_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-set_state_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-set_state_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-set_state_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-set_state_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-set_state_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-set_state_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-set_state_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-set_state_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-set_state_error-003.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson/bson-javascript-set_state_error-003.phpt
copy to mongodb-1.4.2/tests/bson/bson-javascript-set_state_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-tostring-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript-tostring-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript-tostring-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript-tostring-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascript_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-javascript_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-javascript_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascript_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascript_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-javascriptinterface-001.phpt b/mongodb-1.4.2/tests/bson/bson-javascriptinterface-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-javascriptinterface-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-javascriptinterface-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-maxkey-001.phpt b/mongodb-1.4.2/tests/bson/bson-maxkey-001.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson/bson-maxkey-001.phpt
copy to mongodb-1.4.2/tests/bson/bson-maxkey-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-maxkey-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-maxkey-compare-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-maxkey-compare-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-maxkey-compare-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-maxkey-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-maxkey-jsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-maxkey-jsonserialize-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-maxkey-jsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-maxkey-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-maxkey-jsonserialize-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-maxkey-jsonserialize-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-maxkey-jsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-maxkey-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-maxkey-serialization-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-maxkey-serialization-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-maxkey-serialization-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-maxkey-set_state-001.phpt b/mongodb-1.4.2/tests/bson/bson-maxkey-set_state-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-maxkey-set_state-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-maxkey-set_state-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-maxkey_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-maxkey_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-maxkey_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-maxkey_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-maxkeyinterface-001.phpt b/mongodb-1.4.2/tests/bson/bson-maxkeyinterface-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-maxkeyinterface-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-maxkeyinterface-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-minkey-001.phpt b/mongodb-1.4.2/tests/bson/bson-minkey-001.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson/bson-minkey-001.phpt
copy to mongodb-1.4.2/tests/bson/bson-minkey-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-minkey-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-minkey-compare-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-minkey-compare-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-minkey-compare-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-minkey-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-minkey-jsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-minkey-jsonserialize-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-minkey-jsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-minkey-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-minkey-jsonserialize-002.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson/bson-minkey-jsonserialize-002.phpt
copy to mongodb-1.4.2/tests/bson/bson-minkey-jsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-minkey-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-minkey-serialization-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-minkey-serialization-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-minkey-serialization-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-minkey-set_state-001.phpt b/mongodb-1.4.2/tests/bson/bson-minkey-set_state-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-minkey-set_state-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-minkey-set_state-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-minkey_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-minkey_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-minkey_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-minkey_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-minkeyinterface-001.phpt b/mongodb-1.4.2/tests/bson/bson-minkeyinterface-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-minkeyinterface-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-minkeyinterface-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-002.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-003.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-004.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-compare-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-compare-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-compare-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-compare-002.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-compare-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-compare-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-compare-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-getTimestamp-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-getTimestamp-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-getTimestamp-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-getTimestamp-001.phpt
diff --git a/mongodb-1.4.2/tests/bson/bson-objectid-get_properties-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-get_properties-001.phpt
new file mode 100644
index 00000000..b201c540
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-objectid-get_properties-001.phpt
@@ -0,0 +1,18 @@
+--TEST--
+MongoDB\BSON\ObjectId get_properties handler (get_object_vars)
+--FILE--
+<?php
+
+$oid = new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603');
+
+var_dump(get_object_vars($oid));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+array(1) {
+ ["oid"]=>
+ string(24) "53e2a1c40640fd72175d4603"
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-objectid-get_properties-002.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-get_properties-002.phpt
new file mode 100644
index 00000000..04ae6ea4
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-objectid-get_properties-002.phpt
@@ -0,0 +1,19 @@
+--TEST--
+MongoDB\BSON\ObjectId get_properties handler (foreach)
+--FILE--
+<?php
+
+$oid = new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603');
+
+foreach ($oid as $key => $value) {
+ var_dump($key);
+ var_dump($value);
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+string(3) "oid"
+string(24) "53e2a1c40640fd72175d4603"
+===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-jsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-jsonserialize-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-jsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-jsonserialize-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-jsonserialize-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-jsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-serialization-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-serialization-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-serialization-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-serialization_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-serialization_error-001.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson/bson-objectid-serialization_error-001.phpt
copy to mongodb-1.4.2/tests/bson/bson-objectid-serialization_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-serialization_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-serialization_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-serialization_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-serialization_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-set_state-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-set_state-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-set_state-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-set_state-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-set_state_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-set_state_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-set_state_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-set_state_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-set_state_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-objectid-set_state_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid-set_state_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid-set_state_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectid_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-objectid_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-objectid_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectid_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectid_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-objectidinterface-001.phpt b/mongodb-1.4.2/tests/bson/bson-objectidinterface-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-objectidinterface-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-objectidinterface-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-001.phpt b/mongodb-1.4.2/tests/bson/bson-regex-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-002.phpt b/mongodb-1.4.2/tests/bson/bson-regex-002.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson/bson-regex-002.phpt
copy to mongodb-1.4.2/tests/bson/bson-regex-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-003.phpt b/mongodb-1.4.2/tests/bson/bson-regex-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-004.phpt b/mongodb-1.4.2/tests/bson/bson-regex-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-005.phpt b/mongodb-1.4.2/tests/bson/bson-regex-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-005.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-005.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-regex-compare-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-compare-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-compare-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-compare-002.phpt b/mongodb-1.4.2/tests/bson/bson-regex-compare-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-compare-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-compare-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-002.phpt b/mongodb-1.4.2/tests/bson/bson-regex-get_properties-001.phpt
similarity index 61%
rename from mongodb-1.3.4/tests/bson/bson-regex-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-get_properties-001.phpt
index a81dff8c..8f497336 100644
--- a/mongodb-1.3.4/tests/bson/bson-regex-002.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-regex-get_properties-001.phpt
@@ -1,20 +1,20 @@
--TEST--
-MongoDB\BSON\Regex debug handler
+MongoDB\BSON\Regex get_properties handler (get_object_vars)
--FILE--
<?php
$regex = new MongoDB\BSON\Regex('regexp', 'i');
-var_dump($regex);
+var_dump(get_object_vars($regex));
?>
===DONE===
<?php exit(0); ?>
---EXPECTF--
-object(MongoDB\BSON\Regex)#%d (%d) {
+--EXPECT--
+array(2) {
["pattern"]=>
string(6) "regexp"
["flags"]=>
string(1) "i"
}
===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-regex-get_properties-002.phpt b/mongodb-1.4.2/tests/bson/bson-regex-get_properties-002.phpt
new file mode 100644
index 00000000..66b0df4c
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-regex-get_properties-002.phpt
@@ -0,0 +1,21 @@
+--TEST--
+MongoDB\BSON\Regex get_properties handler (foreach)
+--FILE--
+<?php
+
+$regex = new MongoDB\BSON\Regex('regexp', 'i');
+
+foreach ($regex as $key => $value) {
+ var_dump($key);
+ var_dump($value);
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+string(7) "pattern"
+string(6) "regexp"
+string(5) "flags"
+string(1) "i"
+===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-regex-jsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-jsonserialize-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-jsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-regex-jsonserialize-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-jsonserialize-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-jsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-jsonserialize-003.phpt b/mongodb-1.4.2/tests/bson/bson-regex-jsonserialize-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-jsonserialize-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-jsonserialize-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-jsonserialize-004.phpt b/mongodb-1.4.2/tests/bson/bson-regex-jsonserialize-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-jsonserialize-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-jsonserialize-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-regex-serialization-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-serialization-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-serialization-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-serialization-002.phpt b/mongodb-1.4.2/tests/bson/bson-regex-serialization-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-serialization-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-serialization-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-serialization-003.phpt b/mongodb-1.4.2/tests/bson/bson-regex-serialization-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-serialization-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-serialization-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-serialization_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-regex-serialization_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-serialization_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-serialization_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-serialization_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-regex-serialization_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-serialization_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-serialization_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-set_state-001.phpt b/mongodb-1.4.2/tests/bson/bson-regex-set_state-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-set_state-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-set_state-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-set_state-002.phpt b/mongodb-1.4.2/tests/bson/bson-regex-set_state-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-set_state-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-set_state-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-set_state_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-regex-set_state_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-set_state_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-set_state_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex-set_state_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-regex-set_state_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex-set_state_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex-set_state_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-regex_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-regex_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regex_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-regex_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regex_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-regex_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-regexinterface-001.phpt b/mongodb-1.4.2/tests/bson/bson-regexinterface-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-regexinterface-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-regexinterface-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-minkey-001.phpt b/mongodb-1.4.2/tests/bson/bson-symbol-001.phpt
similarity index 58%
rename from mongodb-1.3.4/tests/bson/bson-minkey-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-symbol-001.phpt
index fde437d6..0ab7cb7d 100644
--- a/mongodb-1.3.4/tests/bson/bson-minkey-001.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-symbol-001.phpt
@@ -1,29 +1,28 @@
--TEST--
-MongoDB\BSON\MinKey #001
+MongoDB\BSON\Symbol #001
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$minkey = new MongoDB\BSON\MinKey;
$tests = array(
- array("min" => $minkey),
+ MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "test"} }')),
);
foreach($tests as $n => $test) {
$s = fromPHP($test);
echo "Test#{$n} ", $json = toJSON($s), "\n";
- $bson = fromJSON($json);
- $testagain = toPHP($bson);
+ $testagain = toPHP($s);
var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain)));
var_dump((object)$test == (object)$testagain);
}
+
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-Test#0 { "min" : { "$minKey" : 1 } }
-string(29) "{ "min" : { "$minKey" : 1 } }"
-string(29) "{ "min" : { "$minKey" : 1 } }"
+Test#0 { "symbol" : "test" }
+string(21) "{ "symbol" : "test" }"
+string(21) "{ "symbol" : "test" }"
bool(true)
===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-symbol-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-symbol-compare-001.phpt
new file mode 100644
index 00000000..2a57dcd7
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-symbol-compare-001.phpt
@@ -0,0 +1,17 @@
+--TEST--
+MongoDB\BSON\Symbol comparisons
+--FILE--
+<?php
+
+var_dump(MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "val1"} }')) == MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "val1"} }')));
+var_dump(MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "val1"} }')) < MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "val2"} }')));
+var_dump(MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "val1"} }')) > MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "val0"} }')));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+bool(true)
+bool(true)
+bool(true)
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-symbol-get_properties-001.phpt b/mongodb-1.4.2/tests/bson/bson-symbol-get_properties-001.phpt
new file mode 100644
index 00000000..24a30686
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-symbol-get_properties-001.phpt
@@ -0,0 +1,19 @@
+--TEST--
+MongoDB\BSON\Symbol get_properties handler (get_object_vars)
+--FILE--
+<?php
+
+$document = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{"symbol":{"$symbol":"test"}}'));
+$symbol = $document->symbol;
+
+var_dump(get_object_vars($symbol));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+array(1) {
+ ["symbol"]=>
+ string(4) "test"
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-symbol-get_properties-002.phpt b/mongodb-1.4.2/tests/bson/bson-symbol-get_properties-002.phpt
new file mode 100644
index 00000000..f55ef70a
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-symbol-get_properties-002.phpt
@@ -0,0 +1,20 @@
+--TEST--
+MongoDB\BSON\Symbol get_properties handler (foreach)
+--FILE--
+<?php
+
+$document = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{"symbol":{"$symbol":"test"}}'));
+$symbol = $document->symbol;
+
+foreach ($symbol as $key => $value) {
+ var_dump($key);
+ var_dump($value);
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+string(6) "symbol"
+string(4) "test"
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-symbol-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-symbol-jsonserialize-001.phpt
new file mode 100644
index 00000000..9262beaf
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-symbol-jsonserialize-001.phpt
@@ -0,0 +1,17 @@
+--TEST--
+MongoDB\BSON\Symbol::jsonSerialize() return value
+--FILE--
+<?php
+
+$js = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "valSymbol"} }'))->symbol;
+var_dump($js->jsonSerialize());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+array(1) {
+ ["$symbol"]=>
+ string(9) "valSymbol"
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-symbol-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-symbol-jsonserialize-002.phpt
new file mode 100644
index 00000000..ffc0bd7e
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-symbol-jsonserialize-002.phpt
@@ -0,0 +1,28 @@
+--TEST--
+MongoDB\BSON\Symbol::jsonSerialize() with json_encode()
+--FILE--
+<?php
+
+require_once __DIR__ . '/../utils/tools.php';
+
+$doc = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "foo": {"$symbol": "symbolValue"} }'));
+$json = json_encode($doc);
+
+echo toJSON(fromPHP($doc)), "\n";
+echo $json, "\n";
+var_dump(toPHP(fromJSON($json)));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+{ "foo" : "symbolValue" }
+{"foo":{"$symbol":"symbolValue"}}
+object(stdClass)#%d (%d) {
+ ["foo"]=>
+ object(MongoDB\BSON\Symbol)#%d (%d) {
+ ["symbol"]=>
+ string(11) "symbolValue"
+ }
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-symbol-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-symbol-serialization-001.phpt
new file mode 100644
index 00000000..250cc466
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-symbol-serialization-001.phpt
@@ -0,0 +1,25 @@
+--TEST--
+MongoDB\BSON\Symbol serialization
+--FILE--
+<?php
+
+$test = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "symbolValue"} }'))->symbol;
+
+var_dump($symbol = $test);
+var_dump($s = serialize($symbol));
+var_dump(unserialize($s));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+object(MongoDB\BSON\Symbol)#1 (1) {
+ ["symbol"]=>
+ string(11) "symbolValue"
+}
+string(70) "C:19:"MongoDB\BSON\Symbol":38:{a:1:{s:6:"symbol";s:11:"symbolValue";}}"
+object(MongoDB\BSON\Symbol)#2 (1) {
+ ["symbol"]=>
+ string(11) "symbolValue"
+}
+===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-objectid-serialization_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-symbol-serialization_error-001.phpt
similarity index 57%
rename from mongodb-1.3.4/tests/bson/bson-objectid-serialization_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-symbol-serialization_error-001.phpt
index e1154300..3822cdb2 100644
--- a/mongodb-1.3.4/tests/bson/bson-objectid-serialization_error-001.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-symbol-serialization_error-001.phpt
@@ -1,18 +1,18 @@
--TEST--
-MongoDB\BSON\ObjectId unserialization requires "oid" string field
+MongoDB\BSON\Symbol unserialization requires "symbol" string field
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
echo throws(function() {
- unserialize('C:21:"MongoDB\BSON\ObjectId":20:{a:1:{s:3:"oid";i:0;}}');
+ unserialize('C:19:"MongoDB\BSON\Symbol":23:{a:1:{s:6:"symbol";i:0;}}');
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-MongoDB\BSON\ObjectId initialization requires "oid" string field
+MongoDB\BSON\Symbol initialization requires "symbol" string field
===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-javascript-set_state_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-symbol-serialization_error-002.phpt
similarity index 54%
rename from mongodb-1.3.4/tests/bson/bson-javascript-set_state_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-symbol-serialization_error-002.phpt
index 27a3c6b7..647c3fd6 100644
--- a/mongodb-1.3.4/tests/bson/bson-javascript-set_state_error-003.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-symbol-serialization_error-002.phpt
@@ -1,18 +1,18 @@
--TEST--
-MongoDB\BSON\Javascript::__set_state() does not allow code to contain null bytes
+MongoDB\BSON\Symbol unserialization does not allow code to contain null bytes
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
echo throws(function() {
- MongoDB\BSON\Javascript::__set_state(['code' => "function foo() { return '\0'; }"]);
+ unserialize('C:19:"MongoDB\BSON\Symbol":57:{a:1:{s:6:"symbol";s:30:"function foo() { return ' . "'\0'" . '; }";}}');
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Code cannot contain null bytes
+Symbol cannot contain null bytes
===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-symbol-tostring-001.phpt b/mongodb-1.4.2/tests/bson/bson-symbol-tostring-001.phpt
new file mode 100644
index 00000000..e7eea50e
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-symbol-tostring-001.phpt
@@ -0,0 +1,14 @@
+--TEST--
+MongoDB\BSON\Symbol::__toString()
+--FILE--
+<?php
+
+$symbol = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "symbolValue"} }'))->symbol;
+var_dump((string) $symbol);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+string(11) "symbolValue"
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-symbol_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-symbol_error-001.phpt
new file mode 100644
index 00000000..7707cc88
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-symbol_error-001.phpt
@@ -0,0 +1,12 @@
+--TEST--
+MongoDB\BSON\Symbol cannot be extended
+--FILE--
+<?php
+
+class MySymbol extends MongoDB\BSON\Symbol {}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Fatal error: Class MySymbol may not inherit from final class (MongoDB\BSON\Symbol) in %s on line %d
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-002.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-002.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/bson/bson-timestamp-002.phpt
copy to mongodb-1.4.2/tests/bson/bson-timestamp-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-003.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-004.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-005.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-005.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-005.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-compare-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-compare-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-compare-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-getIncrement-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-getIncrement-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-getIncrement-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-getIncrement-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-getTimestamp-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-getTimestamp-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-getTimestamp-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-getTimestamp-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-002.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-get_properties-001.phpt
similarity index 61%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-get_properties-001.phpt
index a8714e20..65e0b055 100644
--- a/mongodb-1.3.4/tests/bson/bson-timestamp-002.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-timestamp-get_properties-001.phpt
@@ -1,20 +1,20 @@
--TEST--
-MongoDB\BSON\Timestamp debug handler
+MongoDB\BSON\Timestamp get_properties handler (get_object_vars)
--FILE--
<?php
$timestamp = new MongoDB\BSON\Timestamp(1234, 5678);
-var_dump($timestamp);
+var_dump(get_object_vars($timestamp));
?>
===DONE===
<?php exit(0); ?>
---EXPECTF--
-object(MongoDB\BSON\Timestamp)#%d (%d) {
+--EXPECT--
+array(2) {
["increment"]=>
string(4) "1234"
["timestamp"]=>
string(4) "5678"
}
===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-timestamp-get_properties-002.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-get_properties-002.phpt
new file mode 100644
index 00000000..321fa713
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-timestamp-get_properties-002.phpt
@@ -0,0 +1,21 @@
+--TEST--
+MongoDB\BSON\Timestamp get_properties handler (foreach)
+--FILE--
+<?php
+
+$timestamp = new MongoDB\BSON\Timestamp(1234, 5678);
+
+foreach ($timestamp as $key => $value) {
+ var_dump($key);
+ var_dump($value);
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+string(9) "increment"
+string(4) "1234"
+string(9) "timestamp"
+string(4) "5678"
+===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-jsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-jsonserialize-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-jsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-jsonserialize-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-jsonserialize-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-jsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-serialization-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-serialization-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-serialization-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-serialization-002.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-serialization-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-serialization-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-serialization-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-serialization_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-serialization_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-serialization_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-serialization_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-serialization_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-serialization_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-serialization_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-serialization_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-serialization_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-serialization_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-serialization_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-serialization_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-serialization_error-004.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-serialization_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-serialization_error-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-serialization_error-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-set_state-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-set_state-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-set_state-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-set_state-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-set_state-002.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-set_state-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-set_state-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-set_state-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-set_state_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-set_state_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-set_state_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-set_state_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-set_state_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-set_state_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-set_state_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-set_state_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-set_state_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-set_state_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-set_state_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-set_state_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp-set_state_error-004.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp-set_state_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp-set_state_error-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp-set_state_error-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp_error-003.phpt
similarity index 75%
rename from mongodb-1.3.4/tests/bson/bson-timestamp_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp_error-003.phpt
index 459ad097..3e91b5f2 100644
--- a/mongodb-1.3.4/tests/bson/bson-timestamp_error-003.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-timestamp_error-003.phpt
@@ -1,36 +1,39 @@
--TEST--
MongoDB\BSON\Timestamp constructor requires positive unsigned 32-bit integers
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
echo throws(function() {
new MongoDB\BSON\Timestamp(-1, 0);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
echo throws(function() {
- new MongoDB\BSON\Timestamp(-2147483648, 0);
+ /* I realise that "-2147483647 - 1" could be written as "-2147483648", *however*, PHP considers
+ * the latter a floating point number, as it parses "-" and "2147483648" separately, and
+ * "2147483648" doesn't fit in the 32-bit signed range. */
+ new MongoDB\BSON\Timestamp(-2147483647 - 1, 0);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
echo throws(function() {
new MongoDB\BSON\Timestamp(0, -1);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
echo throws(function() {
- new MongoDB\BSON\Timestamp(0, -2147483648);
+ new MongoDB\BSON\Timestamp(0, -2147483647 - 1);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected increment to be an unsigned 32-bit integer, -1 given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected increment to be an unsigned 32-bit integer, -2147483648 given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected timestamp to be an unsigned 32-bit integer, -1 given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected timestamp to be an unsigned 32-bit integer, -2147483648 given
===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp_error-004.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp_error-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp_error-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp_error-005.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp_error-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestamp_error-005.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp_error-005.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-timestamp_error-006.phpt b/mongodb-1.4.2/tests/bson/bson-timestamp_error-006.phpt
similarity index 92%
rename from mongodb-1.3.4/tests/bson/bson-timestamp_error-006.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestamp_error-006.phpt
index 23ce8a60..a97e1c21 100644
--- a/mongodb-1.3.4/tests/bson/bson-timestamp_error-006.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-timestamp_error-006.phpt
@@ -1,46 +1,46 @@
--TEST--
MongoDB\BSON\Timestamp constructor requires integer or string arguments
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$invalidValues = [null, 1234.5, true, [], new stdClass];
foreach ($invalidValues as $invalidValue) {
echo throws(function() use ($invalidValue) {
new MongoDB\BSON\Timestamp($invalidValue, 0);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
}
foreach ($invalidValues as $invalidValue) {
echo throws(function() use ($invalidValue) {
new MongoDB\BSON\Timestamp(0, $invalidValue);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected increment to be an unsigned 32-bit integer or string, %r(null|NULL)%r given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected increment to be an unsigned 32-bit integer or string, %r(double|float)%r given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected increment to be an unsigned 32-bit integer or string, boolean given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected increment to be an unsigned 32-bit integer or string, array given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Expected increment to be an unsigned 32-bit integer or string, object given
+Expected increment to be an unsigned 32-bit integer or string, stdClass given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected timestamp to be an unsigned 32-bit integer or string, %r(null|NULL)%r given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected timestamp to be an unsigned 32-bit integer or string, %r(double|float)%r given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected timestamp to be an unsigned 32-bit integer or string, boolean given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected timestamp to be an unsigned 32-bit integer or string, array given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Expected timestamp to be an unsigned 32-bit integer or string, object given
+Expected timestamp to be an unsigned 32-bit integer or string, stdClass given
===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-timestampinterface-001.phpt b/mongodb-1.4.2/tests/bson/bson-timestampinterface-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-timestampinterface-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-timestampinterface-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toCanonicalJSON-001.phpt b/mongodb-1.4.2/tests/bson/bson-toCanonicalJSON-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toCanonicalJSON-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-toCanonicalJSON-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toCanonicalJSON-002.phpt b/mongodb-1.4.2/tests/bson/bson-toCanonicalJSON-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toCanonicalJSON-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-toCanonicalJSON-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toCanonicalJSON_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-toCanonicalJSON_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toCanonicalJSON_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-toCanonicalJSON_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toCanonicalJSON_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-toCanonicalJSON_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toCanonicalJSON_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-toCanonicalJSON_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toCanonicalJSON_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-toCanonicalJSON_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toCanonicalJSON_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-toCanonicalJSON_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toJSON-001.phpt b/mongodb-1.4.2/tests/bson/bson-toJSON-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toJSON-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-toJSON-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toJSON-002.phpt b/mongodb-1.4.2/tests/bson/bson-toJSON-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toJSON-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-toJSON-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toJSON_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-toJSON_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toJSON_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-toJSON_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toJSON_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-toJSON_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toJSON_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-toJSON_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toJSON_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-toJSON_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toJSON_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-toJSON_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP-001.phpt b/mongodb-1.4.2/tests/bson/bson-toPHP-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toPHP-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-toPHP-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP-002.phpt b/mongodb-1.4.2/tests/bson/bson-toPHP-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toPHP-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-toPHP-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP-003.phpt b/mongodb-1.4.2/tests/bson/bson-toPHP-003.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/bson/bson-toPHP-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-toPHP-003.phpt
index 9fa59e66..1416b2fb 100644
--- a/mongodb-1.3.4/tests/bson/bson-toPHP-003.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-toPHP-003.phpt
@@ -1,468 +1,468 @@
--TEST--
MongoDB\BSON\toPHP(): Tests from serialization specification
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
class MyClass
{
}
class YourClass implements MongoDB\BSON\Unserializable
{
function bsonUnserialize(array $data)
{
foreach ($data as $key => $value) {
$this->$key = $value;
}
$this->unserialized = true;
}
}
class OurClass implements MongoDB\BSON\Persistable
{
function bsonSerialize()
{
// Not tested with this test, so return empty array
return array();
}
function bsonUnserialize(array $data)
{
foreach ($data as $key => $value) {
$this->$key = $value;
}
$this->unserialized = true;
}
}
class TheirClass extends OurClass
{
}
// Create base64-encoded class names for __pclass field's binary data
$bMyClass = base64_encode('MyClass');
$bYourClass = base64_encode('YourClass');
$bOurClass = base64_encode('OurClass');
$bTheirClass = base64_encode('TheirClass');
$bInterface = base64_encode('MongoDB\BSON\Unserializable');
$testGroups = array(
array(
'name' => 'DEFAULT TYPEMAP',
'typemap' => array(),
'tests' => array(
'{ "foo": "yes", "bar" : false }',
'{ "foo": "no", "array" : [ 5, 6 ] }',
- '{ "foo": "no", "obj" : { "embedded" : 3.14 } }',
+ '{ "foo": "no", "obj" : { "embedded" : 4.125 } }',
'{ "foo": "yes", "__pclass": "MyClass" }',
'{ "foo": "yes", "__pclass": { "$binary": "' . $bMyClass . '", "$type": "80" } }',
'{ "foo": "yes", "__pclass": { "$binary": "' . $bYourClass . '", "$type": "80" } }',
'{ "foo": "yes", "__pclass": { "$binary": "' . $bOurClass . '", "$type": "80" } }',
'{ "foo": "yes", "__pclass": { "$binary": "' . $bYourClass . '", "$type": "44" } }',
),
),
array(
'name' => 'NONEXISTING CLASS',
'typemap' => array('root' => 'MissingClass'),
'tests' => array(
'{ "foo": "yes" }',
),
),
array(
'name' => 'DOES NOT IMPLEMENT UNSERIALIZABLE',
'typemap' => array('root' => 'MyClass'),
'tests' => array(
'{ "foo": "yes", "__pclass": { "$binary": "' . $bMyClass . '", "$type": "80" } }',
),
),
array(
'name' => 'IS NOT A CONCRETE CLASS',
'typemap' => array('root' => 'MongoDB\BSON\Unserializable'),
'tests' => array(
'{ "foo": "yes" }',
),
),
array(
'name' => 'IS NOT A CONCRETE CLASS VIA PCLASS',
'typemap' => array('root' => 'YourClass'),
'tests' => array(
'{ "foo": "yes", "__pclass" : { "$binary": "' . $bInterface . '", "$type": "80" } }',
),
),
array(
'name' => 'PCLASS OVERRIDES TYPEMAP (1)',
'typemap' => array('root' => 'YourClass'),
'tests' => array(
'{ "foo": "yes", "__pclass" : { "$binary": "' . $bMyClass . '", "$type": "80" } }',
'{ "foo": "yes", "__pclass" : { "$binary": "' . $bOurClass . '", "$type": "80" } }',
'{ "foo": "yes", "__pclass" : { "$binary": "' . $bTheirClass . '", "$type": "80" } }',
'{ "foo": "yes", "__pclass" : { "$binary": "' . $bYourClass . '", "$type": "80" } }',
),
),
array(
'name' => 'PCLASS OVERRIDES TYPEMAP (2)',
'typemap' => array('root' => 'OurClass'),
'tests' => array(
'{ "foo": "yes", "__pclass" : { "$binary": "' . $bTheirClass . '", "$type": "80" } }',
),
),
array(
'name' => 'OBJECTS AS ARRAY',
'typemap' => array('root' => 'array', 'document' => 'array'),
'tests' => array(
'{ "foo": "yes", "bar" : false }',
'{ "foo": "no", "array" : [ 5, 6 ] }',
- '{ "foo": "no", "obj" : { "embedded" : 3.14 } }',
+ '{ "foo": "no", "obj" : { "embedded" : 4.125 } }',
'{ "foo": "yes", "__pclass": "MyClass" }',
'{ "foo": "yes", "__pclass" : { "$binary": "' . $bMyClass . '", "$type": "80" } }',
'{ "foo": "yes", "__pclass" : { "$binary": "' . $bOurClass . '", "$type": "80" } }',
),
),
array(
'name' => 'OBJECTS AS STDCLASS',
'typemap' => array('root' => 'object', 'document' => 'object'),
'tests' => array(
'{ "foo": "yes", "__pclass" : { "$binary": "' . $bMyClass . '", "$type": "80" } }',
'{ "foo": "yes", "__pclass" : { "$binary": "' . $bOurClass . '", "$type": "80" } }',
),
),
);
foreach ($testGroups as $testGroup) {
printf("=== %s ===\n\n", $testGroup['name']);
foreach ($testGroup['tests'] as $test) {
echo $test, "\n";
$bson = fromJSON($test);
try {
var_dump(toPHP($bson, $testGroup['typemap']));
} catch (MongoDB\Driver\Exception\Exception $e) {
echo $e->getMessage(), "\n";
}
echo "\n";
}
echo "\n";
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
=== DEFAULT TYPEMAP ===
{ "foo": "yes", "bar" : false }
object(stdClass)#%d (2) {
["foo"]=>
string(3) "yes"
["bar"]=>
bool(false)
}
{ "foo": "no", "array" : [ 5, 6 ] }
object(stdClass)#%d (2) {
["foo"]=>
string(2) "no"
["array"]=>
array(2) {
[0]=>
int(5)
[1]=>
int(6)
}
}
-{ "foo": "no", "obj" : { "embedded" : 3.14 } }
+{ "foo": "no", "obj" : { "embedded" : 4.125 } }
object(stdClass)#%d (2) {
["foo"]=>
string(2) "no"
["obj"]=>
object(stdClass)#%d (1) {
["embedded"]=>
- float(3.14)
+ float(4.125)
}
}
{ "foo": "yes", "__pclass": "MyClass" }
object(stdClass)#%d (2) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
string(7) "MyClass"
}
{ "foo": "yes", "__pclass": { "$binary": "TXlDbGFzcw==", "$type": "80" } }
object(stdClass)#%d (2) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(7) "MyClass"
["type"]=>
int(128)
}
}
{ "foo": "yes", "__pclass": { "$binary": "WW91ckNsYXNz", "$type": "80" } }
object(stdClass)#%d (2) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(9) "YourClass"
["type"]=>
int(128)
}
}
{ "foo": "yes", "__pclass": { "$binary": "T3VyQ2xhc3M=", "$type": "80" } }
object(OurClass)#%d (3) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(8) "OurClass"
["type"]=>
int(128)
}
["unserialized"]=>
bool(true)
}
{ "foo": "yes", "__pclass": { "$binary": "WW91ckNsYXNz", "$type": "44" } }
object(stdClass)#%d (2) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(9) "YourClass"
["type"]=>
int(68)
}
}
=== NONEXISTING CLASS ===
{ "foo": "yes" }
Class MissingClass does not exist
=== DOES NOT IMPLEMENT UNSERIALIZABLE ===
{ "foo": "yes", "__pclass": { "$binary": "TXlDbGFzcw==", "$type": "80" } }
Class MyClass does not implement MongoDB\BSON\Unserializable
=== IS NOT A CONCRETE CLASS ===
{ "foo": "yes" }
Class MongoDB\BSON\Unserializable is not instantiatable
=== IS NOT A CONCRETE CLASS VIA PCLASS ===
{ "foo": "yes", "__pclass" : { "$binary": "TW9uZ29EQlxCU09OXFVuc2VyaWFsaXphYmxl", "$type": "80" } }
object(YourClass)#%d (3) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(27) "MongoDB\BSON\Unserializable"
["type"]=>
int(128)
}
["unserialized"]=>
bool(true)
}
=== PCLASS OVERRIDES TYPEMAP (1) ===
{ "foo": "yes", "__pclass" : { "$binary": "TXlDbGFzcw==", "$type": "80" } }
object(YourClass)#%d (3) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(7) "MyClass"
["type"]=>
int(128)
}
["unserialized"]=>
bool(true)
}
{ "foo": "yes", "__pclass" : { "$binary": "T3VyQ2xhc3M=", "$type": "80" } }
object(OurClass)#%d (3) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(8) "OurClass"
["type"]=>
int(128)
}
["unserialized"]=>
bool(true)
}
{ "foo": "yes", "__pclass" : { "$binary": "VGhlaXJDbGFzcw==", "$type": "80" } }
object(TheirClass)#%d (3) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(10) "TheirClass"
["type"]=>
int(128)
}
["unserialized"]=>
bool(true)
}
{ "foo": "yes", "__pclass" : { "$binary": "WW91ckNsYXNz", "$type": "80" } }
object(YourClass)#%d (3) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(9) "YourClass"
["type"]=>
int(128)
}
["unserialized"]=>
bool(true)
}
=== PCLASS OVERRIDES TYPEMAP (2) ===
{ "foo": "yes", "__pclass" : { "$binary": "VGhlaXJDbGFzcw==", "$type": "80" } }
object(TheirClass)#%d (3) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(10) "TheirClass"
["type"]=>
int(128)
}
["unserialized"]=>
bool(true)
}
=== OBJECTS AS ARRAY ===
{ "foo": "yes", "bar" : false }
array(2) {
["foo"]=>
string(3) "yes"
["bar"]=>
bool(false)
}
{ "foo": "no", "array" : [ 5, 6 ] }
array(2) {
["foo"]=>
string(2) "no"
["array"]=>
array(2) {
[0]=>
int(5)
[1]=>
int(6)
}
}
-{ "foo": "no", "obj" : { "embedded" : 3.14 } }
+{ "foo": "no", "obj" : { "embedded" : 4.125 } }
array(2) {
["foo"]=>
string(2) "no"
["obj"]=>
array(1) {
["embedded"]=>
- float(3.14)
+ float(4.125)
}
}
{ "foo": "yes", "__pclass": "MyClass" }
array(2) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
string(7) "MyClass"
}
{ "foo": "yes", "__pclass" : { "$binary": "TXlDbGFzcw==", "$type": "80" } }
array(2) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(7) "MyClass"
["type"]=>
int(128)
}
}
{ "foo": "yes", "__pclass" : { "$binary": "T3VyQ2xhc3M=", "$type": "80" } }
array(2) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(8) "OurClass"
["type"]=>
int(128)
}
}
=== OBJECTS AS STDCLASS ===
{ "foo": "yes", "__pclass" : { "$binary": "TXlDbGFzcw==", "$type": "80" } }
object(stdClass)#%d (2) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(7) "MyClass"
["type"]=>
int(128)
}
}
{ "foo": "yes", "__pclass" : { "$binary": "T3VyQ2xhc3M=", "$type": "80" } }
object(stdClass)#%d (2) {
["foo"]=>
string(3) "yes"
["__pclass"]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(8) "OurClass"
["type"]=>
int(128)
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP-004.phpt b/mongodb-1.4.2/tests/bson/bson-toPHP-004.phpt
similarity index 99%
rename from mongodb-1.3.4/tests/bson/bson-toPHP-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-toPHP-004.phpt
index 2a126a5c..f798bcdc 100644
--- a/mongodb-1.3.4/tests/bson/bson-toPHP-004.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-toPHP-004.phpt
@@ -1,577 +1,577 @@
--TEST--
MongoDB\BSON\toPHP(): BSON array keys should be disregarded during visitation
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
class MyArrayObject extends ArrayObject implements MongoDB\BSON\Unserializable
{
function bsonUnserialize(array $data)
{
parent::__construct($data);
}
}
$tests = [
null,
true,
1,
- 3.14,
+ 4.125,
'foo',
[],
(object) [],
new MongoDB\BSON\Binary('foo', MongoDB\BSON\Binary::TYPE_GENERIC),
new MongoDB\BSON\Decimal128('3.14'),
new MongoDB\BSON\Javascript('function(){}'),
new MongoDB\BSON\MaxKey,
new MongoDB\BSON\MinKey,
new MongoDB\BSON\ObjectId('586c18d86118fd6c9012dec1'),
new MongoDB\BSON\Regex('foo'),
new MongoDB\BSON\Timestamp(1234, 5678),
new MongoDB\BSON\UTCDateTime('1483479256924'),
];
foreach ($tests as $value) {
printf("Testing %s visitor function\n", is_object($value) ? get_class($value) : gettype($value));
$bson = fromPHP(['x' => [$value]]);
// Alter the key of the BSON array's first element
$bson[12] = '1';
var_dump(toPHP($bson));
/* Note that numeric indexes within the HashTable are not accessible without
* casting the object to an array. This is because the entries are only
* stored with numeric indexes and do not also have string equivalents, as
* might be created with zend_symtable_update(). This behavior is not unique
* to the driver, as `(object) ['foo']` would demonstrate the same issue. */
var_dump(toPHP($bson, ['array' => 'object']));
var_dump(toPHP($bson, ['array' => 'MyArrayObject']));
echo "\n";
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
Testing NULL visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
NULL
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
NULL
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
NULL
}
}
}
Testing boolean visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
bool(true)
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
bool(true)
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
bool(true)
}
}
}
Testing integer visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
int(1)
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
int(1)
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
int(1)
}
}
}
Testing double visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
- float(3.14)
+ float(4.125)
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
- float(3.14)
+ float(4.125)
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
- float(3.14)
+ float(4.125)
}
}
}
Testing string visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
string(3) "foo"
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
string(3) "foo"
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
string(3) "foo"
}
}
}
Testing array visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
array(0) {
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(stdClass)#%d (0) {
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(0) {
}
}
}
}
}
Testing stdClass visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
object(stdClass)#%d (0) {
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(stdClass)#%d (0) {
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(stdClass)#%d (0) {
}
}
}
}
Testing MongoDB\BSON\Binary visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(3) "foo"
["type"]=>
int(0)
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(3) "foo"
["type"]=>
int(0)
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(MongoDB\BSON\Binary)#%d (2) {
["data"]=>
string(3) "foo"
["type"]=>
int(0)
}
}
}
}
Testing MongoDB\BSON\Decimal128 visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
object(MongoDB\BSON\Decimal128)#%d (1) {
["dec"]=>
string(4) "3.14"
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(MongoDB\BSON\Decimal128)#%d (1) {
["dec"]=>
string(4) "3.14"
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(MongoDB\BSON\Decimal128)#%d (1) {
["dec"]=>
string(4) "3.14"
}
}
}
}
Testing MongoDB\BSON\Javascript visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
object(MongoDB\BSON\Javascript)#%d (2) {
["code"]=>
string(12) "function(){}"
["scope"]=>
NULL
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(MongoDB\BSON\Javascript)#%d (2) {
["code"]=>
string(12) "function(){}"
["scope"]=>
NULL
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(MongoDB\BSON\Javascript)#%d (2) {
["code"]=>
string(12) "function(){}"
["scope"]=>
NULL
}
}
}
}
Testing MongoDB\BSON\MaxKey visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
object(MongoDB\BSON\MaxKey)#%d (0) {
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(MongoDB\BSON\MaxKey)#%d (0) {
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(MongoDB\BSON\MaxKey)#%d (0) {
}
}
}
}
Testing MongoDB\BSON\MinKey visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
object(MongoDB\BSON\MinKey)#%d (0) {
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(MongoDB\BSON\MinKey)#%d (0) {
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(MongoDB\BSON\MinKey)#%d (0) {
}
}
}
}
Testing MongoDB\BSON\ObjectId visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
object(MongoDB\BSON\ObjectId)#%d (1) {
["oid"]=>
string(24) "586c18d86118fd6c9012dec1"
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(MongoDB\BSON\ObjectId)#%d (1) {
["oid"]=>
string(24) "586c18d86118fd6c9012dec1"
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(MongoDB\BSON\ObjectId)#%d (1) {
["oid"]=>
string(24) "586c18d86118fd6c9012dec1"
}
}
}
}
Testing MongoDB\BSON\Regex visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
object(MongoDB\BSON\Regex)#%d (2) {
["pattern"]=>
string(3) "foo"
["flags"]=>
string(0) ""
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(MongoDB\BSON\Regex)#%d (2) {
["pattern"]=>
string(3) "foo"
["flags"]=>
string(0) ""
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(MongoDB\BSON\Regex)#%d (2) {
["pattern"]=>
string(3) "foo"
["flags"]=>
string(0) ""
}
}
}
}
Testing MongoDB\BSON\Timestamp visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
object(MongoDB\BSON\Timestamp)#%d (2) {
["increment"]=>
string(4) "1234"
["timestamp"]=>
string(4) "5678"
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(MongoDB\BSON\Timestamp)#%d (2) {
["increment"]=>
string(4) "1234"
["timestamp"]=>
string(4) "5678"
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(MongoDB\BSON\Timestamp)#%d (2) {
["increment"]=>
string(4) "1234"
["timestamp"]=>
string(4) "5678"
}
}
}
}
Testing MongoDB\BSON\UTCDateTime visitor function
object(stdClass)#%d (1) {
["x"]=>
array(1) {
[0]=>
object(MongoDB\BSON\UTCDateTime)#%d (1) {
["milliseconds"]=>
string(13) "1483479256924"
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(stdClass)#%d (1) {
[%r(0|"0")%r]=>
object(MongoDB\BSON\UTCDateTime)#%d (1) {
["milliseconds"]=>
string(13) "1483479256924"
}
}
}
object(stdClass)#%d (1) {
["x"]=>
object(MyArrayObject)#%d (1) {
["storage":"ArrayObject":private]=>
array(1) {
[0]=>
object(MongoDB\BSON\UTCDateTime)#%d (1) {
["milliseconds"]=>
string(13) "1483479256924"
}
}
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP-006.phpt b/mongodb-1.4.2/tests/bson/bson-toPHP-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toPHP-006.phpt
rename to mongodb-1.4.2/tests/bson/bson-toPHP-006.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-toPHP_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toPHP_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-toPHP_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-toPHP_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toPHP_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-toPHP_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-toPHP_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toPHP_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-toPHP_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toPHP_error-004.phpt b/mongodb-1.4.2/tests/bson/bson-toPHP_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toPHP_error-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-toPHP_error-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toRelaxedJSON-001.phpt b/mongodb-1.4.2/tests/bson/bson-toRelaxedJSON-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toRelaxedJSON-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-toRelaxedJSON-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toRelaxedJSON-002.phpt b/mongodb-1.4.2/tests/bson/bson-toRelaxedJSON-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toRelaxedJSON-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-toRelaxedJSON-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toRelaxedJSON_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-toRelaxedJSON_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toRelaxedJSON_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-toRelaxedJSON_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toRelaxedJSON_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-toRelaxedJSON_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toRelaxedJSON_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-toRelaxedJSON_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-toRelaxedJSON_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-toRelaxedJSON_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-toRelaxedJSON_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-toRelaxedJSON_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-maxkey-001.phpt b/mongodb-1.4.2/tests/bson/bson-undefined-001.phpt
similarity index 61%
rename from mongodb-1.3.4/tests/bson/bson-maxkey-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-undefined-001.phpt
index 26e77e4f..86cd5be1 100644
--- a/mongodb-1.3.4/tests/bson/bson-maxkey-001.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-undefined-001.phpt
@@ -1,29 +1,28 @@
--TEST--
-MongoDB\BSON\MaxKey #001
+MongoDB\BSON\Undefined #001
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$maxkey = new MongoDB\BSON\MaxKey;
$tests = array(
- array("max" => $maxkey),
+ MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }')),
);
foreach($tests as $n => $test) {
$s = fromPHP($test);
echo "Test#{$n} ", $json = toJSON($s), "\n";
$bson = fromJSON($json);
$testagain = toPHP($bson);
var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain)));
var_dump((object)$test == (object)$testagain);
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-Test#0 { "max" : { "$maxKey" : 1 } }
-string(29) "{ "max" : { "$maxKey" : 1 } }"
-string(29) "{ "max" : { "$maxKey" : 1 } }"
+Test#0 { "undefined" : { "$undefined" : true } }
+string(41) "{ "undefined" : { "$undefined" : true } }"
+string(41) "{ "undefined" : { "$undefined" : true } }"
bool(true)
===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-undefined-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-undefined-compare-001.phpt
new file mode 100644
index 00000000..2b2eeefd
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-undefined-compare-001.phpt
@@ -0,0 +1,17 @@
+--TEST--
+MongoDB\BSON\Undefined comparisons
+--FILE--
+<?php
+
+var_dump(MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }')) == MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }')));
+var_dump(MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }')) < MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }')));
+var_dump(MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }')) > MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }')));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+bool(true)
+bool(false)
+bool(false)
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-undefined-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-undefined-jsonserialize-001.phpt
new file mode 100644
index 00000000..46906f5b
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-undefined-jsonserialize-001.phpt
@@ -0,0 +1,17 @@
+--TEST--
+MongoDB\BSON\Undefined::jsonSerialize() return value
+--FILE--
+<?php
+
+$undefined = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }'))->undefined;
+var_dump($undefined->jsonSerialize());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+array(1) {
+ ["$undefined"]=>
+ bool(true)
+}
+===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-minkey-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-undefined-jsonserialize-002.phpt
similarity index 53%
rename from mongodb-1.3.4/tests/bson/bson-minkey-jsonserialize-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-undefined-jsonserialize-002.phpt
index c6258b88..aaec70e1 100644
--- a/mongodb-1.3.4/tests/bson/bson-minkey-jsonserialize-002.phpt
+++ b/mongodb-1.4.2/tests/bson/bson-undefined-jsonserialize-002.phpt
@@ -1,26 +1,26 @@
--TEST--
-MongoDB\BSON\MinKey::jsonSerialize() with json_encode()
+MongoDB\BSON\Undefined::jsonSerialize() with json_encode()
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
-$doc = ['foo' => new MongoDB\BSON\MinKey];
+$doc = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "foo": {"$undefined": true} }'));
$json = json_encode($doc);
echo toJSON(fromPHP($doc)), "\n";
echo $json, "\n";
var_dump(toPHP(fromJSON($json)));
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
-{ "foo" : { "$minKey" : 1 } }
-{"foo":{"$minKey":1}}
+{ "foo" : { "$undefined" : true } }
+{"foo":{"$undefined":true}}
object(stdClass)#%d (%d) {
["foo"]=>
- object(MongoDB\BSON\MinKey)#%d (%d) {
+ object(MongoDB\BSON\Undefined)#%d (%d) {
}
}
===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-undefined-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-undefined-serialization-001.phpt
new file mode 100644
index 00000000..98ecec19
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-undefined-serialization-001.phpt
@@ -0,0 +1,19 @@
+--TEST--
+MongoDB\BSON\Undefined serialization
+--FILE--
+<?php
+
+var_dump($undefined = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }'))->undefined);
+var_dump($s = serialize($undefined));
+var_dump(unserialize($s));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+object(MongoDB\BSON\Undefined)#%d (%d) {
+}
+string(34) "C:22:"MongoDB\BSON\Undefined":0:{}"
+object(MongoDB\BSON\Undefined)#%d (%d) {
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-undefined-tostring-001.phpt b/mongodb-1.4.2/tests/bson/bson-undefined-tostring-001.phpt
new file mode 100644
index 00000000..48309934
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-undefined-tostring-001.phpt
@@ -0,0 +1,14 @@
+--TEST--
+MongoDB\BSON\Undefined::__toString()
+--FILE--
+<?php
+
+$undefined = MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }'))->undefined;
+var_dump((string) $undefined);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+string(0) ""
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-undefined_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-undefined_error-001.phpt
new file mode 100644
index 00000000..efa6620f
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-undefined_error-001.phpt
@@ -0,0 +1,12 @@
+--TEST--
+MongoDB\BSON\Undefined cannot be extended
+--FILE--
+<?php
+
+class MyUndefined extends MongoDB\BSON\Undefined {}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Fatal error: Class MyUndefined may not inherit from final class (MongoDB\BSON\Undefined) in %s on line %d
diff --git a/mongodb-1.3.4/tests/bson/bson-unknown-001.phpt b/mongodb-1.4.2/tests/bson/bson-unknown-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-unknown-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-unknown-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-002.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-003.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-004.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-005.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-005.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-005.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-006.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-006.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-006.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-007.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-007.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-007.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-compare-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-compare-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-compare-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-compare-001.phpt
diff --git a/mongodb-1.4.2/tests/bson/bson-utcdatetime-get_properties-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-get_properties-001.phpt
new file mode 100644
index 00000000..ab4dd66d
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-utcdatetime-get_properties-001.phpt
@@ -0,0 +1,18 @@
+--TEST--
+MongoDB\BSON\UTCDateTime get_properties handler (get_object_vars)
+--FILE--
+<?php
+
+$utcdatetime = new MongoDB\BSON\UTCDateTime('1416445411987');
+
+var_dump(get_object_vars($utcdatetime));
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+array(1) {
+ ["milliseconds"]=>
+ string(13) "1416445411987"
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/bson/bson-utcdatetime-get_properties-002.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-get_properties-002.phpt
new file mode 100644
index 00000000..bcb4aee8
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bson-utcdatetime-get_properties-002.phpt
@@ -0,0 +1,19 @@
+--TEST--
+MongoDB\BSON\UTCDateTime get_properties handler (foreach)
+--FILE--
+<?php
+
+$utcdatetime = new MongoDB\BSON\UTCDateTime('1416445411987');
+
+foreach ($utcdatetime as $key => $value) {
+ var_dump($key);
+ var_dump($value);
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+string(12) "milliseconds"
+string(13) "1416445411987"
+===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-int-size-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-int-size-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-int-size-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-int-size-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-int-size-002.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-int-size-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-int-size-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-int-size-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-jsonserialize-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-jsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-jsonserialize-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-jsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-jsonserialize-002.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-jsonserialize-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-jsonserialize-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-jsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-serialization-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-serialization-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-serialization-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-serialization-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-serialization-002.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-serialization-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-serialization-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-serialization-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-serialization_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-serialization_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-serialization_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-serialization_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-serialization_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-serialization_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-serialization_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-serialization_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-set_state-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-set_state-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-set_state-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-set_state-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-set_state-002.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-set_state-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-set_state-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-set_state-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-set_state_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-set_state_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-set_state_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-set_state_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-set_state_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-set_state_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-set_state_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-set_state_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-todatetime-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-todatetime-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-todatetime-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-todatetime-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-todatetime-002.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-todatetime-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-todatetime-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-todatetime-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime-tostring-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime-tostring-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime-tostring-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime-tostring-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime_error-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime_error-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime_error-002.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime_error-002.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime_error-003.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime_error-003.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetime_error-004.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetime_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetime_error-004.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetime_error-004.phpt
diff --git a/mongodb-1.3.4/tests/bson/bson-utcdatetimeinterface-001.phpt b/mongodb-1.4.2/tests/bson/bson-utcdatetimeinterface-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bson-utcdatetimeinterface-001.phpt
rename to mongodb-1.4.2/tests/bson/bson-utcdatetimeinterface-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0274.phpt b/mongodb-1.4.2/tests/bson/bug0274.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0274.phpt
rename to mongodb-1.4.2/tests/bson/bug0274.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0313.phpt b/mongodb-1.4.2/tests/bson/bug0313.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0313.phpt
rename to mongodb-1.4.2/tests/bson/bug0313.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0325.phpt b/mongodb-1.4.2/tests/bson/bug0325.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0325.phpt
rename to mongodb-1.4.2/tests/bson/bug0325.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0334-001.phpt b/mongodb-1.4.2/tests/bson/bug0334-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0334-001.phpt
rename to mongodb-1.4.2/tests/bson/bug0334-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0334-002.phpt b/mongodb-1.4.2/tests/bson/bug0334-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0334-002.phpt
rename to mongodb-1.4.2/tests/bson/bug0334-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0341.phpt b/mongodb-1.4.2/tests/bson/bug0341.phpt
similarity index 90%
rename from mongodb-1.3.4/tests/bson/bug0341.phpt
rename to mongodb-1.4.2/tests/bson/bug0341.phpt
index ea944983..e2df2ab2 100644
--- a/mongodb-1.3.4/tests/bson/bug0341.phpt
+++ b/mongodb-1.4.2/tests/bson/bug0341.phpt
@@ -1,49 +1,49 @@
--TEST--
PHPC-341: fromJSON() leaks when JSON contains array or object fields
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$tests = array(
'{ "foo": "yes", "bar" : false }',
'{ "foo": "no", "array" : [ 5, 6 ] }',
- '{ "foo": "no", "obj" : { "embedded" : 3.14 } }',
+ '{ "foo": "no", "obj" : { "embedded" : 4.125 } }',
);
foreach ($tests as $test) {
$bson = fromJSON($test);
var_dump(toPHP($bson));
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (2) {
["foo"]=>
string(3) "yes"
["bar"]=>
bool(false)
}
object(stdClass)#%d (2) {
["foo"]=>
string(2) "no"
["array"]=>
array(2) {
[0]=>
int(5)
[1]=>
int(6)
}
}
object(stdClass)#%d (2) {
["foo"]=>
string(2) "no"
["obj"]=>
object(stdClass)#%d (1) {
["embedded"]=>
- float(3.14)
+ float(4.125)
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/bson/bug0347.phpt b/mongodb-1.4.2/tests/bson/bug0347.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0347.phpt
rename to mongodb-1.4.2/tests/bson/bug0347.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0528.phpt b/mongodb-1.4.2/tests/bson/bug0528.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0528.phpt
rename to mongodb-1.4.2/tests/bson/bug0528.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0531.phpt b/mongodb-1.4.2/tests/bson/bug0531.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0531.phpt
rename to mongodb-1.4.2/tests/bson/bug0531.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0544.phpt b/mongodb-1.4.2/tests/bson/bug0544.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0544.phpt
rename to mongodb-1.4.2/tests/bson/bug0544.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0592.phpt b/mongodb-1.4.2/tests/bson/bug0592.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0592.phpt
rename to mongodb-1.4.2/tests/bson/bug0592.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0623.phpt b/mongodb-1.4.2/tests/bson/bug0623.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0623.phpt
rename to mongodb-1.4.2/tests/bson/bug0623.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0631.phpt b/mongodb-1.4.2/tests/bson/bug0631.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0631.phpt
rename to mongodb-1.4.2/tests/bson/bug0631.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0672.phpt b/mongodb-1.4.2/tests/bson/bug0672.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0672.phpt
rename to mongodb-1.4.2/tests/bson/bug0672.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0894-001.phpt b/mongodb-1.4.2/tests/bson/bug0894-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0894-001.phpt
rename to mongodb-1.4.2/tests/bson/bug0894-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0923-001.phpt b/mongodb-1.4.2/tests/bson/bug0923-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0923-001.phpt
rename to mongodb-1.4.2/tests/bson/bug0923-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0923-002.phpt b/mongodb-1.4.2/tests/bson/bug0923-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0923-002.phpt
rename to mongodb-1.4.2/tests/bson/bug0923-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0939-001.phpt b/mongodb-1.4.2/tests/bson/bug0939-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0939-001.phpt
rename to mongodb-1.4.2/tests/bson/bug0939-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug0974-001.phpt b/mongodb-1.4.2/tests/bson/bug0974-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug0974-001.phpt
rename to mongodb-1.4.2/tests/bson/bug0974-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug1006-001.phpt b/mongodb-1.4.2/tests/bson/bug1006-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug1006-001.phpt
rename to mongodb-1.4.2/tests/bson/bug1006-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug1006-002.phpt b/mongodb-1.4.2/tests/bson/bug1006-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug1006-002.phpt
rename to mongodb-1.4.2/tests/bson/bug1006-002.phpt
diff --git a/mongodb-1.3.4/tests/bson/bug1053.phpt b/mongodb-1.4.2/tests/bson/bug1053.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/bug1053.phpt
rename to mongodb-1.4.2/tests/bson/bug1053.phpt
diff --git a/mongodb-1.4.2/tests/bson/bug1067.phpt b/mongodb-1.4.2/tests/bson/bug1067.phpt
new file mode 100644
index 00000000..24dfd619
--- /dev/null
+++ b/mongodb-1.4.2/tests/bson/bug1067.phpt
@@ -0,0 +1,16 @@
+--TEST--
+PHPC-1067: BSON document produces driver segfault with insert
+--FILE--
+<?php
+$x = [
+ '_______' => new MongoDB\BSON\ObjectID('111111111111111111111111'),
+ '___________________________________' => new MongoDB\BSON\Regex('_______________________________________________________', 'i'),
+];
+
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->insert($x);
+?>
+==DONE==
+<?php exit(0); ?>
+--EXPECT--
+==DONE==
diff --git a/mongodb-1.3.4/tests/bson/typemap-001.phpt b/mongodb-1.4.2/tests/bson/typemap-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/typemap-001.phpt
rename to mongodb-1.4.2/tests/bson/typemap-001.phpt
diff --git a/mongodb-1.3.4/tests/bson/typemap-002.phpt b/mongodb-1.4.2/tests/bson/typemap-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bson/typemap-002.phpt
rename to mongodb-1.4.2/tests/bson/typemap-002.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bug0667.phpt b/mongodb-1.4.2/tests/bulk/bug0667.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bug0667.phpt
rename to mongodb-1.4.2/tests/bulk/bug0667.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-count-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-count-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-count-001.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-count-001.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-countable-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-countable-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-countable-001.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-countable-001.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-debug-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-debug-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-debug-001.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-debug-001.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-delete-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-delete-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-delete-001.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-delete-001.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-delete_error-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-delete_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-delete_error-001.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-delete_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-delete_error-002.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-delete_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-delete_error-002.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-delete_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-delete_error-003.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-delete_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-delete_error-003.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-delete_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-insert-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-insert-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-insert-001.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-insert-001.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-insert-004.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-insert-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-insert-004.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-insert-004.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-insert_error-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-insert_error-001.phpt
similarity index 80%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-insert_error-001.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-insert_error-001.phpt
index efb6ff61..a98ca226 100644
--- a/mongodb-1.3.4/tests/bulk/bulkwrite-insert_error-001.phpt
+++ b/mongodb-1.4.2/tests/bulk/bulkwrite-insert_error-001.phpt
@@ -1,41 +1,41 @@
--TEST--
MongoDB\Driver\BulkWrite::insert() with invalid insert document
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$bulk = new MongoDB\Driver\BulkWrite;
echo throws(function() use ($bulk) {
$bulk->insert(['' => 1]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n";
echo throws(function() use ($bulk) {
$bulk->insert(['x.y' => 1]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n";
echo throws(function() use ($bulk) {
$bulk->insert(['$x' => 1]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n";
echo throws(function() use ($bulk) {
$bulk->insert(["\xc3\x28" => 1]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-document to insert contains invalid key: empty key
+invalid document for insert: empty key
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-document to insert contains invalid key: keys cannot contain ".": "x.y"
+invalid document for insert: keys cannot contain ".": "x.y"
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-document to insert contains invalid key: keys cannot begin with "$": "$x"
+invalid document for insert: keys cannot begin with "$": "$x"
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-document to insert contains invalid key: corrupt BSON
+invalid document for insert: corrupt BSON
===DONE===
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-insert_error-002.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-insert_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-insert_error-002.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-insert_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-insert_error-003.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-insert_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-insert_error-003.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-insert_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-update-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-update-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-update-001.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-update-001.phpt
diff --git a/mongodb-1.4.2/tests/bulk/bulkwrite-update-arrayFilters-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-update-arrayFilters-001.phpt
new file mode 100644
index 00000000..c047b292
--- /dev/null
+++ b/mongodb-1.4.2/tests/bulk/bulkwrite-update-arrayFilters-001.phpt
@@ -0,0 +1,86 @@
+--TEST--
+MongoDB\Driver\BulkWrite::update with arrayFilters
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php START('THROWAWAY', ["version" => "36-release"]); CLEANUP(THROWAWAY); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(THROWAWAY);
+
+$bulk = new MongoDB\Driver\BulkWrite();
+
+$bulk->insert([ '_id' => 1, 'grades' => [ 95, 92, 90 ] ]);
+$bulk->insert([ '_id' => 2, 'grades' => [ 98, 100, 102 ] ]);
+$bulk->insert([ '_id' => 3, 'grades' => [ 95, 110, 100 ] ]);
+
+$manager->executeBulkWrite(DATABASE_NAME . '.' . COLLECTION_NAME, $bulk);
+
+$updateBulk = new MongoDB\Driver\BulkWrite();
+
+$query = new MongoDB\Driver\Query(['grades' => ['$gte' => 100]]);
+$update = [ '$set' => [ 'grades.$[element]' => 100 ] ];
+$options = [
+ 'arrayFilters' => [ [ 'element' => [ '$gte' => 100 ] ] ],
+ 'multi' => true
+];
+
+$updateBulk->update($query, $update, $options);
+$manager->executeBulkWrite(DATABASE_NAME . '.' . COLLECTION_NAME, $updateBulk);
+
+$cursor = $manager->executeQuery( DATABASE_NAME . '.' . COLLECTION_NAME, new \MongoDB\Driver\Query([]));
+var_dump($cursor->toArray());
+?>
+===DONE===
+<?php DELETE("THROWAWAY"); ?>
+<?php exit(0); ?>
+--CLEAN--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php DELETE("THROWAWAY"); ?>
+--EXPECTF--
+array(%d) {
+ [0]=>
+ object(stdClass)#%d (%d) {
+ ["_id"]=>
+ int(1)
+ ["grades"]=>
+ array(%d) {
+ [0]=>
+ int(95)
+ [1]=>
+ int(92)
+ [2]=>
+ int(90)
+ }
+ }
+ [1]=>
+ object(stdClass)#%d (%d) {
+ ["_id"]=>
+ int(2)
+ ["grades"]=>
+ array(%d) {
+ [0]=>
+ int(98)
+ [1]=>
+ int(100)
+ [2]=>
+ int(100)
+ }
+ }
+ [2]=>
+ object(stdClass)#%d (%d) {
+ ["_id"]=>
+ int(3)
+ ["grades"]=>
+ array(%d) {
+ [0]=>
+ int(95)
+ [1]=>
+ int(100)
+ [2]=>
+ int(100)
+ }
+ }
+}
+===DONE===
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-update_error-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-update_error-001.phpt
similarity index 80%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-update_error-001.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-update_error-001.phpt
index 8e0e99ae..278bb507 100644
--- a/mongodb-1.3.4/tests/bulk/bulkwrite-update_error-001.phpt
+++ b/mongodb-1.4.2/tests/bulk/bulkwrite-update_error-001.phpt
@@ -1,41 +1,41 @@
--TEST--
MongoDB\Driver\BulkWrite::update() with invalid replacement document
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$bulk = new MongoDB\Driver\BulkWrite;
echo throws(function() use ($bulk) {
$bulk->update(['x' => 1], ['' => 1]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n";
echo throws(function() use ($bulk) {
$bulk->update(['x' => 1], ['x.y' => 1]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n";
echo throws(function() use ($bulk) {
$bulk->update(['x' => 1], ['y' => ['$x' => 1]]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n";
echo throws(function() use ($bulk) {
$bulk->update(['x' => 1], ["\xc3\x28" => 1]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-replacement document contains invalid key: empty key
+invalid argument for replace: empty key
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-replacement document contains invalid key: keys cannot contain ".": "x.y"
+invalid argument for replace: keys cannot contain ".": "x.y"
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-replacement document contains invalid key: keys cannot begin with "$": "$x"
+invalid argument for replace: keys cannot begin with "$": "$x"
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-replacement document contains invalid key: corrupt BSON
+invalid argument for replace: corrupt BSON
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-update_error-002.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-update_error-002.phpt
similarity index 92%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-update_error-002.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-update_error-002.phpt
index 6c949d83..58373ef7 100644
--- a/mongodb-1.3.4/tests/bulk/bulkwrite-update_error-002.phpt
+++ b/mongodb-1.4.2/tests/bulk/bulkwrite-update_error-002.phpt
@@ -1,38 +1,38 @@
--TEST--
MongoDB\Driver\BulkWrite::update() with invalid update document
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$bulk = new MongoDB\Driver\BulkWrite;
echo throws(function() use ($bulk) {
$bulk->update(['x' => 1], ['$set' => ['x' => ['' => 1]]]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n";
echo throws(function() use ($bulk) {
$bulk->update(['x' => 1], ['$set' => ['x' => ["\xc3\x28" => 1]]]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n";
/* This newObj argument mixes an update and replacement document, but
* php_phongo_bulkwrite_update_has_operators() will categorize it as an update
* due to the presence of an atomic operator. As such, _mongoc_validate_update()
* will report the error. */
echo throws(function() use ($bulk) {
$bulk->update(['x' => 1], ['$set' => ['y' => 1], 'z' => 1]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-update document contains invalid key: empty key
+invalid argument for update: empty key
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-update document contains invalid key: corrupt BSON
+invalid argument for update: corrupt BSON
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Invalid key 'z': update only works with $ operators
===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-update_error-003.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-update_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-update_error-003.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-update_error-003.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-update_error-004.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-update_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-update_error-004.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-update_error-004.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite-update_error-005.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite-update_error-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite-update_error-005.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite-update_error-005.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite_error-001.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite_error-001.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite_error-001.phpt
diff --git a/mongodb-1.3.4/tests/bulk/bulkwrite_error-002.phpt b/mongodb-1.4.2/tests/bulk/bulkwrite_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/bulkwrite_error-002.phpt
rename to mongodb-1.4.2/tests/bulk/bulkwrite_error-002.phpt
diff --git a/mongodb-1.3.4/tests/bulk/write-0001.phpt b/mongodb-1.4.2/tests/bulk/write-0001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/write-0001.phpt
rename to mongodb-1.4.2/tests/bulk/write-0001.phpt
diff --git a/mongodb-1.3.4/tests/bulk/write-0002.phpt b/mongodb-1.4.2/tests/bulk/write-0002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/bulk/write-0002.phpt
rename to mongodb-1.4.2/tests/bulk/write-0002.phpt
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-001.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-001.phpt
new file mode 100644
index 00000000..4bdd706f
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-001.phpt
@@ -0,0 +1,23 @@
+--TEST--
+Causal consistency: new session has no operation time
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$session = $manager->startSession();
+
+echo "Initial operation time:\n";
+var_dump($session->getOperationTime());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Initial operation time:
+NULL
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-002.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-002.phpt
new file mode 100644
index 00000000..16c13d15
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-002.phpt
@@ -0,0 +1,32 @@
+--TEST--
+Causal consistency: first read in session does not include afterClusterTime
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+(new CommandObserver)->observe(
+ function() {
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ },
+ function(stdClass $command)
+ {
+ $hasAfterClusterTime = isset($command->readConcern->afterClusterTime);
+ printf("Read includes afterClusterTime: %s\n", ($hasAfterClusterTime ? 'yes' : 'no'));
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Read includes afterClusterTime: no
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-003.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-003.phpt
new file mode 100644
index 00000000..8da30278
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-003.phpt
@@ -0,0 +1,112 @@
+--TEST--
+Causal consistency: first read or write in session updates operationTime
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class Test implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ private $lastSeenOperationTime;
+
+ public function executeBulkWrite()
+ {
+ $this->lastSeenOperationTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $bulk = new MongoDB\Driver\BulkWrite;
+ $bulk->insert(['x' => 1]);
+ $manager->executeBulkWrite(NS, $bulk, ['session' => $session]);
+
+ printf("Session reports last seen operationTime: %s\n", ($session->getOperationTime() == $this->lastSeenOperationTime) ? 'yes' : 'no');
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function executeCommand()
+ {
+ $this->lastSeenOperationTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $command = new MongoDB\Driver\Command(['ping' => 1]);
+ $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]);
+
+ printf("Session reports last seen operationTime: %s\n", ($session->getOperationTime() == $this->lastSeenOperationTime) ? 'yes' : 'no');
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function executeQuery()
+ {
+ $this->lastSeenOperationTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+
+ printf("Session reports last seen operationTime: %s\n", ($session->getOperationTime() == $this->lastSeenOperationTime) ? 'yes' : 'no');
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ $reply = $event->getReply();
+ $hasOperationTime = isset($reply->{'operationTime'});
+
+ printf("%s command reply includes operationTime: %s\n", $event->getCommandName(), $hasOperationTime ? 'yes' : 'no');
+
+ if ($hasOperationTime) {
+ $this->lastSeenOperationTime = $reply->operationTime;
+ }
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+echo "Testing executeBulkWrite()\n";
+(new Test)->executeBulkWrite();
+
+echo "\nTesting executeCommand()\n";
+(new Test)->executeCommand();
+
+echo "\nTesting executeQuery()\n";
+(new Test)->executeQuery();
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing executeBulkWrite()
+insert command reply includes operationTime: yes
+Session reports last seen operationTime: yes
+
+Testing executeCommand()
+ping command reply includes operationTime: yes
+Session reports last seen operationTime: yes
+
+Testing executeQuery()
+find command reply includes operationTime: yes
+Session reports last seen operationTime: yes
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-004.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-004.phpt
new file mode 100644
index 00000000..6212f497
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-004.phpt
@@ -0,0 +1,135 @@
+--TEST--
+Causal consistency: first read or write in session updates operationTime (even on error)
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class Test implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ private $lastSeenOperationTime;
+
+ public function executeBulkWrite()
+ {
+ $this->lastSeenOperationTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $bulk = new MongoDB\Driver\BulkWrite;
+ $bulk->insert(['_id' => 1]);
+ $bulk->insert(['_id' => 1]);
+
+ throws(function() use ($manager, $bulk, $session) {
+ $manager->executeBulkWrite(NS, $bulk, ['session' => $session]);
+ }, 'MongoDB\Driver\Exception\BulkWriteException');
+
+ printf("Session reports last seen operationTime: %s\n", ($session->getOperationTime() == $this->lastSeenOperationTime) ? 'yes' : 'no');
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function executeCommand()
+ {
+ $this->lastSeenOperationTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $command = new MongoDB\Driver\Command([
+ 'aggregate' => COLLECTION_NAME,
+ 'pipeline' => [
+ ['$unsupportedOperator' => 1],
+ ],
+ 'cursor' => new stdClass,
+ ]);
+
+ throws(function() use ($manager, $command, $session) {
+ $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]);
+ }, 'MongoDB\Driver\Exception\RuntimeException');
+
+ /* We cannot access the server reply if an exception is thrown for a
+ * failed command (see: PHPC-1076). For the time being, just assert that
+ * the operationTime is not null. */
+ printf("Session has non-null operationTime: %s\n", ($session->getOperationTime() !== null ? 'yes' : 'no'));
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function executeQuery()
+ {
+ $this->lastSeenOperationTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $query = new MongoDB\Driver\Query(['$unsupportedOperator' => 1]);
+
+ throws(function() use ($manager, $query, $session) {
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ }, 'MongoDB\Driver\Exception\RuntimeException');
+
+ /* We cannot access the server reply if an exception is thrown for a
+ * failed command (see: PHPC-1076). For the time being, just assert that
+ * the operationTime is not null. */
+ printf("Session has non-null operationTime: %s\n", ($session->getOperationTime() !== null ? 'yes' : 'no'));
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ $reply = $event->getReply();
+ $hasOperationTime = isset($reply->operationTime);
+
+ printf("%s command reply includes operationTime: %s\n", $event->getCommandName(), $hasOperationTime ? 'yes' : 'no');
+
+ if ($hasOperationTime) {
+ $this->lastSeenOperationTime = $reply->operationTime;
+ }
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+echo "Testing executeBulkWrite()\n";
+(new Test)->executeBulkWrite();
+
+echo "\nTesting executeCommand()\n";
+(new Test)->executeCommand();
+
+echo "\nTesting executeQuery()\n";
+(new Test)->executeQuery();
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing executeBulkWrite()
+insert command reply includes operationTime: yes
+OK: Got MongoDB\Driver\Exception\BulkWriteException
+Session reports last seen operationTime: yes
+
+Testing executeCommand()
+OK: Got MongoDB\Driver\Exception\RuntimeException
+Session has non-null operationTime: yes
+
+Testing executeQuery()
+OK: Got MongoDB\Driver\Exception\RuntimeException
+Session has non-null operationTime: yes
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-005.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-005.phpt
new file mode 100644
index 00000000..451df4f0
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-005.phpt
@@ -0,0 +1,101 @@
+--TEST--
+Causal consistency: second read's afterClusterTime uses last reply's operationTime
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class Test implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ private $lastSeenOperationTime;
+
+ public function executeReadAfterRead()
+ {
+ $this->lastSeenOperationTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function executeReadAfterWrite()
+ {
+ $this->lastSeenOperationTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $bulk = new MongoDB\Driver\BulkWrite;
+ $bulk->insert(['x' => 1]);
+ $manager->executeBulkWrite(NS, $bulk, ['session' => $session]);
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ $command = $event->getCommand();
+ $hasAfterClusterTime = isset($command->readConcern->afterClusterTime);
+ printf("%s command includes afterClusterTime: %s\n", $event->getCommandName(), ($hasAfterClusterTime ? 'yes' : 'no'));
+
+ if ($hasAfterClusterTime && $this->lastSeenOperationTime !== null) {
+ printf("%s command uses last seen operationTime: %s\n", $event->getCommandName(), ($command->readConcern->afterClusterTime == $this->lastSeenOperationTime) ? 'yes' : 'no');
+ }
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ $reply = $event->getReply();
+ $hasOperationTime = isset($reply->operationTime);
+
+ printf("%s command reply includes operationTime: %s\n", $event->getCommandName(), $hasOperationTime ? 'yes' : 'no');
+
+ if ($hasOperationTime) {
+ $this->lastSeenOperationTime = $reply->operationTime;
+ }
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+echo "Testing read after read\n";
+(new Test)->executeReadAfterRead();
+
+echo "\nTesting read after write\n";
+(new Test)->executeReadAfterWrite();
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing read after read
+find command includes afterClusterTime: no
+find command reply includes operationTime: yes
+find command includes afterClusterTime: yes
+find command uses last seen operationTime: yes
+find command reply includes operationTime: yes
+
+Testing read after write
+insert command includes afterClusterTime: no
+insert command reply includes operationTime: yes
+find command includes afterClusterTime: yes
+find command uses last seen operationTime: yes
+find command reply includes operationTime: yes
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-006.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-006.phpt
new file mode 100644
index 00000000..d5524ee8
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-006.phpt
@@ -0,0 +1,116 @@
+--TEST--
+Causal consistency: second read's afterClusterTime uses last reply's operationTime (even on error)
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class Test implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ private $lastSeenOperationTime;
+
+ public function executeReadAfterReadError()
+ {
+ $this->lastSeenOperationTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $query = new MongoDB\Driver\Query(['$unsupportedOperator' => 1]);
+
+ throws(function() use ($manager, $query, $session) {
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ }, 'MongoDB\Driver\Exception\RuntimeException');
+
+ /* We cannot access the server reply if an exception is thrown for a
+ * failed command (see: PHPC-1076). For the time being, just assert that
+ * the operationTime is not null. */
+ printf("Session has non-null operationTime: %s\n", ($session->getOperationTime() !== null ? 'yes' : 'no'));
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function executeReadAfterWriteError()
+ {
+ $this->lastSeenOperationTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $bulk = new MongoDB\Driver\BulkWrite;
+ $bulk->insert(['_id' => 1]);
+ $bulk->insert(['_id' => 1]);
+
+ throws(function() use ($manager, $bulk, $session) {
+ $manager->executeBulkWrite(NS, $bulk, ['session' => $session]);
+ }, 'MongoDB\Driver\Exception\BulkWriteException');
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ $command = $event->getCommand();
+ $hasAfterClusterTime = isset($command->readConcern->afterClusterTime);
+ printf("%s command includes afterClusterTime: %s\n", $event->getCommandName(), ($hasAfterClusterTime ? 'yes' : 'no'));
+
+ if ($hasAfterClusterTime && $this->lastSeenOperationTime !== null) {
+ printf("%s command uses last seen operationTime: %s\n", $event->getCommandName(), ($command->readConcern->afterClusterTime == $this->lastSeenOperationTime) ? 'yes' : 'no');
+ }
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ $reply = $event->getReply();
+ $hasOperationTime = isset($reply->operationTime);
+
+ printf("%s command reply includes operationTime: %s\n", $event->getCommandName(), $hasOperationTime ? 'yes' : 'no');
+
+ if ($hasOperationTime) {
+ $this->lastSeenOperationTime = $reply->operationTime;
+ }
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+echo "\nTesting read after read error\n";
+(new Test)->executeReadAfterReadError();
+
+echo "\nTesting read after write error\n";
+(new Test)->executeReadAfterWriteError();
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing read after read error
+find command includes afterClusterTime: no
+OK: Got MongoDB\Driver\Exception\RuntimeException
+Session has non-null operationTime: yes
+find command includes afterClusterTime: yes
+find command reply includes operationTime: yes
+
+Testing read after write error
+insert command includes afterClusterTime: no
+insert command reply includes operationTime: yes
+OK: Got MongoDB\Driver\Exception\BulkWriteException
+find command includes afterClusterTime: yes
+find command uses last seen operationTime: yes
+find command reply includes operationTime: yes
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-007.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-007.phpt
new file mode 100644
index 00000000..d222b24a
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-007.phpt
@@ -0,0 +1,34 @@
+--TEST--
+Causal consistency: reads in non-causally consistent session never include afterClusterTime
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+(new CommandObserver)->observe(
+ function() {
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession(['causalConsistency' => false]);
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ },
+ function(stdClass $command)
+ {
+ $hasAfterClusterTime = isset($command->readConcern->afterClusterTime);
+ printf("Read includes afterClusterTime: %s\n", ($hasAfterClusterTime ? 'yes' : 'no'));
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Read includes afterClusterTime: no
+Read includes afterClusterTime: no
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-008.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-008.phpt
new file mode 100644
index 00000000..50e2ae15
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-008.phpt
@@ -0,0 +1,39 @@
+--TEST--
+Causal consistency: default read concern includes afterClusterTime but not level
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+(new CommandObserver)->observe(
+ function() {
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ },
+ function(stdClass $command)
+ {
+ $hasAfterClusterTime = isset($command->readConcern->afterClusterTime);
+ printf("Read concern includes afterClusterTime: %s\n", ($hasAfterClusterTime ? 'yes' : 'no'));
+
+ $hasLevel = isset($command->readConcern->level);
+ printf("Read concern includes level: %s\n", ($hasLevel ? 'yes' : 'no'));
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Read concern includes afterClusterTime: no
+Read concern includes level: no
+Read concern includes afterClusterTime: yes
+Read concern includes level: no
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-009.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-009.phpt
new file mode 100644
index 00000000..7c00694d
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-009.phpt
@@ -0,0 +1,40 @@
+--TEST--
+Causal consistency: custom read concern merges afterClusterTime and level
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+(new CommandObserver)->observe(
+ function() {
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $readConcern = new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::MAJORITY);
+ $query = new MongoDB\Driver\Query([], ['readConcern' => $readConcern]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ },
+ function(stdClass $command)
+ {
+ $hasAfterClusterTime = isset($command->readConcern->afterClusterTime);
+ printf("Read concern includes afterClusterTime: %s\n", ($hasAfterClusterTime ? 'yes' : 'no'));
+
+ $hasLevel = isset($command->readConcern->level);
+ printf("Read concern includes level: %s\n", ($hasLevel ? 'yes' : 'no'));
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Read concern includes afterClusterTime: no
+Read concern includes level: yes
+Read concern includes afterClusterTime: yes
+Read concern includes level: yes
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-010.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-010.phpt
new file mode 100644
index 00000000..7d3407e8
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-010.phpt
@@ -0,0 +1,34 @@
+--TEST--
+Causal consistency: unacknowledged write does not update operationTime
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$session = $manager->startSession();
+
+echo "Initial operation time:\n";
+var_dump($session->getOperationTime());
+
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->insert(['x' => 1]);
+$writeConcern = new MongoDB\Driver\WriteConcern(0);
+$manager->executeBulkWrite(NS, $bulk, ['session' => $session, 'writeConcern' => $writeConcern]);
+
+echo "\nOperation time after unacknowledged write:\n";
+var_dump($session->getOperationTime());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Initial operation time:
+NULL
+
+Operation time after unacknowledged write:
+NULL
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-011.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-011.phpt
new file mode 100644
index 00000000..4107d38e
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-011.phpt
@@ -0,0 +1,34 @@
+--TEST--
+Causal consistency: $clusterTime is not sent in commands to unsupported deployments
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('STANDALONE'); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+(new CommandObserver)->observe(
+ function() {
+ $manager = new MongoDB\Driver\Manager(STANDALONE);
+ $session = $manager->startSession();
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ },
+ function(stdClass $command)
+ {
+ $hasClusterTime = isset($command->{'$clusterTime'});
+ printf("Command includes \$clusterTime: %s\n", ($hasClusterTime ? 'yes' : 'no'));
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Command includes $clusterTime: no
+Command includes $clusterTime: no
+===DONE===
diff --git a/mongodb-1.4.2/tests/causal-consistency/causal-consistency-012.phpt b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-012.phpt
new file mode 100644
index 00000000..db2cb24c
--- /dev/null
+++ b/mongodb-1.4.2/tests/causal-consistency/causal-consistency-012.phpt
@@ -0,0 +1,34 @@
+--TEST--
+Causal consistency: $clusterTime is sent in commands to supported deployments
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+(new CommandObserver)->observe(
+ function() {
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ },
+ function(stdClass $command)
+ {
+ $hasClusterTime = isset($command->{'$clusterTime'});
+ printf("Command includes \$clusterTime: %s\n", ($hasClusterTime ? 'yes' : 'no'));
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Command includes $clusterTime: yes
+Command includes $clusterTime: yes
+===DONE===
diff --git a/mongodb-1.3.4/tests/command/command-ctor-001.phpt b/mongodb-1.4.2/tests/command/command-ctor-001.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/command/command-ctor-001.phpt
rename to mongodb-1.4.2/tests/command/command-ctor-001.phpt
index 59078a2a..2a906572 100644
--- a/mongodb-1.3.4/tests/command/command-ctor-001.phpt
+++ b/mongodb-1.4.2/tests/command/command-ctor-001.phpt
@@ -1,93 +1,94 @@
--TEST--
MongoDB\Driver\Command construction should always encode __pclass for Persistable objects
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
class MyClass implements MongoDB\BSON\Persistable
{
private $id;
private $child;
public function __construct($id, MyClass $child = null)
{
$this->id = $id;
$this->child = $child;
}
public function bsonSerialize()
{
return [
'_id' => $this->id,
'child' => $this->child,
];
}
public function bsonUnserialize(array $data)
{
$this->id = $data['_id'];
$this->child = $data['child'];
}
}
$manager = new MongoDB\Driver\Manager(STANDALONE);
$document = new MyClass('foo', new MyClass('bar', new MyClass('baz')));
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([
'findAndModify' => COLLECTION_NAME,
'query' => ['_id' => 'foo'],
'update' => $document,
'upsert' => true,
'new' => true,
]));
var_dump($cursor->toArray()[0]->value);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([
'aggregate' => COLLECTION_NAME,
'pipeline' => [
['$match' => $document],
],
+ 'cursor' => new stdClass(),
]));
-var_dump($cursor->toArray()[0]->result[0]);
+var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(MyClass)#%d (%d) {
["id":"MyClass":private]=>
string(3) "foo"
["child":"MyClass":private]=>
object(MyClass)#%d (%d) {
["id":"MyClass":private]=>
string(3) "bar"
["child":"MyClass":private]=>
object(MyClass)#%d (%d) {
["id":"MyClass":private]=>
string(3) "baz"
["child":"MyClass":private]=>
NULL
}
}
}
object(MyClass)#%d (%d) {
["id":"MyClass":private]=>
string(3) "foo"
["child":"MyClass":private]=>
object(MyClass)#%d (%d) {
["id":"MyClass":private]=>
string(3) "bar"
["child":"MyClass":private]=>
object(MyClass)#%d (%d) {
["id":"MyClass":private]=>
string(3) "baz"
["child":"MyClass":private]=>
NULL
}
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/command/command_error-001.phpt b/mongodb-1.4.2/tests/command/command_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/command/command_error-001.phpt
rename to mongodb-1.4.2/tests/command/command_error-001.phpt
diff --git a/mongodb-1.4.2/tests/command/cursor-batchsize-001.phpt b/mongodb-1.4.2/tests/command/cursor-batchsize-001.phpt
new file mode 100644
index 00000000..54884854
--- /dev/null
+++ b/mongodb-1.4.2/tests/command/cursor-batchsize-001.phpt
@@ -0,0 +1,83 @@
+--TEST--
+MongoDB\Driver\Command non-zero batchSize applies to getMore
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class Test implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function executeCommand()
+ {
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(STANDALONE);
+
+ $bulkWrite = new MongoDB\Driver\BulkWrite;
+
+ for ($i = 0; $i < 5; $i++) {
+ $bulkWrite->insert(['_id' => $i]);
+ }
+
+ $writeResult = $manager->executeBulkWrite(NS, $bulkWrite);
+ printf("Inserted: %d\n", $writeResult->getInsertedCount());
+
+ $command = new MongoDB\Driver\Command([
+ 'aggregate' => COLLECTION_NAME,
+ 'pipeline' => [['$match' => new stdClass]],
+ 'cursor' => ['batchSize' => 2]
+ ]);
+ $cursor = $manager->executeCommand(DATABASE_NAME, $command);
+
+ $cursor->toArray();
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ $command = $event->getCommand();
+
+ if ($event->getCommandName() === 'aggregate') {
+ printf("aggregate command specifies batchSize: %d\n", $command->cursor->batchSize);
+ }
+
+ if ($event->getCommandName() === 'getMore') {
+ printf("getMore command specifies batchSize: %d\n", $command->batchSize);
+ }
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ $reply = $event->getReply();
+
+ if ($event->getCommandName() === 'aggregate') {
+ printf("aggregate response contains %d document(s)\n", count($reply->cursor->firstBatch));
+ }
+
+ if ($event->getCommandName() === 'getMore') {
+ printf("getMore response contains %d document(s)\n", count($reply->cursor->nextBatch));
+ }
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+(new Test)->executeCommand();
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Inserted: 5
+aggregate command specifies batchSize: 2
+aggregate response contains 2 document(s)
+getMore command specifies batchSize: 2
+getMore response contains 2 document(s)
+getMore command specifies batchSize: 2
+getMore response contains 1 document(s)
+===DONE===
diff --git a/mongodb-1.4.2/tests/command/cursor-batchsize-002.phpt b/mongodb-1.4.2/tests/command/cursor-batchsize-002.phpt
new file mode 100644
index 00000000..3d464c70
--- /dev/null
+++ b/mongodb-1.4.2/tests/command/cursor-batchsize-002.phpt
@@ -0,0 +1,81 @@
+--TEST--
+MongoDB\Driver\Command batchSize of zero is ignored for getMore
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class Test implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function executeCommand()
+ {
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(STANDALONE);
+
+ $bulkWrite = new MongoDB\Driver\BulkWrite;
+
+ for ($i = 0; $i < 5; $i++) {
+ $bulkWrite->insert(['_id' => $i]);
+ }
+
+ $writeResult = $manager->executeBulkWrite(NS, $bulkWrite);
+ printf("Inserted: %d\n", $writeResult->getInsertedCount());
+
+ $command = new MongoDB\Driver\Command([
+ 'aggregate' => COLLECTION_NAME,
+ 'pipeline' => [['$match' => new stdClass]],
+ 'cursor' => ['batchSize' => 0]
+ ]);
+ $cursor = $manager->executeCommand(DATABASE_NAME, $command);
+
+ $cursor->toArray();
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ $command = $event->getCommand();
+
+ if ($event->getCommandName() === 'aggregate') {
+ printf("aggregate command specifies batchSize: %d\n", $command->cursor->batchSize);
+ }
+
+ if ($event->getCommandName() === 'getMore') {
+ printf("getMore command specifies batchSize: %s\n", isset($command->batchSize) ? 'yes' : 'no');
+ }
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ $reply = $event->getReply();
+
+ if ($event->getCommandName() === 'aggregate') {
+ printf("aggregate response contains %d document(s)\n", count($reply->cursor->firstBatch));
+ }
+
+ if ($event->getCommandName() === 'getMore') {
+ printf("getMore response contains %d document(s)\n", count($reply->cursor->nextBatch));
+ }
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+(new Test)->executeCommand();
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Inserted: 5
+aggregate command specifies batchSize: 0
+aggregate response contains 0 document(s)
+getMore command specifies batchSize: no
+getMore response contains 5 document(s)
+===DONE===
diff --git a/mongodb-1.4.2/tests/command/cursor-tailable-001.phpt b/mongodb-1.4.2/tests/command/cursor-tailable-001.phpt
new file mode 100644
index 00000000..ff3d6bbb
--- /dev/null
+++ b/mongodb-1.4.2/tests/command/cursor-tailable-001.phpt
@@ -0,0 +1,69 @@
+--TEST--
+MongoDB\Driver\Command tailable cursor iteration with maxAwaitTimeMS option
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+$manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([
+ 'create' => COLLECTION_NAME,
+ 'capped' => true,
+ 'size' => 1048576,
+]));
+
+$bulkWrite = new MongoDB\Driver\BulkWrite;
+$bulkWrite->insert(['_id' => 1]);
+$manager->executeBulkWrite(NS, $bulkWrite);
+
+$pipeline = [
+ [ '$changeStream' => [ 'fullDocument' => 'updateLookup' ] ]
+];
+
+$command = new MongoDB\Driver\Command([
+ 'aggregate' => COLLECTION_NAME,
+ 'pipeline' => $pipeline,
+ 'cursor' => ['batchSize' => 0],
+], [
+ 'maxAwaitTimeMS' => 100,
+]);
+
+$cursor = $manager->executeCommand(DATABASE_NAME, $command);
+$it = new IteratorIterator($cursor);
+
+$it->rewind();
+$it->next();
+
+$bulkWrite = new MongoDB\Driver\BulkWrite;
+$bulkWrite->insert(['_id' => "new-document"]);
+$manager->executeBulkWrite(NS, $bulkWrite);
+
+$startTime = microtime(true);
+echo "Awaiting results...\n";
+$it->next();
+var_dump($it->current()->operationType, $it->current()->documentKey);
+printf("Waited for %.6f seconds\n", microtime(true) - $startTime);
+
+$startTime = microtime(true);
+echo "Awaiting results...\n";
+$it->next();
+var_dump($it->current()); /* Should be NULL */
+printf("Waited for %.6f seconds\n", microtime(true) - $startTime);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Awaiting results...
+string(6) "insert"
+object(stdClass)#%d (%d) {
+ ["_id"]=>
+ string(12) "new-document"
+}
+Waited for 0.%d seconds
+Awaiting results...
+NULL
+Waited for 0.1%d seconds
+===DONE===
diff --git a/mongodb-1.4.2/tests/command/findAndModify-001.phpt b/mongodb-1.4.2/tests/command/findAndModify-001.phpt
new file mode 100644
index 00000000..b1a9dfa2
--- /dev/null
+++ b/mongodb-1.4.2/tests/command/findAndModify-001.phpt
@@ -0,0 +1,86 @@
+--TEST--
+MongoDB\Driver\Command with findAndModify and arrayFilters
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php START('THROWAWAY', ["version" => "36-release"]); CLEANUP(THROWAWAY); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(THROWAWAY);
+
+$bulk = new MongoDB\Driver\BulkWrite();
+
+$bulk->insert([ '_id' => 1, 'grades' => [ 95, 92, 90 ] ]);
+$bulk->insert([ '_id' => 2, 'grades' => [ 98, 100, 102 ] ]);
+$bulk->insert([ '_id' => 3, 'grades' => [ 95, 110, 100 ] ]);
+
+$manager->executeBulkWrite(DATABASE_NAME . '.' . COLLECTION_NAME, $bulk);
+
+$command = new MongoDB\Driver\Command([
+ 'findAndModify' => COLLECTION_NAME,
+ 'query' => ['grades' => [ '$gt' => 100 ] ],
+ 'update' => ['$set' => [ 'grades.$[element]' => 100 ] ],
+ 'arrayFilters' => [ [ 'element' => [ '$gt' => 100 ] ] ],
+]);
+
+// Running this twice, because findAndModify only updates the first document
+// it finds.
+$manager->executeCommand(DATABASE_NAME, $command);
+$manager->executeCommand(DATABASE_NAME, $command);
+
+$cursor = $manager->executeQuery( DATABASE_NAME . '.' . COLLECTION_NAME, new \MongoDB\Driver\Query([]));
+var_dump($cursor->toArray());
+?>
+===DONE===
+<?php DELETE("THROWAWAY"); ?>
+<?php exit(0); ?>
+--CLEAN--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php DELETE("THROWAWAY"); ?>
+--EXPECTF--
+array(%d) {
+ [0]=>
+ object(stdClass)#%d (%d) {
+ ["_id"]=>
+ int(1)
+ ["grades"]=>
+ array(%d) {
+ [0]=>
+ int(95)
+ [1]=>
+ int(92)
+ [2]=>
+ int(90)
+ }
+ }
+ [1]=>
+ object(stdClass)#%d (%d) {
+ ["_id"]=>
+ int(2)
+ ["grades"]=>
+ array(%d) {
+ [0]=>
+ int(98)
+ [1]=>
+ int(100)
+ [2]=>
+ int(100)
+ }
+ }
+ [2]=>
+ object(stdClass)#%d (%d) {
+ ["_id"]=>
+ int(3)
+ ["grades"]=>
+ array(%d) {
+ [0]=>
+ int(95)
+ [1]=>
+ int(100)
+ [2]=>
+ int(100)
+ }
+ }
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/command/update-001.phpt b/mongodb-1.4.2/tests/command/update-001.phpt
new file mode 100644
index 00000000..eca9fd5a
--- /dev/null
+++ b/mongodb-1.4.2/tests/command/update-001.phpt
@@ -0,0 +1,86 @@
+--TEST--
+MongoDB\Driver\Command with update and arrayFilters
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php START('THROWAWAY', ["version" => "36-release"]); CLEANUP(THROWAWAY); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(THROWAWAY);
+
+$bulk = new MongoDB\Driver\BulkWrite();
+
+$bulk->insert([ '_id' => 1, 'grades' => [ 95, 92, 90 ] ]);
+$bulk->insert([ '_id' => 2, 'grades' => [ 98, 100, 102 ] ]);
+$bulk->insert([ '_id' => 3, 'grades' => [ 95, 110, 100 ] ]);
+
+$manager->executeBulkWrite(DATABASE_NAME . '.' . COLLECTION_NAME, $bulk);
+
+$command = new MongoDB\Driver\Command([
+ 'update' => COLLECTION_NAME,
+ 'updates' => [[
+ 'q' => [ 'grades' => [ '$gte' => 100 ] ],
+ 'u' => [ '$set' => [ 'grades.$[element]' => 100 ] ],
+ 'arrayFilters' => [ [ 'element' => [ '$gte' => 100 ] ] ],
+ 'multi' => true
+ ]],
+]);
+
+$manager->executeCommand(DATABASE_NAME, $command);
+
+$cursor = $manager->executeQuery( DATABASE_NAME . '.' . COLLECTION_NAME, new \MongoDB\Driver\Query([]));
+var_dump($cursor->toArray());
+?>
+===DONE===
+<?php DELETE("THROWAWAY"); ?>
+<?php exit(0); ?>
+--CLEAN--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php DELETE("THROWAWAY"); ?>
+--EXPECTF--
+array(%d) {
+ [0]=>
+ object(stdClass)#%d (%d) {
+ ["_id"]=>
+ int(1)
+ ["grades"]=>
+ array(%d) {
+ [0]=>
+ int(95)
+ [1]=>
+ int(92)
+ [2]=>
+ int(90)
+ }
+ }
+ [1]=>
+ object(stdClass)#%d (%d) {
+ ["_id"]=>
+ int(2)
+ ["grades"]=>
+ array(%d) {
+ [0]=>
+ int(98)
+ [1]=>
+ int(100)
+ [2]=>
+ int(100)
+ }
+ }
+ [2]=>
+ object(stdClass)#%d (%d) {
+ ["_id"]=>
+ int(3)
+ ["grades"]=>
+ array(%d) {
+ [0]=>
+ int(95)
+ [1]=>
+ int(100)
+ [2]=>
+ int(100)
+ }
+ }
+}
+===DONE===
diff --git a/mongodb-1.3.4/tests/connect/bug0720.phpt b/mongodb-1.4.2/tests/connect/bug0720.phpt
similarity index 98%
rename from mongodb-1.3.4/tests/connect/bug0720.phpt
rename to mongodb-1.4.2/tests/connect/bug0720.phpt
index d31bbeda..802b87d7 100644
--- a/mongodb-1.3.4/tests/connect/bug0720.phpt
+++ b/mongodb-1.4.2/tests/connect/bug0720.phpt
@@ -1,40 +1,41 @@
--TEST--
PHPC-720: Do not persist SSL streams to avoid SSL reinitialization errors
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_SSL'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$SSL_DIR = realpath(__DIR__ . '/../../scripts/ssl/');
$driverOptions = [
// libmongoc does not allow the hostname to be overridden as "server"
'allow_invalid_hostname' => true,
'ca_file' => $SSL_DIR . '/ca.pem',
];
$manager = new MongoDB\Driver\Manager(STANDALONE_SSL, ['ssl' => true], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
unset($manager, $cursor);
$manager = new MongoDB\Driver\Manager(STANDALONE_SSL, ['ssl' => true], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
===DONE===
diff --git a/mongodb-1.4.2/tests/connect/bug1015.phpt b/mongodb-1.4.2/tests/connect/bug1015.phpt
new file mode 100644
index 00000000..7ed0c863
--- /dev/null
+++ b/mongodb-1.4.2/tests/connect/bug1015.phpt
@@ -0,0 +1,36 @@
+--TEST--
+PHPC-1015: Initial DNS Seedlist test
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php echo "skip Manual test, as it needs configuration\n"; ?>
+<?php NEEDS('REPLICASET_DNS'); ?>
+--FILE--
+<?php
+/**
+ * This test requires additional configuration, and hence is not enabled by
+ * default. In order for this test to succeed, you need the following line in
+ * /etc/hosts:
+ *
+ * 192.168.112.10 localhost.test.build.10gen.cc
+ *
+ * The IP address needs to match the IP address that your vagrant environment
+ * has created. The IP address is shown when you run "make start-servers".
+ */
+require_once __DIR__ . "/../utils/basic.inc";
+
+$m = new MongoDB\Driver\Manager("mongodb+srv://test1.test.build.10gen.cc/");
+$s = $m->selectServer( new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_NEAREST ) );
+$servers = $m->getServers();
+
+foreach ( $servers as $server )
+{
+ echo $server->getHost(), ':', $server->getPort(), "\n";
+}
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+%d.%d.%d.%d:27017
+%d.%d.%d.%d:27018
+%d.%d.%d.%d:27019
+===DONE===
diff --git a/mongodb-1.3.4/tests/connect/bug1045.phpt b/mongodb-1.4.2/tests/connect/bug1045.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/connect/bug1045.phpt
rename to mongodb-1.4.2/tests/connect/bug1045.phpt
index ab35b8e6..bfc8b91b 100644
--- a/mongodb-1.3.4/tests/connect/bug1045.phpt
+++ b/mongodb-1.4.2/tests/connect/bug1045.phpt
@@ -1,25 +1,26 @@
--TEST--
PHPC-1045: Segfault if username is not provided for SCRAM-SHA-1 authMechanism
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
// STANDALONE does not support auth, but that is not necessary for the test
$m = new MongoDB\Driver\Manager(STANDALONE, ['authMechanism' => 'SCRAM-SHA-1', 'ssl' => false]);
// Execute a basic ping command to trigger connection initialization
echo throws(function() use ($m) {
$m->executeCommand('admin', new MongoDB\Driver\Command(['ping'=>1]));
}, 'MongoDB\Driver\Exception\RuntimeException'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
OK: Got MongoDB\Driver\Exception\RuntimeException
SCRAM Failure: username is not set
===DONE===
diff --git a/mongodb-1.4.2/tests/connect/compression_error-001.phpt b/mongodb-1.4.2/tests/connect/compression_error-001.phpt
new file mode 100644
index 00000000..7a29eb04
--- /dev/null
+++ b/mongodb-1.4.2/tests/connect/compression_error-001.phpt
@@ -0,0 +1,18 @@
+--TEST--
+MongoDB\Driver\Manager: Connecting with unsupported compressor
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+ini_set('mongodb.debug', 'stdout');
+$manager = new MongoDB\Driver\Manager(STANDALONE, [ 'compressors' => 'zli'] );
+ini_set('mongodb.debug', null);
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+%AWARNING > Unsupported compressor: 'zli'%A
+===DONE===
diff --git a/mongodb-1.4.2/tests/connect/compression_error-002.phpt b/mongodb-1.4.2/tests/connect/compression_error-002.phpt
new file mode 100644
index 00000000..3c781e72
--- /dev/null
+++ b/mongodb-1.4.2/tests/connect/compression_error-002.phpt
@@ -0,0 +1,19 @@
+--TEST--
+MongoDB\Driver\Manager: Connecting with invalid compressor values
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+echo throws(function() {
+ $manager = new MongoDB\Driver\Manager(STANDALONE, [ 'compressors' => "foo\xFEbar"] );
+}, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n";
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+OK: Got MongoDB\Driver\Exception\UnexpectedValueException
+Detected invalid UTF-8 for fieldname "compressors": %s
+===DONE===
diff --git a/mongodb-1.3.4/tests/connect/replicaset-seedlist-001.phpt b/mongodb-1.4.2/tests/connect/replicaset-seedlist-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/connect/replicaset-seedlist-001.phpt
rename to mongodb-1.4.2/tests/connect/replicaset-seedlist-001.phpt
diff --git a/mongodb-1.3.4/tests/connect/replicaset-seedlist-002.phpt b/mongodb-1.4.2/tests/connect/replicaset-seedlist-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/connect/replicaset-seedlist-002.phpt
rename to mongodb-1.4.2/tests/connect/replicaset-seedlist-002.phpt
diff --git a/mongodb-1.3.4/tests/connect/standalone-auth-0001.phpt b/mongodb-1.4.2/tests/connect/standalone-auth-0001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/connect/standalone-auth-0001.phpt
rename to mongodb-1.4.2/tests/connect/standalone-auth-0001.phpt
diff --git a/mongodb-1.3.4/tests/connect/standalone-auth-0002.phpt b/mongodb-1.4.2/tests/connect/standalone-auth-0002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/connect/standalone-auth-0002.phpt
rename to mongodb-1.4.2/tests/connect/standalone-auth-0002.phpt
diff --git a/mongodb-1.3.4/tests/connect/standalone-plain-0001.phpt b/mongodb-1.4.2/tests/connect/standalone-plain-0001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/connect/standalone-plain-0001.phpt
rename to mongodb-1.4.2/tests/connect/standalone-plain-0001.phpt
diff --git a/mongodb-1.3.4/tests/connect/standalone-plain-0002.phpt b/mongodb-1.4.2/tests/connect/standalone-plain-0002.phpt
similarity index 98%
rename from mongodb-1.3.4/tests/connect/standalone-plain-0002.phpt
rename to mongodb-1.4.2/tests/connect/standalone-plain-0002.phpt
index f6d67fb3..8a3d4015 100644
--- a/mongodb-1.3.4/tests/connect/standalone-plain-0002.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-plain-0002.phpt
@@ -1,62 +1,63 @@
--TEST--
Connect to MongoDB with using PLAIN auth mechanism #002
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_PLAIN'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$username = "root";
$password = "toor";
$database = "admin";
$parsed = parse_url(STANDALONE_PLAIN);
$dsn = sprintf("mongodb://%s:%s@%s:%d/%s", $username, $password, $parsed["host"], $parsed["port"], $database);
$adminmanager = new MongoDB\Driver\Manager($dsn);
$cmd = array(
"createUser" => "bugs",
"roles" => array(array("role" => "readWrite", "db" => DATABASE_NAME)),
);
$command = new MongoDB\Driver\Command($cmd);
try {
$result = $adminmanager->executeCommand('$external', $command);
echo "User Created\n";
} catch(Exception $e) {
echo $e->getMessage(), "\n";
}
$username = "bugs";
$password = "wrong-password";
$database = '$external';
$dsn = sprintf("mongodb://%s:%s@%s:%d/?authSource=%s&authMechanism=PLAIN", $username, $password, $parsed["host"], $parsed["port"], $database);
$manager = new MongoDB\Driver\Manager($dsn);
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(array("very" => "important"));
throws(function() use($manager, $bulk) {
$manager->executeBulkWrite(NS, $bulk);
}, "MongoDB\Driver\Exception\AuthenticationException");
$cmd = array(
"dropUser" => "bugs",
);
$command = new MongoDB\Driver\Command($cmd);
try {
$result = $adminmanager->executeCommand('$external', $command);
echo "User deleted\n";
} catch(Exception $e) {
echo $e->getMessage(), "\n";
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
User Created
OK: Got MongoDB\Driver\Exception\AuthenticationException
User deleted
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-ssl-no_verify-001.phpt b/mongodb-1.4.2/tests/connect/standalone-ssl-no_verify-001.phpt
similarity index 96%
rename from mongodb-1.3.4/tests/connect/standalone-ssl-no_verify-001.phpt
rename to mongodb-1.4.2/tests/connect/standalone-ssl-no_verify-001.phpt
index cc813d87..0cd0afa2 100644
--- a/mongodb-1.3.4/tests/connect/standalone-ssl-no_verify-001.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-ssl-no_verify-001.phpt
@@ -1,27 +1,28 @@
--TEST--
Connect to MongoDB with SSL and no host/cert verification
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_SSL'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$driverOptions = [
'allow_invalid_hostname' => true,
"weak_cert_validation" => true,
];
$manager = new MongoDB\Driver\Manager(STANDALONE_SSL, ['ssl' => true], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-ssl-no_verify-002.phpt b/mongodb-1.4.2/tests/connect/standalone-ssl-no_verify-002.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/connect/standalone-ssl-no_verify-002.phpt
rename to mongodb-1.4.2/tests/connect/standalone-ssl-no_verify-002.phpt
index 574fa10c..49f91f81 100644
--- a/mongodb-1.3.4/tests/connect/standalone-ssl-no_verify-002.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-ssl-no_verify-002.phpt
@@ -1,31 +1,32 @@
--TEST--
Connect to MongoDB with SSL and no host/cert verification (context options)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_SSL'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$driverOptions = [
'context' => stream_context_create([
'ssl' => [
'allow_invalid_hostname' => true,
'allow_self_signed' => true, // "weak_cert_validation" alias
],
]),
];
$manager = new MongoDB\Driver\Manager(STANDALONE_SSL, ['ssl' => true], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-001.phpt b/mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-001.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-001.phpt
rename to mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-001.phpt
index 9d438488..304caa6a 100644
--- a/mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-001.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-001.phpt
@@ -1,31 +1,32 @@
--TEST--
Connect to MongoDB with SSL and cert verification
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_SSL'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$SSL_DIR = realpath(__DIR__ . '/../../scripts/ssl/');
$driverOptions = [
// libmongoc does not allow the hostname to be overridden as "server"
'allow_invalid_hostname' => true,
'weak_cert_validation' => false,
'ca_file' => $SSL_DIR . '/ca.pem',
];
$manager = new MongoDB\Driver\Manager(STANDALONE_SSL, ['ssl' => true], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-002.phpt b/mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-002.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-002.phpt
rename to mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-002.phpt
index 9c4ac959..645eab51 100644
--- a/mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-002.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-002.phpt
@@ -1,35 +1,36 @@
--TEST--
Connect to MongoDB with SSL and cert verification (context options)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_SSL'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$SSL_DIR = realpath(__DIR__ . '/../../scripts/ssl/');
$driverOptions = [
'context' => stream_context_create([
'ssl' => [
// libmongoc does not allow the hostname to be overridden as "server"
'allow_invalid_hostname' => true,
'allow_self_signed' => false, // "weak_cert_validation" alias
'cafile' => $SSL_DIR . '/ca.pem', // "ca_file" alias
],
]),
];
$manager = new MongoDB\Driver\Manager(STANDALONE_SSL, ['ssl' => true], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-error-001.phpt b/mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-error-001.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-error-001.phpt
rename to mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-error-001.phpt
index 776d1105..299503d4 100644
--- a/mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-error-001.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-error-001.phpt
@@ -1,28 +1,29 @@
--TEST--
Connect to MongoDB with SSL and cert verification error
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_SSL'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$driverOptions = [
// libmongoc does not allow the hostname to be overridden as "server"
'allow_invalid_hostname' => true,
'weak_cert_validation' => false,
];
echo throws(function() use ($driverOptions) {
$manager = new MongoDB\Driver\Manager(STANDALONE_SSL, ['ssl' => true], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
}, 'MongoDB\Driver\Exception\ConnectionTimeoutException', 'executeCommand'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException thrown from executeCommand
No suitable servers found (`serverSelectionTryOnce` set): [%s calling ismaster on '%s:%d']
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-error-002.phpt b/mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-error-002.phpt
similarity index 98%
rename from mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-error-002.phpt
rename to mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-error-002.phpt
index 5ac2eefd..ddc72968 100644
--- a/mongodb-1.3.4/tests/connect/standalone-ssl-verify_cert-error-002.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-ssl-verify_cert-error-002.phpt
@@ -1,32 +1,33 @@
--TEST--
Connect to MongoDB with SSL and cert verification error (context options)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_SSL'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$driverOptions = [
'context' => stream_context_create([
'ssl' => [
// libmongoc does not allow the hostname to be overridden as "server"
'allow_invalid_hostname' => true,
'allow_self_signed' => false, // "weak_cert_validation" alias
],
]),
];
echo throws(function() use ($driverOptions) {
$manager = new MongoDB\Driver\Manager(STANDALONE_SSL, ['ssl' => true], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
}, 'MongoDB\Driver\Exception\ConnectionTimeoutException', 'executeCommand'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException thrown from executeCommand
No suitable servers found (`serverSelectionTryOnce` set): [%s calling ismaster on '%s:%d']
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-x509-auth-001.phpt b/mongodb-1.4.2/tests/connect/standalone-x509-auth-001.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/connect/standalone-x509-auth-001.phpt
rename to mongodb-1.4.2/tests/connect/standalone-x509-auth-001.phpt
index aa2f4fdc..01f45cdf 100644
--- a/mongodb-1.3.4/tests/connect/standalone-x509-auth-001.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-x509-auth-001.phpt
@@ -1,32 +1,33 @@
--TEST--
Connect to MongoDB with SSL and X509 auth
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_X509'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$SSL_DIR = realpath(__DIR__ . '/../../scripts/ssl/');
$driverOptions = [
// libmongoc does not allow the hostname to be overridden as "server"
'allow_invalid_hostname' => true,
'weak_cert_validation' => false,
'ca_file' => $SSL_DIR . '/ca.pem',
'pem_file' => $SSL_DIR . '/client.pem',
];
$manager = new MongoDB\Driver\Manager(STANDALONE_X509, ['ssl' => true], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-x509-auth-002.phpt b/mongodb-1.4.2/tests/connect/standalone-x509-auth-002.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/connect/standalone-x509-auth-002.phpt
rename to mongodb-1.4.2/tests/connect/standalone-x509-auth-002.phpt
index 78785f51..ea4e1293 100644
--- a/mongodb-1.3.4/tests/connect/standalone-x509-auth-002.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-x509-auth-002.phpt
@@ -1,36 +1,37 @@
--TEST--
Connect to MongoDB with SSL and X509 auth (stream context)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_X509'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$SSL_DIR = realpath(__DIR__ . '/../../scripts/ssl/');
$driverOptions = [
'context' => stream_context_create([
'ssl' => [
// libmongoc does not allow the hostname to be overridden as "server"
'allow_invalid_hostname' => true,
'allow_self_signed' => false, // "weak_cert_validation" alias
'cafile' => $SSL_DIR . '/ca.pem', // "ca_file" alias
'local_cert' => $SSL_DIR . '/client.pem', // "pem_file" alias
],
]),
];
$manager = new MongoDB\Driver\Manager(STANDALONE_X509, ['ssl' => true], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-x509-error-0001.phpt b/mongodb-1.4.2/tests/connect/standalone-x509-error-0001.phpt
similarity index 98%
rename from mongodb-1.3.4/tests/connect/standalone-x509-error-0001.phpt
rename to mongodb-1.4.2/tests/connect/standalone-x509-error-0001.phpt
index 4196e6b1..1258211d 100644
--- a/mongodb-1.3.4/tests/connect/standalone-x509-error-0001.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-x509-error-0001.phpt
@@ -1,40 +1,41 @@
--TEST--
X509 connection should not reuse previous stream after an auth failure
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_X509'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$SSL_DIR = realpath(__DIR__ . '/../../scripts/ssl/');
$driverOptions = [
// libmongoc does not allow the hostname to be overridden as "server"
'allow_invalid_hostname' => true,
'ca_file' => $SSL_DIR . '/ca.pem',
'pem_file' => $SSL_DIR . '/client.pem',
];
// Wrong username for X509 authentication
$parsed = parse_url(STANDALONE_X509);
$dsn = sprintf('mongodb://username@%s:%d/?ssl=true&authMechanism=MONGODB-X509', $parsed['host'], $parsed['port']);
// Both should fail with auth failure, without reusing the previous stream
for ($i = 0; $i < 2; $i++) {
echo throws(function() use ($dsn, $driverOptions) {
$manager = new MongoDB\Driver\Manager($dsn, [], $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
}, 'MongoDB\Driver\Exception\AuthenticationException', 'executeCommand'), "\n";
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
OK: Got MongoDB\Driver\Exception\AuthenticationException thrown from executeCommand
auth failed
OK: Got MongoDB\Driver\Exception\AuthenticationException thrown from executeCommand
auth failed
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-x509-extract_username-001.phpt b/mongodb-1.4.2/tests/connect/standalone-x509-extract_username-001.phpt
similarity index 93%
rename from mongodb-1.3.4/tests/connect/standalone-x509-extract_username-001.phpt
rename to mongodb-1.4.2/tests/connect/standalone-x509-extract_username-001.phpt
index d5502e7f..25901fca 100644
--- a/mongodb-1.3.4/tests/connect/standalone-x509-extract_username-001.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-x509-extract_username-001.phpt
@@ -1,37 +1,38 @@
--TEST--
Connect to MongoDB with SSL and X509 auth and username retrieved from cert
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(['OpenSSL', 'Secure Transport', 'Secure Channel']); ?>
<?php NEEDS('STANDALONE_X509'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$SSL_DIR = realpath(__DIR__ . '/../../scripts/ssl/');
$driverOptions = [
// libmongoc does not allow the hostname to be overridden as "server"
'allow_invalid_hostname' => true,
'weak_cert_validation' => false,
'ca_file' => $SSL_DIR . '/ca.pem',
'pem_file' => $SSL_DIR . '/client.pem',
];
$uriOptions = ['authMechanism' => 'MONGODB-X509', 'ssl' => true];
$parsed = parse_url(STANDALONE_X509);
$uri = sprintf('mongodb://%s:%d', $parsed['host'], $parsed['port']);
$manager = new MongoDB\Driver\Manager($uri, $uriOptions, $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/connect/standalone-x509-extract_username-002.phpt b/mongodb-1.4.2/tests/connect/standalone-x509-extract_username-002.phpt
similarity index 94%
rename from mongodb-1.3.4/tests/connect/standalone-x509-extract_username-002.phpt
rename to mongodb-1.4.2/tests/connect/standalone-x509-extract_username-002.phpt
index efd9318e..3706923e 100644
--- a/mongodb-1.3.4/tests/connect/standalone-x509-extract_username-002.phpt
+++ b/mongodb-1.4.2/tests/connect/standalone-x509-extract_username-002.phpt
@@ -1,41 +1,42 @@
--TEST--
Connect to MongoDB with SSL and X509 auth and username retrieved from cert (stream context)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(['OpenSSL', 'Secure Transport', 'Secure Channel']); ?>
<?php NEEDS('STANDALONE_X509'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$SSL_DIR = realpath(__DIR__ . '/../../scripts/ssl/');
$driverOptions = [
'context' => stream_context_create([
'ssl' => [
// libmongoc does not allow the hostname to be overridden as "server"
'allow_invalid_hostname' => true,
'allow_self_signed' => false, // "weak_cert_validation" alias
'cafile' => $SSL_DIR . '/ca.pem', // "ca_file" alias
'local_cert' => $SSL_DIR . '/client.pem', // "pem_file" alias
],
]),
];
$uriOptions = ['authMechanism' => 'MONGODB-X509', 'ssl' => true];
$parsed = parse_url(STANDALONE_X509);
$uri = sprintf('mongodb://%s:%d', $parsed['host'], $parsed['port']);
$manager = new MongoDB\Driver\Manager($uri, $uriOptions, $driverOptions);
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/cursor/bug0671-001.phpt b/mongodb-1.4.2/tests/cursor/bug0671-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/bug0671-001.phpt
rename to mongodb-1.4.2/tests/cursor/bug0671-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/bug0732-001.phpt b/mongodb-1.4.2/tests/cursor/bug0732-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/bug0732-001.phpt
rename to mongodb-1.4.2/tests/cursor/bug0732-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/bug0849-001.phpt b/mongodb-1.4.2/tests/cursor/bug0849-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/bug0849-001.phpt
rename to mongodb-1.4.2/tests/cursor/bug0849-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/bug0924-001.phpt b/mongodb-1.4.2/tests/cursor/bug0924-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/bug0924-001.phpt
rename to mongodb-1.4.2/tests/cursor/bug0924-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/bug0924-002.phpt b/mongodb-1.4.2/tests/cursor/bug0924-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/bug0924-002.phpt
rename to mongodb-1.4.2/tests/cursor/bug0924-002.phpt
diff --git a/mongodb-1.4.2/tests/cursor/bug1050-001.phpt b/mongodb-1.4.2/tests/cursor/bug1050-001.phpt
new file mode 100644
index 00000000..4c45dae4
--- /dev/null
+++ b/mongodb-1.4.2/tests/cursor/bug1050-001.phpt
@@ -0,0 +1,71 @@
+--TEST--
+PHPC-1050: Command cursor should not invoke getMore at execution
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+$cmd = new MongoDB\Driver\Command(
+ [
+ 'aggregate' => COLLECTION_NAME,
+ 'pipeline' => [
+ ['$changeStream' => (object) []],
+ ],
+ 'cursor' => (object) [],
+ ],
+ [
+ 'maxAwaitTimeMS' => 1000,
+ ]
+);
+
+$start = microtime(true);
+$cursor = $manager->executeReadCommand(DATABASE_NAME, $cmd);
+printf("Executing command took %0.6f seconds\n", microtime(true) - $start);
+
+$it = new IteratorIterator($cursor);
+
+$start = microtime(true);
+$it->rewind();
+printf("Rewinding cursor took %0.6f seconds\n", microtime(true) - $start);
+printf("Current position is valid: %s\n", $it->valid() ? 'yes' : 'no');
+
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->insert(['x' => 1]);
+$manager->executeBulkWrite(NS, $bulk);
+
+$start = microtime(true);
+$it->next();
+printf("Advancing cursor took %0.6f seconds\n", microtime(true) - $start);
+printf("Current position is valid: %s\n", $it->valid() ? 'yes' : 'no');
+
+$document = $it->current();
+
+if (isset($document)) {
+ printf("Operation type: %s\n", $document->operationType);
+ var_dump($document->fullDocument);
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Executing command took 0.%d seconds
+Rewinding cursor took 1.%d seconds
+Current position is valid: no
+Advancing cursor took %d.%d seconds
+Current position is valid: yes
+Operation type: insert
+object(stdClass)#%d (%d) {
+ ["_id"]=>
+ object(MongoDB\BSON\ObjectId)#%d (%d) {
+ ["oid"]=>
+ string(24) "%x"
+ }
+ ["x"]=>
+ int(1)
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/cursor/bug1050-002.phpt b/mongodb-1.4.2/tests/cursor/bug1050-002.phpt
new file mode 100644
index 00000000..664eb372
--- /dev/null
+++ b/mongodb-1.4.2/tests/cursor/bug1050-002.phpt
@@ -0,0 +1,74 @@
+--TEST--
+PHPC-1050: Command cursor should not invoke getMore at execution (rewind omitted)
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+$cmd = new MongoDB\Driver\Command(
+ [
+ 'aggregate' => COLLECTION_NAME,
+ 'pipeline' => [
+ ['$changeStream' => (object) []],
+ ],
+ 'cursor' => (object) [],
+ ],
+ [
+ 'maxAwaitTimeMS' => 1000,
+ ]
+);
+
+$start = microtime(true);
+$cursor = $manager->executeReadCommand(DATABASE_NAME, $cmd);
+printf("Executing command took %0.6f seconds\n", microtime(true) - $start);
+
+$it = new IteratorIterator($cursor);
+
+printf("Current position is valid: %s\n", $it->valid() ? 'yes' : 'no');
+
+$start = microtime(true);
+$it->next();
+printf("Advancing cursor took %0.6f seconds\n", microtime(true) - $start);
+printf("Current position is valid: %s\n", $it->valid() ? 'yes' : 'no');
+
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->insert(['x' => 1]);
+$manager->executeBulkWrite(NS, $bulk);
+
+$start = microtime(true);
+$it->next();
+printf("Advancing cursor took %0.6f seconds\n", microtime(true) - $start);
+printf("Current position is valid: %s\n", $it->valid() ? 'yes' : 'no');
+
+$document = $it->current();
+
+if (isset($document)) {
+ printf("Operation type: %s\n", $document->operationType);
+ var_dump($document->fullDocument);
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Executing command took 0.%d seconds
+Current position is valid: no
+Advancing cursor took 1.%d seconds
+Current position is valid: no
+Advancing cursor took %d.%d seconds
+Current position is valid: yes
+Operation type: insert
+object(stdClass)#%d (%d) {
+ ["_id"]=>
+ object(MongoDB\BSON\ObjectId)#%d (%d) {
+ ["oid"]=>
+ string(24) "%x"
+ }
+ ["x"]=>
+ int(1)
+}
+===DONE===
diff --git a/mongodb-1.3.4/tests/cursor/cursor-IteratorIterator-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-IteratorIterator-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-IteratorIterator-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-IteratorIterator-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-IteratorIterator-002.phpt b/mongodb-1.4.2/tests/cursor/cursor-IteratorIterator-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-IteratorIterator-002.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-IteratorIterator-002.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-IteratorIterator-003.phpt b/mongodb-1.4.2/tests/cursor/cursor-IteratorIterator-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-IteratorIterator-003.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-IteratorIterator-003.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-IteratorIterator-004.phpt b/mongodb-1.4.2/tests/cursor/cursor-IteratorIterator-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-IteratorIterator-004.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-IteratorIterator-004.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-NoRewindIterator-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-NoRewindIterator-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-NoRewindIterator-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-NoRewindIterator-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-destruct-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-destruct-001.phpt
similarity index 95%
rename from mongodb-1.3.4/tests/cursor/cursor-destruct-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-destruct-001.phpt
index 1d0c9bb2..63135bb9 100644
--- a/mongodb-1.3.4/tests/cursor/cursor-destruct-001.phpt
+++ b/mongodb-1.4.2/tests/cursor/cursor-destruct-001.phpt
@@ -1,52 +1,52 @@
--TEST--
MongoDB\Driver\Cursor destruct should kill a live cursor
--SKIPIF--
+<?php if (PHP_INT_SIZE !== 8) { die('skip Only for 64-bit platform'); } ?>
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
function getNumOpenCursors(MongoDB\Driver\Manager $manager)
{
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(array('serverStatus' => 1)));
$result = current($cursor->toArray());
-
if (isset($result->metrics->cursor->open->total)) {
return $result->metrics->cursor->open->total;
}
if (isset($result->cursors->totalOpen)) {
return $result->cursors->totalOpen;
}
throw new RuntimeException('Could not find number of open cursors in serverStatus');
}
$manager = new MongoDB\Driver\Manager(STANDALONE);
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(array('_id' => 1));
$bulk->insert(array('_id' => 2));
$bulk->insert(array('_id' => 3));
$manager->executeBulkWrite(NS, $bulk);
$numOpenCursorsBeforeQuery = getNumOpenCursors($manager);
$cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array(), array('batchSize' => 2)));
var_dump($cursor->isDead());
var_dump(getNumOpenCursors($manager) == $numOpenCursorsBeforeQuery + 1);
unset($cursor);
var_dump(getNumOpenCursors($manager) == $numOpenCursorsBeforeQuery);
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
bool(false)
bool(true)
bool(true)
===DONE===
diff --git a/mongodb-1.3.4/tests/cursor/cursor-get_iterator-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-get_iterator-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-get_iterator-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-get_iterator-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-get_iterator-002.phpt b/mongodb-1.4.2/tests/cursor/cursor-get_iterator-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-get_iterator-002.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-get_iterator-002.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-get_iterator-003.phpt b/mongodb-1.4.2/tests/cursor/cursor-get_iterator-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-get_iterator-003.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-get_iterator-003.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-getmore-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-getmore-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-getmore-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-getmore-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-getmore-002.phpt b/mongodb-1.4.2/tests/cursor/cursor-getmore-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-getmore-002.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-getmore-002.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-getmore-003.phpt b/mongodb-1.4.2/tests/cursor/cursor-getmore-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-getmore-003.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-getmore-003.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-getmore-004.phpt b/mongodb-1.4.2/tests/cursor/cursor-getmore-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-getmore-004.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-getmore-004.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-getmore-005.phpt b/mongodb-1.4.2/tests/cursor/cursor-getmore-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-getmore-005.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-getmore-005.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-getmore-006.phpt b/mongodb-1.4.2/tests/cursor/cursor-getmore-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-getmore-006.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-getmore-006.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-isDead-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-isDead-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-isDead-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-isDead-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-isDead-002.phpt b/mongodb-1.4.2/tests/cursor/cursor-isDead-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-isDead-002.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-isDead-002.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-isDead-003.phpt b/mongodb-1.4.2/tests/cursor/cursor-isDead-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-isDead-003.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-isDead-003.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-isDead-004.phpt b/mongodb-1.4.2/tests/cursor/cursor-isDead-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-isDead-004.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-isDead-004.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-iterator_handlers-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-iterator_handlers-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-iterator_handlers-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-iterator_handlers-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-rewind-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-rewind-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-rewind-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-rewind-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-setTypeMap_error-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-setTypeMap_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-setTypeMap_error-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-setTypeMap_error-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-setTypeMap_error-002.phpt b/mongodb-1.4.2/tests/cursor/cursor-setTypeMap_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-setTypeMap_error-002.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-setTypeMap_error-002.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-tailable-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-tailable-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-tailable-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-tailable-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-tailable-002.phpt b/mongodb-1.4.2/tests/cursor/cursor-tailable-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-tailable-002.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-tailable-002.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-tailable-003.phpt b/mongodb-1.4.2/tests/cursor/cursor-tailable-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-tailable-003.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-tailable-003.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-tailable_error-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-tailable_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-tailable_error-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-tailable_error-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-tailable_error-002.phpt b/mongodb-1.4.2/tests/cursor/cursor-tailable_error-002.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/cursor/cursor-tailable_error-002.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-tailable_error-002.phpt
index 57dd3d79..51ea06e6 100644
--- a/mongodb-1.3.4/tests/cursor/cursor-tailable_error-002.phpt
+++ b/mongodb-1.4.2/tests/cursor/cursor-tailable_error-002.phpt
@@ -1,88 +1,88 @@
--TEST--
MongoDB\Driver\Cursor cursor killed during tailable iteration
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
function insert(MongoDB\Driver\Manager $manager, $from, $to = null)
{
if ($to === null) {
$to = $from;
}
$bulkWrite = new MongoDB\Driver\BulkWrite;
for ($i = $from; $i <= $to; $i++) {
$bulkWrite->insert(['_id' => $i]);
}
$writeResult = $manager->executeBulkWrite(NS, $bulkWrite);
printf("Inserted %d document(s): %s\n", $writeResult->getInsertedCount(), implode(range($from, $to), ', '));
}
$manager = new MongoDB\Driver\Manager(STANDALONE);
$manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([
'create' => COLLECTION_NAME,
'capped' => true,
'size' => 1048576,
]));
insert($manager, 1, 3);
echo throws(function() use ($manager) {
$cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['tailable' => true]));
$it = new IteratorIterator($cursor);
$numAwaitAttempts = 0;
$maxAwaitAttempts = 7;
for ($it->rewind(); $numAwaitAttempts < $maxAwaitAttempts; $it->next()) {
$document = $it->current();
if ($document !== null) {
printf("{_id: %d}\n", $document->_id);
continue;
}
if ($numAwaitAttempts === 2) {
insert($manager, 4, 6);
}
if ($numAwaitAttempts === 5) {
$manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([
'killCursors' => COLLECTION_NAME,
'cursors' => [ $cursor->getId() ],
]));
}
echo "Awaiting results...\n";
$numAwaitAttempts += 1;
}
}, 'MongoDB\Driver\Exception\RuntimeException'), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
Inserted 3 document(s): 1, 2, 3
{_id: 1}
{_id: 2}
{_id: 3}
Awaiting results...
Awaiting results...
Inserted 3 document(s): 4, 5, 6
Awaiting results...
{_id: 4}
{_id: 5}
{_id: 6}
Awaiting results...
Awaiting results...
Awaiting results...
OK: Got MongoDB\Driver\Exception\RuntimeException
-Cursor not found, cursor id: %d
+%r(Cursor not found, cursor id: \d+|cursor id \d+ not found)%r
===DONE===
diff --git a/mongodb-1.3.4/tests/cursor/cursor-toArray-001.phpt b/mongodb-1.4.2/tests/cursor/cursor-toArray-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-toArray-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-toArray-001.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor-toArray-002.phpt b/mongodb-1.4.2/tests/cursor/cursor-toArray-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor-toArray-002.phpt
rename to mongodb-1.4.2/tests/cursor/cursor-toArray-002.phpt
diff --git a/mongodb-1.3.4/tests/cursor/cursor_error-001.phpt b/mongodb-1.4.2/tests/cursor/cursor_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursor/cursor_error-001.phpt
rename to mongodb-1.4.2/tests/cursor/cursor_error-001.phpt
diff --git a/mongodb-1.3.4/tests/cursorid/cursorid-001.phpt b/mongodb-1.4.2/tests/cursorid/cursorid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursorid/cursorid-001.phpt
rename to mongodb-1.4.2/tests/cursorid/cursorid-001.phpt
diff --git a/mongodb-1.3.4/tests/cursorid/cursorid-002.phpt b/mongodb-1.4.2/tests/cursorid/cursorid-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursorid/cursorid-002.phpt
rename to mongodb-1.4.2/tests/cursorid/cursorid-002.phpt
diff --git a/mongodb-1.3.4/tests/cursorid/cursorid_error-001.phpt b/mongodb-1.4.2/tests/cursorid/cursorid_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/cursorid/cursorid_error-001.phpt
rename to mongodb-1.4.2/tests/cursorid/cursorid_error-001.phpt
diff --git a/mongodb-1.3.4/tests/functional/cursor-001.phpt b/mongodb-1.4.2/tests/functional/cursor-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/functional/cursor-001.phpt
rename to mongodb-1.4.2/tests/functional/cursor-001.phpt
diff --git a/mongodb-1.3.4/tests/functional/cursorid-001.phpt b/mongodb-1.4.2/tests/functional/cursorid-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/functional/cursorid-001.phpt
rename to mongodb-1.4.2/tests/functional/cursorid-001.phpt
diff --git a/mongodb-1.3.4/tests/functional/phpinfo-1.phpt b/mongodb-1.4.2/tests/functional/phpinfo-1.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/functional/phpinfo-1.phpt
rename to mongodb-1.4.2/tests/functional/phpinfo-1.phpt
diff --git a/mongodb-1.3.4/tests/functional/phpinfo-2.phpt b/mongodb-1.4.2/tests/functional/phpinfo-2.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/functional/phpinfo-2.phpt
rename to mongodb-1.4.2/tests/functional/phpinfo-2.phpt
diff --git a/mongodb-1.3.4/tests/functional/query-sort-001.phpt b/mongodb-1.4.2/tests/functional/query-sort-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/functional/query-sort-001.phpt
rename to mongodb-1.4.2/tests/functional/query-sort-001.phpt
diff --git a/mongodb-1.3.4/tests/functional/query-sort-002.phpt b/mongodb-1.4.2/tests/functional/query-sort-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/functional/query-sort-002.phpt
rename to mongodb-1.4.2/tests/functional/query-sort-002.phpt
diff --git a/mongodb-1.3.4/tests/functional/query-sort-003.phpt b/mongodb-1.4.2/tests/functional/query-sort-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/functional/query-sort-003.phpt
rename to mongodb-1.4.2/tests/functional/query-sort-003.phpt
diff --git a/mongodb-1.3.4/tests/functional/query-sort-004.phpt b/mongodb-1.4.2/tests/functional/query-sort-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/functional/query-sort-004.phpt
rename to mongodb-1.4.2/tests/functional/query-sort-004.phpt
diff --git a/mongodb-1.3.4/tests/manager/bug0572.phpt b/mongodb-1.4.2/tests/manager/bug0572.phpt
similarity index 97%
rename from mongodb-1.3.4/tests/manager/bug0572.phpt
rename to mongodb-1.4.2/tests/manager/bug0572.phpt
index 8f846983..0a1c09e9 100644
--- a/mongodb-1.3.4/tests/manager/bug0572.phpt
+++ b/mongodb-1.4.2/tests/manager/bug0572.phpt
@@ -1,33 +1,34 @@
--TEST--
PHPC-572: Ensure stream context does not go out of scope before socket init
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_SSL'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$closure = function() {
$context = stream_context_create([
'ssl' => [
'verify_peer' => false,
'verify_peer_name' => false,
'allow_self_signed' => true,
],
]);
return new MongoDB\Driver\Manager(STANDALONE_SSL, ['ssl' => true], ['context' => $context]);
};
$manager = $closure();
$cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1]));
var_dump($cursor->toArray()[0]);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
["ok"]=>
float(1)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/bug0851-001.phpt b/mongodb-1.4.2/tests/manager/bug0851-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/bug0851-001.phpt
rename to mongodb-1.4.2/tests/manager/bug0851-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/bug0851-002.phpt b/mongodb-1.4.2/tests/manager/bug0851-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/bug0851-002.phpt
rename to mongodb-1.4.2/tests/manager/bug0851-002.phpt
diff --git a/mongodb-1.3.4/tests/manager/bug0912-001.phpt b/mongodb-1.4.2/tests/manager/bug0912-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/bug0912-001.phpt
rename to mongodb-1.4.2/tests/manager/bug0912-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/bug0913-001.phpt b/mongodb-1.4.2/tests/manager/bug0913-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/bug0913-001.phpt
rename to mongodb-1.4.2/tests/manager/bug0913-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/bug0940-001.phpt b/mongodb-1.4.2/tests/manager/bug0940-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/bug0940-001.phpt
rename to mongodb-1.4.2/tests/manager/bug0940-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/bug0940-002.phpt b/mongodb-1.4.2/tests/manager/bug0940-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/bug0940-002.phpt
rename to mongodb-1.4.2/tests/manager/bug0940-002.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-002.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-002.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-003.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-003.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-003.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-004.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-004.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-004.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-appname-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-appname-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-appname-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-appname-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-appname_error-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-appname_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-appname_error-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-appname_error-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-auth_mechanism-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-auth_mechanism-001.phpt
similarity index 79%
rename from mongodb-1.3.4/tests/manager/manager-ctor-auth_mechanism-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-auth_mechanism-001.phpt
index e00318d0..b75c7e1d 100644
--- a/mongodb-1.3.4/tests/manager/manager-ctor-auth_mechanism-001.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-ctor-auth_mechanism-001.phpt
@@ -1,25 +1,25 @@
--TEST--
MongoDB\Driver\Manager::__construct(): authMechanism option
--FILE--
<?php
$tests = [
- ['mongodb://127.0.0.1/?authMechanism=MONGODB-X509', []],
- ['mongodb://127.0.0.1/?authMechanism=GSSAPI', []],
+ ['mongodb://username@127.0.0.1/?authMechanism=MONGODB-X509', []],
+ ['mongodb://username@127.0.0.1/?authMechanism=GSSAPI', []],
[null, ['authMechanism' => 'MONGODB-X509']],
[null, ['authMechanism' => 'GSSAPI']],
];
foreach ($tests as $test) {
list($uri, $options) = $test;
/* Note: the Manager's debug information does not include the auth mechanism
* so we are merely testing that no exception is thrown. */
$manager = new MongoDB\Driver\Manager($uri, $options);
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-auth_mechanism-002.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-auth_mechanism-002.phpt
similarity index 77%
rename from mongodb-1.3.4/tests/manager/manager-ctor-auth_mechanism-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-auth_mechanism-002.phpt
index a9abcd20..05136477 100644
--- a/mongodb-1.3.4/tests/manager/manager-ctor-auth_mechanism-002.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-ctor-auth_mechanism-002.phpt
@@ -1,29 +1,29 @@
--TEST--
MongoDB\Driver\Manager::__construct(): authMechanismProperties option
--FILE--
<?php
$tests = [
- ['mongodb://127.0.0.1/?authMechanism=GSSAPI&authMechanismProperties=CANONICALIZE_HOST_NAME:true,SERVICE_NAME:foo,SERVICE_REALM:bar', []],
+ ['mongodb://username@127.0.0.1/?authMechanism=GSSAPI&authMechanismProperties=CANONICALIZE_HOST_NAME:true,SERVICE_NAME:foo,SERVICE_REALM:bar', []],
[null, ['authMechanism' => 'GSSAPI', 'authMechanismProperties' => ['CANONICALIZE_HOST_NAME' => 'true', 'SERVICE_NAME' => 'foo', 'SERVICE_REALM' => 'bar']]],
// Options are case-insensitive
- ['mongodb://127.0.0.1/?authMechanism=GSSAPI&authMechanismProperties=canonicalize_host_name:TRUE,service_name:foo,service_realm:bar', []],
+ ['mongodb://username@127.0.0.1/?authMechanism=GSSAPI&authMechanismProperties=canonicalize_host_name:TRUE,service_name:foo,service_realm:bar', []],
[null, ['authMechanism' => 'GSSAPI', 'authMechanismProperties' => ['canonicalize_host_name' => 'TRUE', 'service_name' => 'foo', 'service_realm' => 'bar']]],
// Boolean true "CANONICALIZE_HOST_NAME" value is converted to "true"
[null, ['authMechanism' => 'GSSAPI', 'authMechanismProperties' => ['canonicalize_host_name' => true]]],
];
foreach ($tests as $test) {
list($uri, $options) = $test;
/* Note: the Manager's debug information does not include the auth mechanism
* so we are merely testing that no exception is thrown and that option
* processing does not leak memory. */
$manager = new MongoDB\Driver\Manager($uri, $options);
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-read_concern-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-read_concern-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-read_concern-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-read_concern-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-read_concern-error-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-read_concern-error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-read_concern-error-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-read_concern-error-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-read_preference-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-read_preference-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-read_preference-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-read_preference-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-read_preference-002.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-read_preference-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-read_preference-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-read_preference-002.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-read_preference-004.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-read_preference-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-read_preference-004.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-read_preference-004.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-001.phpt
similarity index 94%
rename from mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-001.phpt
index fc48913e..83a501b3 100644
--- a/mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-001.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-001.phpt
@@ -1,66 +1,66 @@
--TEST--
MongoDB\Driver\Manager::__construct(): invalid read preference (mode and tags)
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
// Invalid types in URI string
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=1');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=secondary&readPreferenceTags=invalid');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
// Invalid types in URI options array
echo throws(function() {
new MongoDB\Driver\Manager(null, ['readPreference' => 1]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['readPreference' => 'primary', 'readPreferenceTags' => 'invalid']);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
// Invalid values
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=primary&readPreferenceTags=dc:ny');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['readPreference' => 'nothing']);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=primary', ['readPreferenceTags' => [[]]]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=primary', ['readPreferenceTags' => ['invalid']]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=1'. Unsupported readPreference value [readPreference=1]..
+Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=1'. Unsupported readPreference value [readPreference=1].
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=secondary&readPreferenceTags=invalid'. Unknown option or value for 'readPreferenceTags=invalid'.
+Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=secondary&readPreferenceTags=invalid'. Unsupported value for "readPreferenceTags": "invalid".
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected string for "readPreference" URI option, 32-bit integer given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected array for "readPreferenceTags" URI option, string given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=primary&readPreferenceTags=dc:ny'. Invalid readPreferences.
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Unsupported readPreference value: 'nothing'
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Primary read preference mode conflicts with tags
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Read preference tags must be an array of zero or more documents
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-002.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-002.phpt
similarity index 94%
rename from mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-002.phpt
index 766d65b7..3eee6200 100644
--- a/mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-002.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-002.phpt
@@ -1,78 +1,78 @@
--TEST--
MongoDB\Driver\Manager::__construct(): invalid read preference (maxStalenessSeconds)
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
// Invalid types
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=invalid');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['maxStalenessSeconds' => 'invalid']);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
// Invalid range in URI string (array option is tested in 64-bit error test)
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=2147483648');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
// Invalid values
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?maxstalenessseconds=1231');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?maxStalenessSeconds=1231');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['maxstalenessseconds' => 1231]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['maxStalenessSeconds' => 1231]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['readPreference' => 'secondary', 'maxStalenessSeconds' => -2]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['readPreference' => 'secondary', 'maxStalenessSeconds' => 0]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['readPreference' => 'secondary', 'maxStalenessSeconds' => 42]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=invalid'. Unknown option or value for 'maxStalenessSeconds=invalid'.
+Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=invalid'. Unsupported value for "maxStalenessSeconds": "invalid".
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected integer for "maxStalenessSeconds" URI option, string given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=2147483648'. Unknown option or value for 'maxStalenessSeconds=2147483648'.
+Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=2147483648'. Unsupported value for "maxStalenessSeconds": "2147483648".
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?maxstalenessseconds=1231'. Invalid readPreferences.
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?maxStalenessSeconds=1231'. Invalid readPreferences.
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Primary read preference mode conflicts with maxStalenessSeconds
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Primary read preference mode conflicts with maxStalenessSeconds
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected maxStalenessSeconds to be >= 90, -2 given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected maxStalenessSeconds to be >= 90, 0 given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected maxStalenessSeconds to be >= 90, 42 given
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-003.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-003.phpt
similarity index 93%
rename from mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-003.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-003.phpt
index 7f00ea00..d10c4f78 100644
--- a/mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-003.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-003.phpt
@@ -1,24 +1,24 @@
--TEST--
MongoDB\Driver\Manager::__construct(): invalid read preference (slaveOk)
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?slaveok=other');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['slaveOk' => 1]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?slaveok=other'. Unknown option or value for 'slaveok=other'.
+Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?slaveok=other'. Unsupported value for "slaveok": "other".
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected boolean for "slaveOk" URI option, 32-bit integer given
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-004.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-read_preference-error-004.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-read_preference-error-004.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-ssl-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-ssl-001.phpt
similarity index 90%
rename from mongodb-1.3.4/tests/manager/manager-ctor-ssl-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-ssl-001.phpt
index ca070ef5..d1339f8d 100644
--- a/mongodb-1.3.4/tests/manager/manager-ctor-ssl-001.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-ctor-ssl-001.phpt
@@ -1,30 +1,33 @@
--TEST--
MongoDB\Driver\Manager::__construct(): ssl option does not require driverOptions
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
--FILE--
<?php
/* Note: Since the STANDALONE_SSL server uses a self-signed certificate, we
* cannot connect to it without also providing driver options. Since the purpose
* of this test is to demonstrate that the SSL option does not require driver
* options, we will simply dump the constructed Manager. */
var_dump(new MongoDB\Driver\Manager('mongodb://127.0.0.1/?ssl=true'));
var_dump(new MongoDB\Driver\Manager(null, ['ssl' => true]));
?>
===DONE===
--EXPECTF--
object(MongoDB\Driver\Manager)#%d (%d) {
["uri"]=>
string(29) "mongodb://127.0.0.1/?ssl=true"
["cluster"]=>
array(0) {
}
}
object(MongoDB\Driver\Manager)#%d (%d) {
["uri"]=>
string(20) "mongodb://127.0.0.1/"
["cluster"]=>
array(0) {
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-write_concern-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-write_concern-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-002.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-write_concern-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-write_concern-002.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-003.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-write_concern-003.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-write_concern-003.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-004.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-write_concern-004.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-write_concern-004.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-002.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-002.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-003.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-003.phpt
similarity index 92%
rename from mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-003.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-003.phpt
index db6acd85..ab4be414 100644
--- a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-003.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-003.phpt
@@ -1,25 +1,25 @@
--TEST--
MongoDB\Driver\Manager::__construct(): invalid write concern (wtimeoutms)
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?wtimeoutms=invalid');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['wTimeoutMS' => 'invalid']);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?wtimeoutms=invalid'. Unknown option or value for 'wtimeoutms=invalid'.
+Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?wtimeoutms=invalid'. Unsupported value for "wtimeoutms": "invalid".
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected 32-bit integer for "wTimeoutMS" URI option, string given
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-004.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-004.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-004.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-005.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-005.phpt
similarity index 95%
rename from mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-005.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-005.phpt
index 82e79bf6..2077b374 100644
--- a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-005.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-005.phpt
@@ -1,76 +1,76 @@
--TEST--
MongoDB\Driver\Manager::__construct(): invalid write concern (journal)
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
// Invalid types
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?journal=invalid');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['journal' => 'invalid']);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
// Invalid values (journal conflicts with unacknowledged write concerns)
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=-1&journal=true');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=0&journal=true');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=-1', ['journal' => true]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=0', ['journal' => true]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?journal=true', ['w' => -1]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?journal=true', ['w' => 0]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['w' => -1, 'journal' => true]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['w' => 0, 'journal' => true]);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?journal=invalid'. Unknown option or value for 'journal=invalid'.
+Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?journal=invalid'. Unsupported value for "journal": "invalid".
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected boolean for "journal" URI option, string given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?w=-1&journal=true'. Invalid writeConcern.
+Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?w=-1&journal=true'. Journal conflicts with w value [w=-1].
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?w=0&journal=true'. Invalid writeConcern.
+Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?w=0&journal=true'. Journal conflicts with w value [w=0].
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Journal conflicts with w value: -1
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Journal conflicts with w value: 0
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Journal conflicts with w value: -1
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Journal conflicts with w value: 0
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Journal conflicts with w value: -1
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Journal conflicts with w value: 0
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-006.phpt b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-006.phpt
similarity index 94%
rename from mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-006.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-006.phpt
index 476103bf..847693f4 100644
--- a/mongodb-1.3.4/tests/manager/manager-ctor-write_concern-error-006.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-ctor-write_concern-error-006.phpt
@@ -1,26 +1,26 @@
--TEST--
MongoDB\Driver\Manager::__construct(): invalid write concern (safe)
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
// Invalid types
echo throws(function() {
new MongoDB\Driver\Manager('mongodb://127.0.0.1/?safe=invalid');
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
echo throws(function() {
new MongoDB\Driver\Manager(null, ['safe' => 'invalid']);
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?safe=invalid'. Unknown option or value for 'safe=invalid'.
+Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?safe=invalid'. Unsupported value for "safe": "invalid".
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected boolean for "safe" URI option, string given
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor_error-001.phpt b/mongodb-1.4.2/tests/manager/manager-ctor_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor_error-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor_error-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor_error-002.phpt b/mongodb-1.4.2/tests/manager/manager-ctor_error-002.phpt
similarity index 88%
rename from mongodb-1.3.4/tests/manager/manager-ctor_error-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor_error-002.phpt
index 81157c30..e805564d 100644
--- a/mongodb-1.3.4/tests/manager/manager-ctor_error-002.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-ctor_error-002.phpt
@@ -1,18 +1,18 @@
--TEST--
MongoDB\Driver\Manager::__construct(): invalid URI
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
echo throws(function() {
$manager = new MongoDB\Driver\Manager("not a valid connection string");
}, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n";
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Failed to parse MongoDB URI: 'not a valid connection string'. Invalid URI Schema, expecting 'mongodb://'.
+Failed to parse MongoDB URI: 'not a valid connection string'. Invalid URI Schema, expecting 'mongodb://' or 'mongodb+srv://'.
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-ctor_error-003.phpt b/mongodb-1.4.2/tests/manager/manager-ctor_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-ctor_error-003.phpt
rename to mongodb-1.4.2/tests/manager/manager-ctor_error-003.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-debug-001.phpt b/mongodb-1.4.2/tests/manager/manager-debug-001.phpt
similarity index 84%
rename from mongodb-1.3.4/tests/manager/manager-debug-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-debug-001.phpt
index 68a4ece7..cc4a8184 100644
--- a/mongodb-1.3.4/tests/manager/manager-debug-001.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-debug-001.phpt
@@ -1,28 +1,26 @@
--TEST--
MongoDB\Driver\Manager: Writing debug log files
---SKIPIF--
-<?php if (defined("HHVM_VERSION_ID")) exit("skip HHVM uses HHVM's logging functionality"); ?>
--FILE--
<?php
$name = tempnam(sys_get_temp_dir(), "PHONGO");
unlink($name);
mkdir($name);
ini_set('mongodb.debug', $name);
$manager = new MongoDB\Driver\Manager;
ini_set('mongodb.debug', 'off');
foreach (glob($name."/*") as $file) {
echo file_get_contents($file);
unlink($file);
}
rmdir($name);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
%A[%s] PHONGO: DEBUG > Connection string: '%s'
[%s] PHONGO: DEBUG > Creating Manager, phongo-1.%d.%d%S[%s] - mongoc-1.%s(%s), libbson-1.%s(%s), php-%s
%A===DONE===%A
diff --git a/mongodb-1.3.4/tests/manager/manager-debug-002.phpt b/mongodb-1.4.2/tests/manager/manager-debug-002.phpt
similarity index 67%
rename from mongodb-1.3.4/tests/manager/manager-debug-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-debug-002.phpt
index e98aa27a..470c3a54 100644
--- a/mongodb-1.3.4/tests/manager/manager-debug-002.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-debug-002.phpt
@@ -1,18 +1,16 @@
--TEST--
-MongoDB\Driver\Manager: mongodb.debug=stderr
---SKIPIF--
-<?php if (defined("HHVM_VERSION_ID")) exit("skip HHVM uses HHVM's logging functionality"); ?>
+MongoDB\Driver\Manager: mongodb.debug=stderr (connection string and version)
--INI--
mongodb.debug=stderr
--FILE--
<?php
$manager = new MongoDB\Driver\Manager;
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
%A[%s] PHONGO: DEBUG > Connection string: '%s'
[%s] PHONGO: DEBUG > Creating Manager, phongo-1.%d.%d%S[%s] - mongoc-1.%s(%s), libbson-1.%s(%s), php-%s
%A===DONE===%A
diff --git a/mongodb-1.4.2/tests/manager/manager-debug-003.phpt b/mongodb-1.4.2/tests/manager/manager-debug-003.phpt
new file mode 100644
index 00000000..226733f4
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-debug-003.phpt
@@ -0,0 +1,13 @@
+--TEST--
+MongoDB\Driver\Manager: mongodb.debug=stderr (date format)
+--INI--
+mongodb.debug=stderr
+--FILE--
+<?php
+$manager = new MongoDB\Driver\Manager;
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+[%r(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{6}\+00:00)%r]%A
+===DONE===%A
diff --git a/mongodb-1.3.4/tests/manager/manager-destruct-001.phpt b/mongodb-1.4.2/tests/manager/manager-destruct-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-destruct-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-destruct-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-002.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-002.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-003.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-003.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-003.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-004.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-004.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-004.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-005.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-005.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-005.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-006.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-006.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-006.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-007.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-007.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-007.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-008.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-008.phpt
similarity index 93%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-008.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-008.phpt
index d4aed4d0..f7e6c1b9 100644
--- a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-008.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-008.phpt
@@ -1,71 +1,70 @@
--TEST--
MongoDB\Driver\Manager::executeBulkWrite() update multiple documents with no upsert
--SKIPIF--
-<?php if (getenv("TRAVIS")) exit("skip This oddly enough fails on travis and I cannot figureout why") ?>
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(STANDALONE);
// load fixtures for test
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(array('_id' => 1, 'x' => 1));
$bulk->insert(array('_id' => 2, 'x' => 1));
$bulk->insert(array('_id' => 3, 'x' => 3));
$manager->executeBulkWrite(NS, $bulk);
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->update(
array('x' => 1),
array('$set' => array('x' => 2)),
array('multi' => true, 'upsert' => false)
);
$result = $manager->executeBulkWrite(NS, $bulk);
echo "\n===> WriteResult\n";
printWriteResult($result);
echo "\n===> Collection\n";
$cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array()));
var_dump(iterator_to_array($cursor));
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
===> WriteResult
server: %s:%d
insertedCount: 0
matchedCount: 2
modifiedCount: 2
upsertedCount: 0
deletedCount: 0
===> Collection
array(3) {
[0]=>
object(stdClass)#%d (2) {
["_id"]=>
int(1)
["x"]=>
int(2)
}
[1]=>
object(stdClass)#%d (2) {
["_id"]=>
int(2)
["x"]=>
int(2)
}
[2]=>
object(stdClass)#%d (2) {
["_id"]=>
int(3)
["x"]=>
int(3)
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-009.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-009.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-009.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-009.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-010.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-010.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-010.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-010.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite-011.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-011.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite-011.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-011.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeBulkWrite-003.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-012.phpt
similarity index 57%
copy from mongodb-1.3.4/tests/server/server-executeBulkWrite-003.phpt
copy to mongodb-1.4.2/tests/manager/manager-executeBulkWrite-012.phpt
index 94cf4742..ba2217ed 100644
--- a/mongodb-1.3.4/tests/server/server-executeBulkWrite-003.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite-012.phpt
@@ -1,36 +1,39 @@
--TEST--
-MongoDB\Driver\Server::executeBulkWrite() with write concern (replica set primary)
+MongoDB\Driver\Manager::executeBulkWrite() with legacy write concern (replica set primary)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(REPLICASET);
-$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY));
-$writeConcerns = array(0, 1, 2, MongoDB\Driver\WriteConcern::MAJORITY);
+$writeConcerns = [0, 1, 2, MongoDB\Driver\WriteConcern::MAJORITY];
foreach ($writeConcerns as $wc) {
$bulk = new MongoDB\Driver\BulkWrite();
- $bulk->insert(array('wc' => $wc));
+ $bulk->insert(['wc' => $wc]);
- $result = $server->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern($wc));
+ $options = [
+ 'writeConcern' => new MongoDB\Driver\WriteConcern($wc),
+ ];
+
+ $result = $manager->executeBulkWrite(NS, $bulk, $options);
var_dump($result->isAcknowledged());
var_dump($result->getInsertedCount());
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
bool(false)
NULL
bool(true)
int(1)
bool(true)
int(1)
bool(true)
int(1)
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-002.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-002.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-003.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-003.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-003.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-004.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-004.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-004.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-006.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-006.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-006.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-007.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-007.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-007.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-008.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-008.phpt
similarity index 100%
copy from mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-008.phpt
copy to mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-008.phpt
diff --git a/mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-009.phpt b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-009.phpt
new file mode 100644
index 00000000..eaf4ac2e
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeBulkWrite_error-009.phpt
@@ -0,0 +1,48 @@
+--TEST--
+MongoDB\Driver\Manager::executeBulkWrite() with invalid options
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+echo throws(function() use ($manager) {
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 1]);
+ $manager->executeBulkWrite(NS, $bulk, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager) {
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 1]);
+ $manager->executeBulkWrite(NS, $bulk, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager) {
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 1]);
+ $manager->executeBulkWrite(NS, $bulk, ['writeConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager) {
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 1]);
+ $manager->executeBulkWrite(NS, $bulk, ['writeConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given
+===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-executeCommand-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeCommand-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeCommand-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeCommand-001.phpt
diff --git a/mongodb-1.4.2/tests/manager/manager-executeCommand-002.phpt b/mongodb-1.4.2/tests/manager/manager-executeCommand-002.phpt
new file mode 100644
index 00000000..9fa09189
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeCommand-002.phpt
@@ -0,0 +1,40 @@
+--TEST--
+MongoDB\Driver\Manager::executeCommand() takes a read preference in options array
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+$primary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
+$secondary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
+
+echo "Testing primary:\n";
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$cursor = $manager->executeCommand(DATABASE_NAME, $command, ['readPreference' => $primary]);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+
+echo "Testing secondary:\n";
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$cursor = $manager->executeCommand(DATABASE_NAME, $command, ['readPreference' => $secondary]);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Testing primary:
+is_primary: true
+is_secondary: false
+
+Testing secondary:
+is_primary: false
+is_secondary: true
+
+===DONE===
diff --git a/mongodb-1.4.2/tests/manager/manager-executeCommand-003.phpt b/mongodb-1.4.2/tests/manager/manager-executeCommand-003.phpt
new file mode 100644
index 00000000..7a61dc77
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeCommand-003.phpt
@@ -0,0 +1,40 @@
+--TEST--
+MongoDB\Driver\Manager::executeCommand() takes a read preference as legacy option
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+$primary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
+$secondary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
+
+echo "Testing primary:\n";
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$cursor = $manager->executeCommand(DATABASE_NAME, $command, $primary);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+
+echo "Testing secondary:\n";
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$cursor = $manager->executeCommand(DATABASE_NAME, $command, $secondary);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Testing primary:
+is_primary: true
+is_secondary: false
+
+Testing secondary:
+is_primary: false
+is_secondary: true
+
+===DONE===
diff --git a/mongodb-1.4.2/tests/manager/manager-executeCommand-004.phpt b/mongodb-1.4.2/tests/manager/manager-executeCommand-004.phpt
new file mode 100644
index 00000000..bfb1f3b8
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeCommand-004.phpt
@@ -0,0 +1,47 @@
+--TEST--
+MongoDB\Driver\Manager::executeCommand() options (MONGOC_CMD_RAW)
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+
+(new CommandObserver)->observe(
+ function() use ($manager) {
+ $command = new MongoDB\Driver\Command([
+ 'ping' => true,
+ ]);
+
+ try {
+ $manager->executeCommand(
+ DATABASE_NAME,
+ $command,
+ [
+ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_SECONDARY),
+ 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::LOCAL),
+ 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY),
+ ]
+ );
+ } catch ( Exception $e ) {
+ // Ignore exception that ping doesn't support writeConcern
+ }
+ },
+ function(stdClass $command) {
+ echo "Read Preference: ", $command->{'$readPreference'}->mode, "\n";
+ echo "Read Concern: ", $command->readConcern->level, "\n";
+ echo "Write Concern: ", $command->writeConcern->w, "\n";
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Read Preference: secondary
+Read Concern: local
+Write Concern: majority
+===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-executeCommand_error-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeCommand_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeCommand_error-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeCommand_error-001.phpt
diff --git a/mongodb-1.4.2/tests/manager/manager-executeCommand_error-002.phpt b/mongodb-1.4.2/tests/manager/manager-executeCommand_error-002.phpt
new file mode 100644
index 00000000..57952ed5
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeCommand_error-002.phpt
@@ -0,0 +1,66 @@
+--TEST--
+MongoDB\Driver\Manager::executeCommand() with invalid options (MONGOC_CMD_RAW)
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeCommand(DATABASE_NAME, $command, ['readPreference' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeCommand(DATABASE_NAME, $command, ['readPreference' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeCommand(DATABASE_NAME, $command, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeCommand(DATABASE_NAME, $command, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given
+===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-008.phpt b/mongodb-1.4.2/tests/manager/manager-executeCommand_error-004.phpt
similarity index 60%
rename from mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-008.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeCommand_error-004.phpt
index b6c68387..808e3e27 100644
--- a/mongodb-1.3.4/tests/manager/manager-executeBulkWrite_error-008.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-executeCommand_error-004.phpt
@@ -1,22 +1,23 @@
--TEST--
-MongoDB\Driver\Manager::executeBulkWrite() with empty BulkWrite
+MongoDB\Driver\Manager::executeCommand() with empty command document
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(STANDALONE);
+$command = new MongoDB\Driver\Command([]);
-echo throws(function() use ($manager) {
- $manager->executeBulkWrite(NS, new MongoDB\Driver\BulkWrite);
+echo throws(function() use ($manager, $command) {
+ $manager->executeCommand(DATABASE_NAME, $command);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
?>
===DONE===
<?php exit(0); ?>
---EXPECTF--
+--EXPECT--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Cannot do an empty bulk write
+Empty command document
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-executeQuery-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeQuery-001.phpt
similarity index 98%
rename from mongodb-1.3.4/tests/manager/manager-executeQuery-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeQuery-001.phpt
index b7876eb1..a494402d 100644
--- a/mongodb-1.3.4/tests/manager/manager-executeQuery-001.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-executeQuery-001.phpt
@@ -1,88 +1,90 @@
--TEST--
MongoDB\Driver\Manager::executeQuery() one document (OP_QUERY)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE_30'); CLEANUP(STANDALONE_30); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(STANDALONE_30);
// load fixtures for test
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(array('_id' => 1, 'x' => 2, 'y' => 3));
$bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4));
$bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5));
$manager->executeBulkWrite(NS, $bulk);
$query = new MongoDB\Driver\Query(array('x' => 3), array('projection' => array('y' => 1)));
$qr = $manager->executeQuery(NS, $query);
var_dump($qr instanceof MongoDB\Driver\Cursor);
var_dump($qr);
$server = $qr->getServer();
var_dump($server instanceof MongoDB\Driver\Server);
var_dump($server->getHost());
var_dump($server->getPort());
var_dump(iterator_to_array($qr));
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
bool(true)
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(32) "manager_manager_executeQuery_001"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["x"]=>
int(3)
}
["options"]=>
object(stdClass)#%d (%d) {
["projection"]=>
object(stdClass)#%d (%d) {
["y"]=>
int(1)
}
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
NULL
["isDead"]=>
bool(false)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
bool(true)
string(%d) "%s"
int(%d)
array(1) {
[0]=>
object(stdClass)#%d (2) {
["_id"]=>
int(2)
["y"]=>
int(4)
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-executeQuery-002.phpt b/mongodb-1.4.2/tests/manager/manager-executeQuery-002.phpt
similarity index 98%
rename from mongodb-1.3.4/tests/manager/manager-executeQuery-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeQuery-002.phpt
index 433e71ce..8bf81c9b 100644
--- a/mongodb-1.3.4/tests/manager/manager-executeQuery-002.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-executeQuery-002.phpt
@@ -1,88 +1,90 @@
--TEST--
MongoDB\Driver\Manager::executeQuery() one document (find command)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(STANDALONE);
// load fixtures for test
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(array('_id' => 1, 'x' => 2, 'y' => 3));
$bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4));
$bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5));
$manager->executeBulkWrite(NS, $bulk);
$query = new MongoDB\Driver\Query(array('x' => 3), array('projection' => array('y' => 1)));
$qr = $manager->executeQuery(NS, $query);
var_dump($qr instanceof MongoDB\Driver\Cursor);
var_dump($qr);
$server = $qr->getServer();
var_dump($server instanceof MongoDB\Driver\Server);
var_dump($server->getHost());
var_dump($server->getPort());
var_dump(iterator_to_array($qr));
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
bool(true)
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(32) "manager_manager_executeQuery_002"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["x"]=>
int(3)
}
["options"]=>
object(stdClass)#%d (%d) {
["projection"]=>
object(stdClass)#%d (%d) {
["y"]=>
int(1)
}
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
NULL
["isDead"]=>
bool(false)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
bool(true)
string(%d) "%s"
int(%d)
array(1) {
[0]=>
object(stdClass)#%d (2) {
["_id"]=>
int(2)
["y"]=>
int(4)
}
}
===DONE===
diff --git a/mongodb-1.4.2/tests/manager/manager-executeQuery-003.phpt b/mongodb-1.4.2/tests/manager/manager-executeQuery-003.phpt
new file mode 100644
index 00000000..8db2fab0
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeQuery-003.phpt
@@ -0,0 +1,45 @@
+--TEST--
+MongoDB\Driver\Manager::executeQuery() takes a read preference in options array
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+// load fixtures for test
+$bulk = new MongoDB\Driver\BulkWrite();
+$bulk->insert(['_id' => 1, 'x' => 2, 'y' => 3]);
+$manager->executeBulkWrite(NS, $bulk);
+
+$primary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
+$secondary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
+
+echo "Testing primary:\n";
+$query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]);
+$cursor = $manager->executeQuery(NS, $query, ['readPreference' => $primary]);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+
+echo "Testing secondary:\n";
+$query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]);
+$cursor = $manager->executeQuery(NS, $query, ['readPreference' => $secondary]);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Testing primary:
+is_primary: true
+is_secondary: false
+
+Testing secondary:
+is_primary: false
+is_secondary: true
+
+===DONE===
diff --git a/mongodb-1.4.2/tests/manager/manager-executeQuery-004.phpt b/mongodb-1.4.2/tests/manager/manager-executeQuery-004.phpt
new file mode 100644
index 00000000..de928671
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeQuery-004.phpt
@@ -0,0 +1,45 @@
+--TEST--
+MongoDB\Driver\Manager::executeQuery() takes a read preference as legacy option
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+// load fixtures for test
+$bulk = new MongoDB\Driver\BulkWrite();
+$bulk->insert(['_id' => 1, 'x' => 2, 'y' => 3]);
+$manager->executeBulkWrite(NS, $bulk);
+
+$primary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
+$secondary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
+
+echo "Testing primary:\n";
+$query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]);
+$cursor = $manager->executeQuery(NS, $query, $primary);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+
+echo "Testing secondary:\n";
+$query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]);
+$cursor = $manager->executeQuery(NS, $query, $secondary);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Testing primary:
+is_primary: true
+is_secondary: false
+
+Testing secondary:
+is_primary: false
+is_secondary: true
+
+===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-executeQuery-005.phpt b/mongodb-1.4.2/tests/manager/manager-executeQuery-005.phpt
similarity index 96%
rename from mongodb-1.3.4/tests/manager/manager-executeQuery-005.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeQuery-005.phpt
index e8546427..d5959be7 100644
--- a/mongodb-1.3.4/tests/manager/manager-executeQuery-005.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-executeQuery-005.phpt
@@ -1,87 +1,87 @@
--TEST--
-MongoDB\Driver\Server::executeQuery() with filter and projection
+MongoDB\Driver\Manager::executeQuery() with filter and projection
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
class MyArrayObject extends ArrayObject implements MongoDB\BSON\Unserializable {
function bsonUnserialize(array $data) {
parent::__construct($data);
}
}
$manager = new MongoDB\Driver\Manager(STANDALONE);
$bulk = new \MongoDB\Driver\BulkWrite();
$bulk->insert(array('_id' => 1, array('x' => 2, 'y' => 3)));
$bulk->insert(array('_id' => 2, array('x' => 3, 'y' => 4)));
$bulk->insert(array('_id' => 3, array('x' => 4, 'y' => 5)));
$manager->executeBulkWrite(NS, $bulk);
$query = new MongoDB\Driver\Query(array());
$qr = $manager->executeQuery(NS, $query);
$qr->setTypeMap(array("root"=> "MyArrayObject", "document"=> "MyArrayObject", "array" => "MyArrayObject"));
foreach($qr as $obj) {
var_dump($obj);
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(MyArrayObject)#%d (1) {
[%s]=>
array(2) {
["_id"]=>
int(1)
[0]=>
object(MyArrayObject)#%d (1) {
[%s]=>
array(2) {
["x"]=>
int(2)
["y"]=>
int(3)
}
}
}
}
object(MyArrayObject)#%d (1) {
[%s]=>
array(2) {
["_id"]=>
int(2)
[0]=>
object(MyArrayObject)#%d (1) {
[%s]=>
array(2) {
["x"]=>
int(3)
["y"]=>
int(4)
}
}
}
}
object(MyArrayObject)#%d (1) {
[%s]=>
array(2) {
["_id"]=>
int(3)
[0]=>
object(MyArrayObject)#%d (1) {
[%s]=>
array(2) {
["x"]=>
int(4)
["y"]=>
int(5)
}
}
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-executeQuery_error-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeQuery_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-executeQuery_error-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-executeQuery_error-001.phpt
diff --git a/mongodb-1.4.2/tests/manager/manager-executeQuery_error-002.phpt b/mongodb-1.4.2/tests/manager/manager-executeQuery_error-002.phpt
new file mode 100644
index 00000000..0263ad4e
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeQuery_error-002.phpt
@@ -0,0 +1,42 @@
+--TEST--
+MongoDB\Driver\Manager::executeQuery() with invalid options
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+$query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]);
+
+echo throws(function() use ($manager, $query) {
+ $manager->executeQuery(NS, $query, ['readPreference' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $query) {
+ $manager->executeQuery(NS, $query, ['readPreference' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $query) {
+ $manager->executeQuery(NS, $query, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $query) {
+ $manager->executeQuery(NS, $query, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+===DONE===
diff --git a/mongodb-1.4.2/tests/manager/manager-executeReadCommand-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeReadCommand-001.phpt
new file mode 100644
index 00000000..dbc99213
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeReadCommand-001.phpt
@@ -0,0 +1,42 @@
+--TEST--
+MongoDB\Driver\Manager::executeReadCommand()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); NEEDS_STORAGE_ENGINE(STANDALONE, "wiredTiger"); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+
+(new CommandObserver)->observe(
+ function() use ($manager) {
+ $command = new MongoDB\Driver\Command( [
+ 'aggregate' => NS,
+ 'pipeline' => [],
+ 'cursor' => new stdClass(),
+ ] );
+ $manager->executeReadCommand(
+ DATABASE_NAME,
+ $command,
+ [
+ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_SECONDARY),
+ 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::MAJORITY),
+ ]
+ );
+ },
+ function(stdClass $command) {
+ echo "Read Preference: ", $command->{'$readPreference'}->mode, "\n";
+ echo "Read Concern: ", $command->readConcern->level, "\n";
+ }
+);
+
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Read Preference: secondary
+Read Concern: majority
+===DONE===
diff --git a/mongodb-1.4.2/tests/manager/manager-executeReadCommand_error-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeReadCommand_error-001.phpt
new file mode 100644
index 00000000..72bcba38
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeReadCommand_error-001.phpt
@@ -0,0 +1,54 @@
+--TEST--
+MongoDB\Driver\Manager::executeReadCommand() with invalid options
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadCommand(DATABASE_NAME, $command, ['readPreference' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadCommand(DATABASE_NAME, $command, ['readPreference' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadCommand(DATABASE_NAME, $command, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadCommand(DATABASE_NAME, $command, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+===DONE===
diff --git a/mongodb-1.4.2/tests/manager/manager-executeReadWriteCommand-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeReadWriteCommand-001.phpt
new file mode 100644
index 00000000..7f57aa2c
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeReadWriteCommand-001.phpt
@@ -0,0 +1,40 @@
+--TEST--
+MongoDB\Driver\Manager::executeReadWriteCommand()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+
+(new CommandObserver)->observe(
+ function() use ($manager) {
+ $command = new MongoDB\Driver\Command( [
+ 'findAndModify' => NS,
+ 'update' => [ '$set' => [ 'foo' => 'bar' ] ],
+ ] );
+ $manager->executeReadWriteCommand(
+ DATABASE_NAME,
+ $command,
+ [
+ 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::LOCAL),
+ 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY),
+ ]
+ );
+ },
+ function(stdClass $command) {
+ echo "Read Concern: ", $command->readConcern->level, "\n";
+ echo "Write Concern: ", $command->writeConcern->w, "\n";
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Read Concern: local
+Write Concern: majority
+===DONE===
diff --git a/mongodb-1.4.2/tests/manager/manager-executeReadWriteCommand_error-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeReadWriteCommand_error-001.phpt
new file mode 100644
index 00000000..2a56cc13
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeReadWriteCommand_error-001.phpt
@@ -0,0 +1,54 @@
+--TEST--
+MongoDB\Driver\Manager::executeReadWriteCommand() with invalid options
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given
+===DONE===
diff --git a/mongodb-1.4.2/tests/manager/manager-executeWriteCommand-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeWriteCommand-001.phpt
new file mode 100644
index 00000000..a74c4eee
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeWriteCommand-001.phpt
@@ -0,0 +1,40 @@
+--TEST--
+MongoDB\Driver\Manager::executeWriteCommand()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+
+$bw = new MongoDB\Driver\BulkWrite();
+$bw->insert(['a' => 1]);
+$manager->executeBulkWrite(NS, $bw);
+
+(new CommandObserver)->observe(
+ function() use ($manager) {
+ $command = new MongoDB\Driver\Command([
+ 'drop' => COLLECTION_NAME,
+ ]);
+ $manager->executeWriteCommand(
+ DATABASE_NAME,
+ $command,
+ [
+ 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY),
+ ]
+ );
+ },
+ function(stdClass $command) {
+ echo "Write Concern: ", $command->writeConcern->w, "\n";
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Write Concern: majority
+===DONE===
diff --git a/mongodb-1.4.2/tests/manager/manager-executeWriteCommand_error-001.phpt b/mongodb-1.4.2/tests/manager/manager-executeWriteCommand_error-001.phpt
new file mode 100644
index 00000000..9f1cf951
--- /dev/null
+++ b/mongodb-1.4.2/tests/manager/manager-executeWriteCommand_error-001.phpt
@@ -0,0 +1,42 @@
+--TEST--
+MongoDB\Driver\Manager::executeWriteCommand() with invalid options
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeWriteCommand(DATABASE_NAME, $command, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeWriteCommand(DATABASE_NAME, $command, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeWriteCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($manager, $command) {
+ $manager->executeWriteCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given
+===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-getreadconcern-001.phpt b/mongodb-1.4.2/tests/manager/manager-getreadconcern-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-getreadconcern-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-getreadconcern-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-getreadpreference-001.phpt b/mongodb-1.4.2/tests/manager/manager-getreadpreference-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-getreadpreference-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-getreadpreference-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-getservers-001.phpt b/mongodb-1.4.2/tests/manager/manager-getservers-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-getservers-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-getservers-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-getservers-002.phpt b/mongodb-1.4.2/tests/manager/manager-getservers-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-getservers-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-getservers-002.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-getwriteconcern-001.phpt b/mongodb-1.4.2/tests/manager/manager-getwriteconcern-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-getwriteconcern-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-getwriteconcern-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-invalidnamespace.phpt b/mongodb-1.4.2/tests/manager/manager-invalidnamespace.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-invalidnamespace.phpt
rename to mongodb-1.4.2/tests/manager/manager-invalidnamespace.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-selectserver-001.phpt b/mongodb-1.4.2/tests/manager/manager-selectserver-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-selectserver-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-selectserver-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-selectserver_error-001.phpt b/mongodb-1.4.2/tests/manager/manager-selectserver_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-selectserver_error-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-selectserver_error-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-set-uri-options-001.phpt b/mongodb-1.4.2/tests/manager/manager-set-uri-options-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-set-uri-options-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-set-uri-options-001.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager-set-uri-options-002.phpt b/mongodb-1.4.2/tests/manager/manager-set-uri-options-002.phpt
similarity index 98%
rename from mongodb-1.3.4/tests/manager/manager-set-uri-options-002.phpt
rename to mongodb-1.4.2/tests/manager/manager-set-uri-options-002.phpt
index 0be8bc9a..e20ccac9 100644
--- a/mongodb-1.3.4/tests/manager/manager-set-uri-options-002.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-set-uri-options-002.phpt
@@ -1,50 +1,51 @@
--TEST--
MongoDB\Driver\Manager: Logging into MongoDB using credentials from $options
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_SSL(); ?>
<?php NEEDS('STANDALONE_SSL'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$opts = array(
"ssl" => array(
"verify_peer" => false,
"verify_peer_name" => false,
"allow_self_signed" => true,
),
);
$context = stream_context_create($opts);
$options = array(
"ssl" => false,
"serverselectiontimeoutms" => 100,
);
/* The server requires SSL */
$manager = new MongoDB\Driver\Manager(STANDALONE_SSL, $options, array("context" => $context));
$bulk = new MongoDB\Driver\BulkWrite;
$bulk->insert(array("my" => "value"));
throws(function() use ($manager, $bulk) {
$inserted = $manager->executeBulkWrite(NS, $bulk)->getInsertedCount();
printf("Inserted incorrectly: %d\n", $inserted);
}, "Exception");
$options = array(
"ssl" => true,
);
$manager = new MongoDB\Driver\Manager(STANDALONE_SSL, $options, array("context" => $context));
$bulk = new MongoDB\Driver\BulkWrite;
$bulk->insert(array("my" => "value"));
$inserted = $manager->executeBulkWrite(NS, $bulk)->getInsertedCount();
printf("Inserted: %d\n", $inserted);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
OK: Got Exception
Inserted: 1
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-var-dump-001.phpt b/mongodb-1.4.2/tests/manager/manager-var-dump-001.phpt
similarity index 72%
rename from mongodb-1.3.4/tests/manager/manager-var-dump-001.phpt
rename to mongodb-1.4.2/tests/manager/manager-var-dump-001.phpt
index 1a8c238b..25fd6214 100644
--- a/mongodb-1.3.4/tests/manager/manager-var-dump-001.phpt
+++ b/mongodb-1.4.2/tests/manager/manager-var-dump-001.phpt
@@ -1,79 +1,62 @@
--TEST--
MongoDB\Driver\Manager: Constructing invalid manager
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(STANDALONE);
var_dump($manager);
$bulk = new MongoDB\Driver\BulkWrite;
$bulk->insert(array("my" => "value"));
$retval = $manager->executeBulkWrite(NS, $bulk);
var_dump($manager);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(MongoDB\Driver\Manager)#%d (%d) {
["uri"]=>
string(%d) "mongodb://%s"
["cluster"]=>
array(0) {
}
}
object(MongoDB\Driver\Manager)#%d (%d) {
["uri"]=>
string(%d) "mongodb://%s"
["cluster"]=>
array(1) {
[0]=>
array(10) {
["host"]=>
string(%d) "%s"
["port"]=>
int(%d)
["type"]=>
int(1)
["is_primary"]=>
bool(false)
["is_secondary"]=>
bool(false)
["is_arbiter"]=>
bool(false)
["is_hidden"]=>
bool(false)
["is_passive"]=>
bool(false)
["last_is_master"]=>
array(%d) {
- ["ismaster"]=>
- bool(true)
- ["maxBsonObjectSize"]=>
- int(16777216)
- ["maxMessageSizeBytes"]=>
- int(48000000)
- ["maxWriteBatchSize"]=>
- int(1000)
- ["localTime"]=>
- object(%s\UTCDateTime)#%d (%d) {
- ["milliseconds"]=>
- %r(int\(\d+\)|string\(\d+\) "\d+")%r
- }
- ["maxWireVersion"]=>
- int(%d)
- ["minWireVersion"]=>
- int(0)
%a
}
["round_trip_time"]=>
int(%d)
}
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/manager/manager-wakeup.phpt b/mongodb-1.4.2/tests/manager/manager-wakeup.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager-wakeup.phpt
rename to mongodb-1.4.2/tests/manager/manager-wakeup.phpt
diff --git a/mongodb-1.3.4/tests/manager/manager_error-001.phpt b/mongodb-1.4.2/tests/manager/manager_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/manager/manager_error-001.phpt
rename to mongodb-1.4.2/tests/manager/manager_error-001.phpt
diff --git a/mongodb-1.3.4/tests/query/bug0430-001.phpt b/mongodb-1.4.2/tests/query/bug0430-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/bug0430-001.phpt
rename to mongodb-1.4.2/tests/query/bug0430-001.phpt
diff --git a/mongodb-1.3.4/tests/query/bug0430-002.phpt b/mongodb-1.4.2/tests/query/bug0430-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/bug0430-002.phpt
rename to mongodb-1.4.2/tests/query/bug0430-002.phpt
diff --git a/mongodb-1.3.4/tests/query/bug0430-003.phpt b/mongodb-1.4.2/tests/query/bug0430-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/bug0430-003.phpt
rename to mongodb-1.4.2/tests/query/bug0430-003.phpt
diff --git a/mongodb-1.3.4/tests/query/bug0705-001.phpt b/mongodb-1.4.2/tests/query/bug0705-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/bug0705-001.phpt
rename to mongodb-1.4.2/tests/query/bug0705-001.phpt
diff --git a/mongodb-1.3.4/tests/query/bug0705-002.phpt b/mongodb-1.4.2/tests/query/bug0705-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/bug0705-002.phpt
rename to mongodb-1.4.2/tests/query/bug0705-002.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor-001.phpt b/mongodb-1.4.2/tests/query/query-ctor-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor-001.phpt
rename to mongodb-1.4.2/tests/query/query-ctor-001.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor-002.phpt b/mongodb-1.4.2/tests/query/query-ctor-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor-002.phpt
rename to mongodb-1.4.2/tests/query/query-ctor-002.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor-003.phpt b/mongodb-1.4.2/tests/query/query-ctor-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor-003.phpt
rename to mongodb-1.4.2/tests/query/query-ctor-003.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor-004.phpt b/mongodb-1.4.2/tests/query/query-ctor-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor-004.phpt
rename to mongodb-1.4.2/tests/query/query-ctor-004.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor-005.phpt b/mongodb-1.4.2/tests/query/query-ctor-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor-005.phpt
rename to mongodb-1.4.2/tests/query/query-ctor-005.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor-006.phpt b/mongodb-1.4.2/tests/query/query-ctor-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor-006.phpt
rename to mongodb-1.4.2/tests/query/query-ctor-006.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor_error-001.phpt b/mongodb-1.4.2/tests/query/query-ctor_error-001.phpt
similarity index 99%
rename from mongodb-1.3.4/tests/query/query-ctor_error-001.phpt
rename to mongodb-1.4.2/tests/query/query-ctor_error-001.phpt
index d55e0cff..d1018ef8 100644
--- a/mongodb-1.3.4/tests/query/query-ctor_error-001.phpt
+++ b/mongodb-1.4.2/tests/query/query-ctor_error-001.phpt
@@ -1,43 +1,43 @@
--TEST--
MongoDB\Driver\Query construction (invalid readConcern type)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$tests = [
1,
1.0,
'string',
true,
[],
new stdClass,
null,
];
foreach ($tests as $test) {
echo throws(function() use ($test) {
new MongoDB\Driver\Query([], ['readConcern' => $test]);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected "readConcern" option to be MongoDB\Driver\ReadConcern, integer given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected "readConcern" option to be MongoDB\Driver\ReadConcern, %r(double|float)%r given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected "readConcern" option to be MongoDB\Driver\ReadConcern, boolean given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected "readConcern" option to be MongoDB\Driver\ReadConcern, array given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Expected "readConcern" option to be MongoDB\Driver\ReadConcern, object given
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected "readConcern" option to be MongoDB\Driver\ReadConcern, %r(null|NULL)%r given
===DONE===
diff --git a/mongodb-1.3.4/tests/query/query-ctor_error-002.phpt b/mongodb-1.4.2/tests/query/query-ctor_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor_error-002.phpt
rename to mongodb-1.4.2/tests/query/query-ctor_error-002.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor_error-003.phpt b/mongodb-1.4.2/tests/query/query-ctor_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor_error-003.phpt
rename to mongodb-1.4.2/tests/query/query-ctor_error-003.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor_error-004.phpt b/mongodb-1.4.2/tests/query/query-ctor_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor_error-004.phpt
rename to mongodb-1.4.2/tests/query/query-ctor_error-004.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor_error-005.phpt b/mongodb-1.4.2/tests/query/query-ctor_error-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor_error-005.phpt
rename to mongodb-1.4.2/tests/query/query-ctor_error-005.phpt
diff --git a/mongodb-1.3.4/tests/query/query-ctor_error-006.phpt b/mongodb-1.4.2/tests/query/query-ctor_error-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-ctor_error-006.phpt
rename to mongodb-1.4.2/tests/query/query-ctor_error-006.phpt
diff --git a/mongodb-1.3.4/tests/query/query-debug-001.phpt b/mongodb-1.4.2/tests/query/query-debug-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query-debug-001.phpt
rename to mongodb-1.4.2/tests/query/query-debug-001.phpt
diff --git a/mongodb-1.3.4/tests/query/query_error-001.phpt b/mongodb-1.4.2/tests/query/query_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/query/query_error-001.phpt
rename to mongodb-1.4.2/tests/query/query_error-001.phpt
diff --git a/mongodb-1.3.4/tests/readConcern/readconcern-bsonserialize-001.phpt b/mongodb-1.4.2/tests/readConcern/readconcern-bsonserialize-001.phpt
similarity index 85%
rename from mongodb-1.3.4/tests/readConcern/readconcern-bsonserialize-001.phpt
rename to mongodb-1.4.2/tests/readConcern/readconcern-bsonserialize-001.phpt
index d093abba..fb0ef27a 100644
--- a/mongodb-1.3.4/tests/readConcern/readconcern-bsonserialize-001.phpt
+++ b/mongodb-1.4.2/tests/readConcern/readconcern-bsonserialize-001.phpt
@@ -1,27 +1,29 @@
--TEST--
MongoDB\Driver\ReadConcern::bsonSerialize()
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$tests = [
new MongoDB\Driver\ReadConcern(),
new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LINEARIZABLE),
new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LOCAL),
new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::MAJORITY),
+ new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::AVAILABLE),
];
foreach ($tests as $test) {
echo toJSON(fromPHP($test)), "\n";
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
{ }
{ "level" : "linearizable" }
{ "level" : "local" }
{ "level" : "majority" }
+{ "level" : "available" }
===DONE===
diff --git a/mongodb-1.3.4/tests/readConcern/readconcern-bsonserialize-002.phpt b/mongodb-1.4.2/tests/readConcern/readconcern-bsonserialize-002.phpt
similarity index 84%
rename from mongodb-1.3.4/tests/readConcern/readconcern-bsonserialize-002.phpt
rename to mongodb-1.4.2/tests/readConcern/readconcern-bsonserialize-002.phpt
index bb1fcbe0..bc1fa5cd 100644
--- a/mongodb-1.3.4/tests/readConcern/readconcern-bsonserialize-002.phpt
+++ b/mongodb-1.4.2/tests/readConcern/readconcern-bsonserialize-002.phpt
@@ -1,37 +1,42 @@
--TEST--
MongoDB\Driver\ReadConcern::bsonSerialize() returns an object
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$tests = [
new MongoDB\Driver\ReadConcern(),
new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LINEARIZABLE),
new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LOCAL),
new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::MAJORITY),
+ new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::AVAILABLE),
];
foreach ($tests as $test) {
var_dump($test->bsonSerialize());
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(stdClass)#%d (%d) {
}
object(stdClass)#%d (%d) {
["level"]=>
string(12) "linearizable"
}
object(stdClass)#%d (%d) {
["level"]=>
string(5) "local"
}
object(stdClass)#%d (%d) {
["level"]=>
string(8) "majority"
}
+object(stdClass)#%d (%d) {
+ ["level"]=>
+ string(9) "available"
+}
===DONE===
diff --git a/mongodb-1.3.4/tests/readConcern/readconcern-constants.phpt b/mongodb-1.4.2/tests/readConcern/readconcern-constants.phpt
similarity index 86%
rename from mongodb-1.3.4/tests/readConcern/readconcern-constants.phpt
rename to mongodb-1.4.2/tests/readConcern/readconcern-constants.phpt
index 96619ce3..e9e4193e 100644
--- a/mongodb-1.3.4/tests/readConcern/readconcern-constants.phpt
+++ b/mongodb-1.4.2/tests/readConcern/readconcern-constants.phpt
@@ -1,20 +1,22 @@
--TEST--
MongoDB\Driver\ReadConcern constants
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
var_dump(MongoDB\Driver\ReadConcern::LINEARIZABLE);
var_dump(MongoDB\Driver\ReadConcern::LOCAL);
var_dump(MongoDB\Driver\ReadConcern::MAJORITY);
+var_dump(MongoDB\Driver\ReadConcern::AVAILABLE);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
string(12) "linearizable"
string(5) "local"
string(8) "majority"
+string(9) "available"
===DONE===
diff --git a/mongodb-1.3.4/tests/readConcern/readconcern-ctor-001.phpt b/mongodb-1.4.2/tests/readConcern/readconcern-ctor-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readConcern/readconcern-ctor-001.phpt
rename to mongodb-1.4.2/tests/readConcern/readconcern-ctor-001.phpt
diff --git a/mongodb-1.3.4/tests/readConcern/readconcern-ctor_error-001.phpt b/mongodb-1.4.2/tests/readConcern/readconcern-ctor_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readConcern/readconcern-ctor_error-001.phpt
rename to mongodb-1.4.2/tests/readConcern/readconcern-ctor_error-001.phpt
diff --git a/mongodb-1.3.4/tests/readConcern/readconcern-ctor_error-002.phpt b/mongodb-1.4.2/tests/readConcern/readconcern-ctor_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readConcern/readconcern-ctor_error-002.phpt
rename to mongodb-1.4.2/tests/readConcern/readconcern-ctor_error-002.phpt
diff --git a/mongodb-1.3.4/tests/readConcern/readconcern-debug-001.phpt b/mongodb-1.4.2/tests/readConcern/readconcern-debug-001.phpt
similarity index 83%
rename from mongodb-1.3.4/tests/readConcern/readconcern-debug-001.phpt
rename to mongodb-1.4.2/tests/readConcern/readconcern-debug-001.phpt
index bf855190..8e3f4a31 100644
--- a/mongodb-1.3.4/tests/readConcern/readconcern-debug-001.phpt
+++ b/mongodb-1.4.2/tests/readConcern/readconcern-debug-001.phpt
@@ -1,37 +1,42 @@
--TEST--
MongoDB\Driver\ReadConcern debug output
--FILE--
<?php
require_once __DIR__ . '/../utils/tools.php';
$tests = [
new MongoDB\Driver\ReadConcern(),
new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LINEARIZABLE),
new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LOCAL),
new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::MAJORITY),
+ new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::AVAILABLE),
];
foreach ($tests as $test) {
var_dump($test);
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(MongoDB\Driver\ReadConcern)#%d (%d) {
}
object(MongoDB\Driver\ReadConcern)#%d (%d) {
["level"]=>
string(12) "linearizable"
}
object(MongoDB\Driver\ReadConcern)#%d (%d) {
["level"]=>
string(5) "local"
}
object(MongoDB\Driver\ReadConcern)#%d (%d) {
["level"]=>
string(8) "majority"
}
+object(MongoDB\Driver\ReadConcern)#%d (%d) {
+ ["level"]=>
+ string(9) "available"
+}
===DONE===
diff --git a/mongodb-1.3.4/tests/readConcern/readconcern-getlevel-001.phpt b/mongodb-1.4.2/tests/readConcern/readconcern-getlevel-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readConcern/readconcern-getlevel-001.phpt
rename to mongodb-1.4.2/tests/readConcern/readconcern-getlevel-001.phpt
diff --git a/mongodb-1.3.4/tests/readConcern/readconcern-isdefault-001.phpt b/mongodb-1.4.2/tests/readConcern/readconcern-isdefault-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readConcern/readconcern-isdefault-001.phpt
rename to mongodb-1.4.2/tests/readConcern/readconcern-isdefault-001.phpt
diff --git a/mongodb-1.3.4/tests/readConcern/readconcern_error-001.phpt b/mongodb-1.4.2/tests/readConcern/readconcern_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readConcern/readconcern_error-001.phpt
rename to mongodb-1.4.2/tests/readConcern/readconcern_error-001.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/bug0146-001.phpt b/mongodb-1.4.2/tests/readPreference/bug0146-001.phpt
similarity index 96%
rename from mongodb-1.3.4/tests/readPreference/bug0146-001.phpt
rename to mongodb-1.4.2/tests/readPreference/bug0146-001.phpt
index 5dc42210..9e7b8157 100644
--- a/mongodb-1.3.4/tests/readPreference/bug0146-001.phpt
+++ b/mongodb-1.4.2/tests/readPreference/bug0146-001.phpt
@@ -1,214 +1,224 @@
--TEST--
PHPC-146: ReadPreference primaryPreferred and secondary swapped (OP_QUERY)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE_30'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(STANDALONE_30);
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(array('my' => 'document'));
$manager->executeBulkWrite(NS, $bulk);
$rps = array(
MongoDB\Driver\ReadPreference::RP_PRIMARY,
MongoDB\Driver\ReadPreference::RP_PRIMARY_PREFERRED,
MongoDB\Driver\ReadPreference::RP_SECONDARY,
MongoDB\Driver\ReadPreference::RP_SECONDARY_PREFERRED,
MongoDB\Driver\ReadPreference::RP_NEAREST,
);
foreach($rps as $r) {
$rp = new MongoDB\Driver\ReadPreference($r);
$cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array("my" => "query")), $rp);
var_dump($cursor);
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(26) "readPreference_bug0146_001"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["my"]=>
string(5) "query"
}
["options"]=>
object(stdClass)#%d (%d) {
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(7) "primary"
}
["isDead"]=>
bool(true)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(26) "readPreference_bug0146_001"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["my"]=>
string(5) "query"
}
["options"]=>
object(stdClass)#%d (%d) {
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(16) "primaryPreferred"
}
["isDead"]=>
bool(true)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(26) "readPreference_bug0146_001"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["my"]=>
string(5) "query"
}
["options"]=>
object(stdClass)#%d (%d) {
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(9) "secondary"
}
["isDead"]=>
bool(true)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(26) "readPreference_bug0146_001"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["my"]=>
string(5) "query"
}
["options"]=>
object(stdClass)#%d (%d) {
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(18) "secondaryPreferred"
}
["isDead"]=>
bool(true)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(26) "readPreference_bug0146_001"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["my"]=>
string(5) "query"
}
["options"]=>
object(stdClass)#%d (%d) {
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(7) "nearest"
}
["isDead"]=>
bool(true)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/readPreference/bug0146-002.phpt b/mongodb-1.4.2/tests/readPreference/bug0146-002.phpt
similarity index 96%
rename from mongodb-1.3.4/tests/readPreference/bug0146-002.phpt
rename to mongodb-1.4.2/tests/readPreference/bug0146-002.phpt
index cdf6f7c2..a1a35a93 100644
--- a/mongodb-1.3.4/tests/readPreference/bug0146-002.phpt
+++ b/mongodb-1.4.2/tests/readPreference/bug0146-002.phpt
@@ -1,214 +1,224 @@
--TEST--
PHPC-146: ReadPreference primaryPreferred and secondary swapped (find command)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(STANDALONE);
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(array('my' => 'document'));
$manager->executeBulkWrite(NS, $bulk);
$rps = array(
MongoDB\Driver\ReadPreference::RP_PRIMARY,
MongoDB\Driver\ReadPreference::RP_PRIMARY_PREFERRED,
MongoDB\Driver\ReadPreference::RP_SECONDARY,
MongoDB\Driver\ReadPreference::RP_SECONDARY_PREFERRED,
MongoDB\Driver\ReadPreference::RP_NEAREST,
);
foreach($rps as $r) {
$rp = new MongoDB\Driver\ReadPreference($r);
$cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array("my" => "query")), $rp);
var_dump($cursor);
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(26) "readPreference_bug0146_002"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["my"]=>
string(5) "query"
}
["options"]=>
object(stdClass)#%d (%d) {
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(7) "primary"
}
["isDead"]=>
bool(true)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(26) "readPreference_bug0146_002"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["my"]=>
string(5) "query"
}
["options"]=>
object(stdClass)#%d (%d) {
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(16) "primaryPreferred"
}
["isDead"]=>
bool(true)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(26) "readPreference_bug0146_002"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["my"]=>
string(5) "query"
}
["options"]=>
object(stdClass)#%d (%d) {
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(9) "secondary"
}
["isDead"]=>
bool(true)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(26) "readPreference_bug0146_002"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["my"]=>
string(5) "query"
}
["options"]=>
object(stdClass)#%d (%d) {
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(18) "secondaryPreferred"
}
["isDead"]=>
bool(true)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
object(MongoDB\Driver\Cursor)#%d (%d) {
["database"]=>
string(6) "phongo"
["collection"]=>
string(26) "readPreference_bug0146_002"
["query"]=>
object(MongoDB\Driver\Query)#%d (%d) {
["filter"]=>
object(stdClass)#%d (%d) {
["my"]=>
string(5) "query"
}
["options"]=>
object(stdClass)#%d (%d) {
+ ["serverId"]=>
+ int(%d)
}
["readConcern"]=>
NULL
}
["command"]=>
NULL
["readPreference"]=>
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(7) "nearest"
}
["isDead"]=>
bool(true)
["currentIndex"]=>
int(0)
["currentDocument"]=>
NULL
["server"]=>
object(MongoDB\Driver\Server)#%d (%d) {
%a
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/readPreference/bug0851-001.phpt b/mongodb-1.4.2/tests/readPreference/bug0851-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/bug0851-001.phpt
rename to mongodb-1.4.2/tests/readPreference/bug0851-001.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-bsonserialize-001.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-bsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-bsonserialize-001.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-bsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-bsonserialize-002.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-bsonserialize-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-bsonserialize-002.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-bsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-ctor-001.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-ctor-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-ctor-001.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-ctor-001.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-ctor-002.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-ctor-002.phpt
similarity index 70%
rename from mongodb-1.3.4/tests/readPreference/readpreference-ctor-002.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-ctor-002.phpt
index 697e3be5..d3d1400a 100644
--- a/mongodb-1.3.4/tests/readPreference/readpreference-ctor-002.phpt
+++ b/mongodb-1.4.2/tests/readPreference/readpreference-ctor-002.phpt
@@ -1,96 +1,96 @@
--TEST--
MongoDB\Driver\ReadPreference construction with strings
--FILE--
<?php
$data = [
- "primary",
- "PrImAry",
- MongoDB\Driver\ReadPreference::RP_PRIMARY,
+ "primary",
+ "PrImAry",
+ MongoDB\Driver\ReadPreference::RP_PRIMARY,
- "primaryPreferred",
- "primarypreferred",
- MongoDB\Driver\ReadPreference::RP_PRIMARY_PREFERRED,
+ "primaryPreferred",
+ "primarypreferred",
+ MongoDB\Driver\ReadPreference::RP_PRIMARY_PREFERRED,
- "secondary",
- "SEcOndArY",
- MongoDB\Driver\ReadPreference::RP_SECONDARY,
+ "secondary",
+ "SEcOndArY",
+ MongoDB\Driver\ReadPreference::RP_SECONDARY,
- "secondaryPreferred",
- "secondarypreferred",
- MongoDB\Driver\ReadPreference::RP_SECONDARY_PREFERRED,
+ "secondaryPreferred",
+ "secondarypreferred",
+ MongoDB\Driver\ReadPreference::RP_SECONDARY_PREFERRED,
- "nearest",
- "NeaRest",
- MongoDB\Driver\ReadPreference::RP_NEAREST,
+ "nearest",
+ "NeaRest",
+ MongoDB\Driver\ReadPreference::RP_NEAREST,
];
foreach ($data as $item) {
- try {
- $rp = new MongoDB\Driver\ReadPreference($item);
- } catch (\InvalidArgumentException $e) {
- echo $e->getMessage(), "\n";
- }
- var_dump( $rp );
+ try {
+ $rp = new MongoDB\Driver\ReadPreference($item);
+ } catch (\InvalidArgumentException $e) {
+ echo $e->getMessage(), "\n";
+ }
+ var_dump( $rp );
}
?>
--EXPECTF--
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(7) "primary"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(7) "primary"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(7) "primary"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(16) "primaryPreferred"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(16) "primaryPreferred"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(16) "primaryPreferred"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(9) "secondary"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(9) "secondary"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(9) "secondary"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(18) "secondaryPreferred"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(18) "secondaryPreferred"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(18) "secondaryPreferred"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(7) "nearest"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(7) "nearest"
}
object(MongoDB\Driver\ReadPreference)#%d (%d) {
["mode"]=>
string(7) "nearest"
}
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-001.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-001.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-001.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-002.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-002.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-002.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-003.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-003.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-003.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-004.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-004.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-004.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-005.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-005.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-005.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-006.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-006.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-ctor_error-006.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-ctor_error-006.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-debug-001.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-debug-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-debug-001.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-debug-001.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-getMaxStalenessMS-001.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-getMaxStalenessMS-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-getMaxStalenessMS-001.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-getMaxStalenessMS-001.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-getMaxStalenessMS-002.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-getMaxStalenessMS-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-getMaxStalenessMS-002.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-getMaxStalenessMS-002.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-getMode-001.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-getMode-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-getMode-001.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-getMode-001.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-getTagSets-001.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-getTagSets-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-getTagSets-001.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-getTagSets-001.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference-getTagSets-002.phpt b/mongodb-1.4.2/tests/readPreference/readpreference-getTagSets-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference-getTagSets-002.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference-getTagSets-002.phpt
diff --git a/mongodb-1.3.4/tests/readPreference/readpreference_error-001.phpt b/mongodb-1.4.2/tests/readPreference/readpreference_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/readPreference/readpreference_error-001.phpt
rename to mongodb-1.4.2/tests/readPreference/readpreference_error-001.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/bug0155.phpt b/mongodb-1.4.2/tests/replicaset/bug0155.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/bug0155.phpt
rename to mongodb-1.4.2/tests/replicaset/bug0155.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/bug0898-001.phpt b/mongodb-1.4.2/tests/replicaset/bug0898-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/bug0898-001.phpt
rename to mongodb-1.4.2/tests/replicaset/bug0898-001.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/bug0898-002.phpt b/mongodb-1.4.2/tests/replicaset/bug0898-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/bug0898-002.phpt
rename to mongodb-1.4.2/tests/replicaset/bug0898-002.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/manager-getservers-001.phpt b/mongodb-1.4.2/tests/replicaset/manager-getservers-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/manager-getservers-001.phpt
rename to mongodb-1.4.2/tests/replicaset/manager-getservers-001.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/manager-selectserver-001.phpt b/mongodb-1.4.2/tests/replicaset/manager-selectserver-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/manager-selectserver-001.phpt
rename to mongodb-1.4.2/tests/replicaset/manager-selectserver-001.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/readconcern-001.phpt b/mongodb-1.4.2/tests/replicaset/readconcern-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/readconcern-001.phpt
rename to mongodb-1.4.2/tests/replicaset/readconcern-001.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/readconcern-002.phpt b/mongodb-1.4.2/tests/replicaset/readconcern-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/readconcern-002.phpt
rename to mongodb-1.4.2/tests/replicaset/readconcern-002.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/server-001.phpt b/mongodb-1.4.2/tests/replicaset/server-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/server-001.phpt
rename to mongodb-1.4.2/tests/replicaset/server-001.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/server-002.phpt b/mongodb-1.4.2/tests/replicaset/server-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/server-002.phpt
rename to mongodb-1.4.2/tests/replicaset/server-002.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/writeconcernerror-001.phpt b/mongodb-1.4.2/tests/replicaset/writeconcernerror-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/writeconcernerror-001.phpt
rename to mongodb-1.4.2/tests/replicaset/writeconcernerror-001.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/writeconcernerror-002.phpt b/mongodb-1.4.2/tests/replicaset/writeconcernerror-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/writeconcernerror-002.phpt
rename to mongodb-1.4.2/tests/replicaset/writeconcernerror-002.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/writeresult-getserver-001.phpt b/mongodb-1.4.2/tests/replicaset/writeresult-getserver-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/writeresult-getserver-001.phpt
rename to mongodb-1.4.2/tests/replicaset/writeresult-getserver-001.phpt
diff --git a/mongodb-1.3.4/tests/replicaset/writeresult-getserver-002.phpt b/mongodb-1.4.2/tests/replicaset/writeresult-getserver-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/replicaset/writeresult-getserver-002.phpt
rename to mongodb-1.4.2/tests/replicaset/writeresult-getserver-002.phpt
diff --git a/mongodb-1.4.2/tests/retryable-writes/retryable-writes-001.phpt b/mongodb-1.4.2/tests/retryable-writes/retryable-writes-001.phpt
new file mode 100644
index 00000000..0d5e6874
--- /dev/null
+++ b/mongodb-1.4.2/tests/retryable-writes/retryable-writes-001.phpt
@@ -0,0 +1,83 @@
+--TEST--
+Retryable writes: supported single-statement operations include transaction IDs
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class TransactionIdObserver implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ $command = $event->getCommand();
+ $hasTransactionId = isset($command->lsid) && isset($command->txnNumber);
+
+ printf("%s command includes transaction ID: %s\n", $event->getCommandName(), $hasTransactionId ? 'yes' : 'no');
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+$observer = new TransactionIdObserver;
+MongoDB\Driver\Monitoring\addSubscriber($observer);
+
+$manager = new MongoDB\Driver\Manager(REPLICASET, ['retryWrites' => true]);
+
+echo "Testing deleteOne\n";
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->delete(['x' => 1], ['limit' => 1]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting insertOne\n";
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->insert(['x' => 1]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting replaceOne\n";
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->update(['x' => 1], ['x' => 2]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting updateOne\n";
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->update(['x' => 1], ['$inc' => ['x' => 1]]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting findAndModify\n";
+$command = new MongoDB\Driver\Command([
+ 'findAndModify' => COLLECTION_NAME,
+ 'query' => ['x' => 1],
+ 'update' => ['$inc' => ['x' => 1]],
+]);
+$manager->executeReadWriteCommand(DATABASE_NAME, $command);
+
+MongoDB\Driver\Monitoring\removeSubscriber($observer);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing deleteOne
+delete command includes transaction ID: yes
+
+Testing insertOne
+insert command includes transaction ID: yes
+
+Testing replaceOne
+update command includes transaction ID: yes
+
+Testing updateOne
+update command includes transaction ID: yes
+
+Testing findAndModify
+findAndModify command includes transaction ID: yes
+===DONE===
\ No newline at end of file
diff --git a/mongodb-1.4.2/tests/retryable-writes/retryable-writes-002.phpt b/mongodb-1.4.2/tests/retryable-writes/retryable-writes-002.phpt
new file mode 100644
index 00000000..a1fab305
--- /dev/null
+++ b/mongodb-1.4.2/tests/retryable-writes/retryable-writes-002.phpt
@@ -0,0 +1,84 @@
+--TEST--
+Retryable writes: supported multi-statement operations include transaction IDs
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class TransactionIdObserver implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ $command = $event->getCommand();
+ $hasTransactionId = isset($command->lsid) && isset($command->txnNumber);
+
+ printf("%s command includes transaction ID: %s\n", $event->getCommandName(), $hasTransactionId ? 'yes' : 'no');
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+$observer = new TransactionIdObserver;
+MongoDB\Driver\Monitoring\addSubscriber($observer);
+
+$manager = new MongoDB\Driver\Manager(REPLICASET, ['retryWrites' => true]);
+
+echo "Testing multi-statement bulk write (ordered=true)\n";
+$bulk = new MongoDB\Driver\BulkWrite(['ordered' => true]);
+$bulk->delete(['x' => 1], ['limit' => 1]);
+$bulk->insert(['x' => 1]);
+$bulk->update(['x' => 1], ['$inc' => ['x' => 1]]);
+$bulk->update(['x' => 1], ['x' => 2]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting multi-statement bulk write (ordered=false)\n";
+$bulk = new MongoDB\Driver\BulkWrite(['ordered' => false]);
+$bulk->delete(['x' => 1], ['limit' => 1]);
+$bulk->insert(['x' => 1]);
+$bulk->update(['x' => 1], ['$inc' => ['x' => 1]]);
+$bulk->update(['x' => 1], ['x' => 2]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting insertMany (ordered=true)\n";
+$bulk = new MongoDB\Driver\BulkWrite(['ordered' => true]);
+$bulk->insert(['x' => 1]);
+$bulk->insert(['x' => 2]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting insertMany (ordered=false)\n";
+$bulk = new MongoDB\Driver\BulkWrite(['ordered' => false]);
+$bulk->insert(['x' => 1]);
+$bulk->insert(['x' => 2]);
+$manager->executeBulkWrite(NS, $bulk);
+
+MongoDB\Driver\Monitoring\removeSubscriber($observer);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing multi-statement bulk write (ordered=true)
+delete command includes transaction ID: yes
+insert command includes transaction ID: yes
+update command includes transaction ID: yes
+
+Testing multi-statement bulk write (ordered=false)
+delete command includes transaction ID: yes
+insert command includes transaction ID: yes
+update command includes transaction ID: yes
+
+Testing insertMany (ordered=true)
+insert command includes transaction ID: yes
+
+Testing insertMany (ordered=false)
+insert command includes transaction ID: yes
+===DONE===
\ No newline at end of file
diff --git a/mongodb-1.4.2/tests/retryable-writes/retryable-writes-003.phpt b/mongodb-1.4.2/tests/retryable-writes/retryable-writes-003.phpt
new file mode 100644
index 00000000..bcd4f944
--- /dev/null
+++ b/mongodb-1.4.2/tests/retryable-writes/retryable-writes-003.phpt
@@ -0,0 +1,98 @@
+--TEST--
+Retryable writes: unsupported operations do not include transaction IDs
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class TransactionIdObserver implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ $command = $event->getCommand();
+ $hasTransactionId = isset($command->lsid) && isset($command->txnNumber);
+
+ printf("%s command includes transaction ID: %s\n", $event->getCommandName(), $hasTransactionId ? 'yes' : 'no');
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+$observer = new TransactionIdObserver;
+MongoDB\Driver\Monitoring\addSubscriber($observer);
+
+$manager = new MongoDB\Driver\Manager(REPLICASET, ['retryWrites' => true]);
+
+echo "Testing deleteMany\n";
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->delete(['x' => 1], ['limit' => 0]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting updateMany\n";
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->update(['x' => 1], ['$inc' => ['x' => 1]], ['multi' => true]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting multi-statement bulk write with one unsupported operation (ordered=true)\n";
+$bulk = new MongoDB\Driver\BulkWrite(['ordered' => true]);
+$bulk->delete(['x' => 1], ['limit' => 1]);
+$bulk->insert(['x' => 1]);
+$bulk->update(['x' => 1], ['$inc' => ['x' => 1]]);
+$bulk->update(['x' => 1], ['x' => 2]);
+$bulk->update(['x' => 1], ['$inc' => ['x' => 1]], ['multi' => true]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting multi-statement bulk write with one unsupported operation (ordered=false)\n";
+$bulk = new MongoDB\Driver\BulkWrite(['ordered' => false]);
+$bulk->delete(['x' => 1], ['limit' => 1]);
+$bulk->insert(['x' => 1]);
+$bulk->update(['x' => 1], ['$inc' => ['x' => 1]]);
+$bulk->update(['x' => 1], ['x' => 2]);
+$bulk->update(['x' => 1], ['$inc' => ['x' => 1]], ['multi' => true]);
+$manager->executeBulkWrite(NS, $bulk);
+
+echo "\nTesting aggregate\n";
+$command = new MongoDB\Driver\Command([
+ 'aggregate' => COLLECTION_NAME,
+ 'pipeline' => [
+ ['$match' => ['x' => 1]],
+ ['$out' => COLLECTION_NAME . '.out'],
+ ],
+ 'cursor' => new stdClass,
+]);
+$manager->executeReadWriteCommand(DATABASE_NAME, $command);
+
+MongoDB\Driver\Monitoring\removeSubscriber($observer);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing deleteMany
+delete command includes transaction ID: no
+
+Testing updateMany
+update command includes transaction ID: no
+
+Testing multi-statement bulk write with one unsupported operation (ordered=true)
+delete command includes transaction ID: yes
+insert command includes transaction ID: yes
+update command includes transaction ID: no
+
+Testing multi-statement bulk write with one unsupported operation (ordered=false)
+delete command includes transaction ID: yes
+insert command includes transaction ID: yes
+update command includes transaction ID: no
+
+Testing aggregate
+aggregate command includes transaction ID: no
+===DONE===
\ No newline at end of file
diff --git a/mongodb-1.4.2/tests/retryable-writes/retryable-writes-004.phpt b/mongodb-1.4.2/tests/retryable-writes/retryable-writes-004.phpt
new file mode 100644
index 00000000..9058f7f3
--- /dev/null
+++ b/mongodb-1.4.2/tests/retryable-writes/retryable-writes-004.phpt
@@ -0,0 +1,87 @@
+--TEST--
+Retryable writes: unacknowledged write operations do not include transaction IDs
+--XFAIL--
+Depends on CDRIVER-2432
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class TransactionIdObserver implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ $command = $event->getCommand();
+ $hasTransactionId = isset($command->lsid) && isset($command->txnNumber);
+
+ printf("%s command includes transaction ID: %s\n", $event->getCommandName(), $hasTransactionId ? 'yes' : 'no');
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+$observer = new TransactionIdObserver;
+MongoDB\Driver\Monitoring\addSubscriber($observer);
+
+$manager = new MongoDB\Driver\Manager(REPLICASET, ['retryWrites' => true]);
+$writeConcern = new MongoDB\Driver\WriteConcern(0);
+
+echo "Testing unacknowledged deleteOne\n";
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->delete(['x' => 1], ['limit' => 1]);
+$manager->executeBulkWrite(NS, $bulk, ['writeConcern' => $writeConcern]);
+
+echo "\nTesting unacknowledged insertOne\n";
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->insert(['x' => 1]);
+$manager->executeBulkWrite(NS, $bulk, ['writeConcern' => $writeConcern]);
+
+echo "\nTesting unacknowledged replaceOne\n";
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->update(['x' => 1], ['x' => 2]);
+$manager->executeBulkWrite(NS, $bulk, ['writeConcern' => $writeConcern]);
+
+echo "\nTesting unacknowledged updateOne\n";
+$bulk = new MongoDB\Driver\BulkWrite;
+$bulk->update(['x' => 1], ['$inc' => ['x' => 1]]);
+$manager->executeBulkWrite(NS, $bulk, ['writeConcern' => $writeConcern]);
+
+echo "\nTesting unacknowledged findAndModify\n";
+$command = new MongoDB\Driver\Command([
+ 'findAndModify' => COLLECTION_NAME,
+ 'query' => ['x' => 1],
+ 'update' => ['$inc' => ['x' => 1]],
+ 'writeConcern' => $writeConcern,
+]);
+$manager->executeReadWriteCommand(DATABASE_NAME, $command);
+
+MongoDB\Driver\Monitoring\removeSubscriber($observer);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing unacknowledged deleteOne
+delete command includes transaction ID: no
+
+Testing unacknowledged insertOne
+insert command includes transaction ID: no
+
+Testing unacknowledged replaceOne
+update command includes transaction ID: no
+
+Testing unacknowledged updateOne
+update command includes transaction ID: no
+
+Testing unacknowledged findAndModify
+findAndModify command includes transaction ID: no
+===DONE===
diff --git a/mongodb-1.4.2/tests/retryable-writes/retryable-writes-005.phpt b/mongodb-1.4.2/tests/retryable-writes/retryable-writes-005.phpt
new file mode 100644
index 00000000..af033d76
--- /dev/null
+++ b/mongodb-1.4.2/tests/retryable-writes/retryable-writes-005.phpt
@@ -0,0 +1,69 @@
+--TEST--
+Retryable writes: non-write command methods do not include transaction IDs
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class TransactionIdObserver implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ $command = $event->getCommand();
+ $hasTransactionId = isset($command->lsid) && isset($command->txnNumber);
+
+ printf("%s command includes transaction ID: %s\n", $event->getCommandName(), $hasTransactionId ? 'yes' : 'no');
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+$observer = new TransactionIdObserver;
+MongoDB\Driver\Monitoring\addSubscriber($observer);
+
+$manager = new MongoDB\Driver\Manager(REPLICASET, ['retryWrites' => true]);
+$command = new MongoDB\Driver\Command([
+ 'findAndModify' => COLLECTION_NAME,
+ 'query' => ['x' => 1],
+ 'update' => ['$inc' => ['x' => 1]],
+]);
+
+echo "Testing Manager::executeCommand()\n";
+$manager->executeCommand(DATABASE_NAME, $command);
+
+echo "\nTesting Manager::executeReadCommand()\n";
+$manager->executeReadCommand(DATABASE_NAME, $command);
+
+echo "\nTesting Manager::executeReadWriteCommand()\n";
+$manager->executeReadWriteCommand(DATABASE_NAME, $command);
+
+echo "\nTesting Manager::executeWriteCommand()\n";
+$manager->executeWriteCommand(DATABASE_NAME, $command);
+
+MongoDB\Driver\Monitoring\removeSubscriber($observer);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing Manager::executeCommand()
+findAndModify command includes transaction ID: no
+
+Testing Manager::executeReadCommand()
+findAndModify command includes transaction ID: no
+
+Testing Manager::executeReadWriteCommand()
+findAndModify command includes transaction ID: yes
+
+Testing Manager::executeWriteCommand()
+findAndModify command includes transaction ID: yes
+===DONE===
\ No newline at end of file
diff --git a/mongodb-1.3.4/tests/server/bug0671-002.phpt b/mongodb-1.4.2/tests/server/bug0671-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/bug0671-002.phpt
rename to mongodb-1.4.2/tests/server/bug0671-002.phpt
diff --git a/mongodb-1.3.4/tests/server/server-constants.phpt b/mongodb-1.4.2/tests/server/server-constants.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-constants.phpt
rename to mongodb-1.4.2/tests/server/server-constants.phpt
diff --git a/mongodb-1.3.4/tests/server/server-construct-001.phpt b/mongodb-1.4.2/tests/server/server-construct-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-construct-001.phpt
rename to mongodb-1.4.2/tests/server/server-construct-001.phpt
diff --git a/mongodb-1.3.4/tests/server/server-debug.phpt b/mongodb-1.4.2/tests/server/server-debug.phpt
similarity index 67%
rename from mongodb-1.3.4/tests/server/server-debug.phpt
rename to mongodb-1.4.2/tests/server/server-debug.phpt
index ae1ea56c..5caa1f2a 100644
--- a/mongodb-1.3.4/tests/server/server-debug.phpt
+++ b/mongodb-1.4.2/tests/server/server-debug.phpt
@@ -1,61 +1,43 @@
--TEST--
MongoDB\Driver\Server debugInfo
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(STANDALONE);
$server = $manager->executeQuery(NS, new MongoDB\Driver\Query(array()))->getServer();
var_dump($server);
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
object(MongoDB\Driver\Server)#%d (%d) {
["host"]=>
string(%d) "%s"
["port"]=>
int(%d)
["type"]=>
int(1)
["is_primary"]=>
bool(false)
["is_secondary"]=>
bool(false)
["is_arbiter"]=>
bool(false)
["is_hidden"]=>
bool(false)
["is_passive"]=>
bool(false)
["last_is_master"]=>
array(%d) {
- ["ismaster"]=>
- bool(true)
- ["maxBsonObjectSize"]=>
- int(16777216)
- ["maxMessageSizeBytes"]=>
- int(48000000)
- ["maxWriteBatchSize"]=>
- int(1000)
- ["localTime"]=>
- object(%s\UTCDateTime)#%d (%d) {
- ["milliseconds"]=>
- %r(int\(\d+\)|string\(\d+\) "\d+")%r
- }
- ["maxWireVersion"]=>
- int(%d)
- ["minWireVersion"]=>
- int(0)
%a
- float(1)
}
["round_trip_time"]=>
int(%d)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-errors.phpt b/mongodb-1.4.2/tests/server/server-errors.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-errors.phpt
rename to mongodb-1.4.2/tests/server/server-errors.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeBulkWrite-001.phpt b/mongodb-1.4.2/tests/server/server-executeBulkWrite-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeBulkWrite-001.phpt
rename to mongodb-1.4.2/tests/server/server-executeBulkWrite-001.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeBulkWrite-002.phpt b/mongodb-1.4.2/tests/server/server-executeBulkWrite-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeBulkWrite-002.phpt
rename to mongodb-1.4.2/tests/server/server-executeBulkWrite-002.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeBulkWrite-003.phpt b/mongodb-1.4.2/tests/server/server-executeBulkWrite-003.phpt
similarity index 90%
copy from mongodb-1.3.4/tests/server/server-executeBulkWrite-003.phpt
copy to mongodb-1.4.2/tests/server/server-executeBulkWrite-003.phpt
index 94cf4742..e24e716a 100644
--- a/mongodb-1.3.4/tests/server/server-executeBulkWrite-003.phpt
+++ b/mongodb-1.4.2/tests/server/server-executeBulkWrite-003.phpt
@@ -1,36 +1,36 @@
--TEST--
-MongoDB\Driver\Server::executeBulkWrite() with write concern (replica set primary)
+MongoDB\Driver\Server::executeBulkWrite() with legacy write concern (replica set primary)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(REPLICASET);
$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY));
$writeConcerns = array(0, 1, 2, MongoDB\Driver\WriteConcern::MAJORITY);
foreach ($writeConcerns as $wc) {
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(array('wc' => $wc));
$result = $server->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern($wc));
var_dump($result->isAcknowledged());
var_dump($result->getInsertedCount());
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
bool(false)
NULL
bool(true)
int(1)
bool(true)
int(1)
bool(true)
int(1)
===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-executeBulkWrite-004.phpt b/mongodb-1.4.2/tests/server/server-executeBulkWrite-004.phpt
similarity index 74%
copy from mongodb-1.3.4/tests/server/server-executeBulkWrite-004.phpt
copy to mongodb-1.4.2/tests/server/server-executeBulkWrite-004.phpt
index 1537e1be..0faf039a 100644
--- a/mongodb-1.3.4/tests/server/server-executeBulkWrite-004.phpt
+++ b/mongodb-1.4.2/tests/server/server-executeBulkWrite-004.phpt
@@ -1,43 +1,34 @@
--TEST--
-MongoDB\Driver\Server::executeBulkWrite() with write concern (replica set secondary)
+MongoDB\Driver\Server::executeBulkWrite() with legacy write concern (replica set secondary)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(REPLICASET);
$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY));
-$bulk = new MongoDB\Driver\BulkWrite();
-$bulk->insert(array('wc' => 0));
-
-$result = $server->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(0));
-var_dump($result->isAcknowledged());
-var_dump($result->getInsertedCount());
-
$writeConcerns = array(1, 2, MongoDB\Driver\WriteConcern::MAJORITY);
foreach ($writeConcerns as $wc) {
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(array('wc' => $wc));
echo throws(function() use ($server, $bulk, $wc) {
$server->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern($wc));
}, "MongoDB\Driver\Exception\RuntimeException"), "\n";
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-bool(false)
-NULL
OK: Got MongoDB\Driver\Exception\RuntimeException
not master
OK: Got MongoDB\Driver\Exception\RuntimeException
not master
OK: Got MongoDB\Driver\Exception\RuntimeException
not master
===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-executeBulkWrite-005.phpt b/mongodb-1.4.2/tests/server/server-executeBulkWrite-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeBulkWrite-005.phpt
rename to mongodb-1.4.2/tests/server/server-executeBulkWrite-005.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeBulkWrite-003.phpt b/mongodb-1.4.2/tests/server/server-executeBulkWrite-006.phpt
similarity index 65%
rename from mongodb-1.3.4/tests/server/server-executeBulkWrite-003.phpt
rename to mongodb-1.4.2/tests/server/server-executeBulkWrite-006.phpt
index 94cf4742..c2609383 100644
--- a/mongodb-1.3.4/tests/server/server-executeBulkWrite-003.phpt
+++ b/mongodb-1.4.2/tests/server/server-executeBulkWrite-006.phpt
@@ -1,36 +1,40 @@
--TEST--
-MongoDB\Driver\Server::executeBulkWrite() with write concern (replica set primary)
+MongoDB\Driver\Server::executeBulkWrite() with legacy write concern (replica set primary)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(REPLICASET);
$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY));
-$writeConcerns = array(0, 1, 2, MongoDB\Driver\WriteConcern::MAJORITY);
+$writeConcerns = [0, 1, 2, MongoDB\Driver\WriteConcern::MAJORITY];
foreach ($writeConcerns as $wc) {
$bulk = new MongoDB\Driver\BulkWrite();
- $bulk->insert(array('wc' => $wc));
+ $bulk->insert(['wc' => $wc]);
- $result = $server->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern($wc));
+ $options = [
+ 'writeConcern' => new MongoDB\Driver\WriteConcern($wc),
+ ];
+
+ $result = $server->executeBulkWrite(NS, $bulk, $options);
var_dump($result->isAcknowledged());
var_dump($result->getInsertedCount());
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
bool(false)
NULL
bool(true)
int(1)
bool(true)
int(1)
bool(true)
int(1)
===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-executeBulkWrite-004.phpt b/mongodb-1.4.2/tests/server/server-executeBulkWrite-007.phpt
similarity index 62%
rename from mongodb-1.3.4/tests/server/server-executeBulkWrite-004.phpt
rename to mongodb-1.4.2/tests/server/server-executeBulkWrite-007.phpt
index 1537e1be..53499352 100644
--- a/mongodb-1.3.4/tests/server/server-executeBulkWrite-004.phpt
+++ b/mongodb-1.4.2/tests/server/server-executeBulkWrite-007.phpt
@@ -1,43 +1,38 @@
--TEST--
MongoDB\Driver\Server::executeBulkWrite() with write concern (replica set secondary)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(REPLICASET);
$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY));
-$bulk = new MongoDB\Driver\BulkWrite();
-$bulk->insert(array('wc' => 0));
-
-$result = $server->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(0));
-var_dump($result->isAcknowledged());
-var_dump($result->getInsertedCount());
-
-$writeConcerns = array(1, 2, MongoDB\Driver\WriteConcern::MAJORITY);
+$writeConcerns = [1, 2, MongoDB\Driver\WriteConcern::MAJORITY];
foreach ($writeConcerns as $wc) {
$bulk = new MongoDB\Driver\BulkWrite();
- $bulk->insert(array('wc' => $wc));
+ $bulk->insert(['wc' => $wc]);
+
+ $options = [
+ 'writeConcern' => new MongoDB\Driver\WriteConcern($wc),
+ ];
- echo throws(function() use ($server, $bulk, $wc) {
- $server->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern($wc));
+ echo throws(function() use ($server, $bulk, $options) {
+ $server->executeBulkWrite(NS, $bulk, $options);
}, "MongoDB\Driver\Exception\RuntimeException"), "\n";
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
-bool(false)
-NULL
OK: Got MongoDB\Driver\Exception\RuntimeException
not master
OK: Got MongoDB\Driver\Exception\RuntimeException
not master
OK: Got MongoDB\Driver\Exception\RuntimeException
not master
===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-executeBulkWrite_error-001.phpt b/mongodb-1.4.2/tests/server/server-executeBulkWrite_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeBulkWrite_error-001.phpt
rename to mongodb-1.4.2/tests/server/server-executeBulkWrite_error-001.phpt
diff --git a/mongodb-1.4.2/tests/server/server-executeBulkWrite_error-002.phpt b/mongodb-1.4.2/tests/server/server-executeBulkWrite_error-002.phpt
new file mode 100644
index 00000000..23680ffb
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeBulkWrite_error-002.phpt
@@ -0,0 +1,49 @@
+--TEST--
+MongoDB\Driver\Server::executeBulkWrite() with invalid options
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY));
+
+echo throws(function() use ($server) {
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 1]);
+ $server->executeBulkWrite(NS, $bulk, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server) {
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 1]);
+ $server->executeBulkWrite(NS, $bulk, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server) {
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 1]);
+ $server->executeBulkWrite(NS, $bulk, ['writeConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server) {
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 1]);
+ $server->executeBulkWrite(NS, $bulk, ['writeConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given
+===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-executeCommand-001.phpt b/mongodb-1.4.2/tests/server/server-executeCommand-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeCommand-001.phpt
rename to mongodb-1.4.2/tests/server/server-executeCommand-001.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeCommand-002.phpt b/mongodb-1.4.2/tests/server/server-executeCommand-002.phpt
similarity index 91%
rename from mongodb-1.3.4/tests/server/server-executeCommand-002.phpt
rename to mongodb-1.4.2/tests/server/server-executeCommand-002.phpt
index 9fdec58c..8de8cbd8 100644
--- a/mongodb-1.3.4/tests/server/server-executeCommand-002.phpt
+++ b/mongodb-1.4.2/tests/server/server-executeCommand-002.phpt
@@ -1,69 +1,73 @@
--TEST--
MongoDB\Driver\Server::executeCommand() takes a read preference
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET, DATABASE_NAME, 'system.profile'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(REPLICASET);
$rp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
$secondary = $manager->selectServer($rp);
$command = new MongoDB\Driver\Command(array('profile' => 2));
$cursor = $secondary->executeCommand(DATABASE_NAME, $command);
$result = current($cursor->toArray());
printf("Set profile level to 2 successfully: %s\n", (empty($result->ok) ? 'no' : 'yes'));
-$command = new MongoDB\Driver\Command(array(
+$command = new MongoDB\Driver\Command([
'aggregate' => COLLECTION_NAME,
- 'pipeline' => array(array('$match' => array('x' => 1))),
-));
+ 'pipeline' => [ [ '$match' => [ 'x' => 1 ] ] ],
+ 'cursor' => (object) [],
+]);
$secondary->executeCommand(DATABASE_NAME, $command, $rp);
$query = new MongoDB\Driver\Query(
array(
'op' => 'command',
'ns' => DATABASE_NAME . '.' . COLLECTION_NAME,
),
array(
'sort' => array('ts' => -1),
'limit' => 1,
)
);
$cursor = $secondary->executeQuery(DATABASE_NAME . '.system.profile', $query, $rp);
$profileEntry = current($cursor->toArray());
var_dump($profileEntry->command);
$command = new MongoDB\Driver\Command(array('profile' => 0));
$cursor = $secondary->executeCommand(DATABASE_NAME, $command);
$result = current($cursor->toArray());
printf("Set profile level to 0 successfully: %s\n", (empty($result->ok) ? 'no' : 'yes'));
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
Set profile level to 2 successfully: yes
object(stdClass)#%d (%d) {
["aggregate"]=>
string(32) "server_server_executeCommand_002"
["pipeline"]=>
array(1) {
[0]=>
object(stdClass)#%d (%d) {
["$match"]=>
object(stdClass)#%d (%d) {
["x"]=>
int(1)
}
}
}
+ ["cursor"]=>
+ object(stdClass)#%d (%d) {
+ }%A
}
Set profile level to 0 successfully: yes
===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-executeCommand-003.phpt b/mongodb-1.4.2/tests/server/server-executeCommand-003.phpt
similarity index 98%
rename from mongodb-1.3.4/tests/server/server-executeCommand-003.phpt
rename to mongodb-1.4.2/tests/server/server-executeCommand-003.phpt
index 00c4692e..9fe0b7b1 100644
--- a/mongodb-1.3.4/tests/server/server-executeCommand-003.phpt
+++ b/mongodb-1.4.2/tests/server/server-executeCommand-003.phpt
@@ -1,34 +1,34 @@
--TEST--
MongoDB\Driver\Server::executeCommand() with conflicting read preference for secondary
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('REPLICASET'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(REPLICASET);
$secondaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
$secondary = $manager->selectServer($secondaryRp);
/* Note: this is testing that the read preference (even a conflicting one) has
* no effect when directly querying a server, since the slaveOk flag is always
* set for hinted commands. */
$primaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
$cursor = $secondary->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(array('ping' => 1)), $primaryRp);
var_dump($cursor->toArray());
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
array(1) {
[0]=>
object(stdClass)#%d (%d) {
["ok"]=>
- float(1)
+ float(1)%A
}
}
===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeCommand-004.phpt b/mongodb-1.4.2/tests/server/server-executeCommand-004.phpt
new file mode 100644
index 00000000..4d37ec51
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeCommand-004.phpt
@@ -0,0 +1,43 @@
+--TEST--
+MongoDB\Driver\Server::executeCommand() takes a read preference in options array
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+$primaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
+$secondaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
+
+$primary = $manager->selectServer($primaryRp);
+$secondary = $manager->selectServer($secondaryRp);
+
+echo "Testing primary:\n";
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$cursor = $primary->executeCommand(DATABASE_NAME, $command, ['readPreference' => $primaryRp]);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+
+echo "Testing secondary:\n";
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$cursor = $secondary->executeCommand(DATABASE_NAME, $command, ['readPreference' => $secondaryRp]);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Testing primary:
+is_primary: true
+is_secondary: false
+
+Testing secondary:
+is_primary: false
+is_secondary: true
+
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeCommand-005.phpt b/mongodb-1.4.2/tests/server/server-executeCommand-005.phpt
new file mode 100644
index 00000000..33da0b1e
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeCommand-005.phpt
@@ -0,0 +1,43 @@
+--TEST--
+MongoDB\Driver\Server::executeCommand() takes a read preference as legacy option
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+$primaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
+$secondaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
+
+$primary = $manager->selectServer($primaryRp);
+$secondary = $manager->selectServer($secondaryRp);
+
+echo "Testing primary:\n";
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$cursor = $primary->executeCommand(DATABASE_NAME, $command, $primaryRp);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+
+echo "Testing secondary:\n";
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$cursor = $secondary->executeCommand(DATABASE_NAME, $command, $secondaryRp);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Testing primary:
+is_primary: true
+is_secondary: false
+
+Testing secondary:
+is_primary: false
+is_secondary: true
+
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeCommand-006.phpt b/mongodb-1.4.2/tests/server/server-executeCommand-006.phpt
new file mode 100644
index 00000000..351abce8
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeCommand-006.phpt
@@ -0,0 +1,48 @@
+--TEST--
+MongoDB\Driver\Server::executeCommand() options (MONGO_CMD_RAW)
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY));
+
+(new CommandObserver)->observe(
+ function() use ($server) {
+ $command = new MongoDB\Driver\Command([
+ 'ping' => true,
+ ]);
+
+ try {
+ $server->executeCommand(
+ DATABASE_NAME,
+ $command,
+ [
+ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_SECONDARY),
+ 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::LOCAL),
+ 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY),
+ ]
+ );
+ } catch ( Exception $e ) {
+ // Ignore exception that ping doesn't support writeConcern
+ }
+ },
+ function(stdClass $command) {
+ echo "Read Preference: ", $command->{'$readPreference'}->mode, "\n";
+ echo "Read Concern: ", $command->readConcern->level, "\n";
+ echo "Write Concern: ", $command->writeConcern->w, "\n";
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Read Preference: secondary
+Read Concern: local
+Write Concern: majority
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeCommand_error-001.phpt b/mongodb-1.4.2/tests/server/server-executeCommand_error-001.phpt
new file mode 100644
index 00000000..18c76b55
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeCommand_error-001.phpt
@@ -0,0 +1,67 @@
+--TEST--
+MongoDB\Driver\Server::executeCommand() with invalid options (MONGOC_CMD_RAW)
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY));
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+
+echo throws(function() use ($server, $command) {
+ $server->executeCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeCommand(DATABASE_NAME, $command, ['readPreference' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeCommand(DATABASE_NAME, $command, ['readPreference' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeCommand(DATABASE_NAME, $command, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeCommand(DATABASE_NAME, $command, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given
+===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-executeQuery-001.phpt b/mongodb-1.4.2/tests/server/server-executeQuery-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeQuery-001.phpt
rename to mongodb-1.4.2/tests/server/server-executeQuery-001.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeQuery-002.phpt b/mongodb-1.4.2/tests/server/server-executeQuery-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeQuery-002.phpt
rename to mongodb-1.4.2/tests/server/server-executeQuery-002.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeQuery-003.phpt b/mongodb-1.4.2/tests/server/server-executeQuery-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeQuery-003.phpt
rename to mongodb-1.4.2/tests/server/server-executeQuery-003.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeQuery-004.phpt b/mongodb-1.4.2/tests/server/server-executeQuery-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeQuery-004.phpt
rename to mongodb-1.4.2/tests/server/server-executeQuery-004.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeQuery-005.phpt b/mongodb-1.4.2/tests/server/server-executeQuery-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeQuery-005.phpt
rename to mongodb-1.4.2/tests/server/server-executeQuery-005.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeQuery-006.phpt b/mongodb-1.4.2/tests/server/server-executeQuery-006.phpt
similarity index 90%
rename from mongodb-1.3.4/tests/server/server-executeQuery-006.phpt
rename to mongodb-1.4.2/tests/server/server-executeQuery-006.phpt
index 9889bfc5..fbf85378 100644
--- a/mongodb-1.3.4/tests/server/server-executeQuery-006.phpt
+++ b/mongodb-1.4.2/tests/server/server-executeQuery-006.phpt
@@ -1,63 +1,60 @@
--TEST--
MongoDB\Driver\Server::executeQuery() takes a read preference (find command)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET, DATABASE_NAME, 'system.profile'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(REPLICASET);
$rp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
$secondary = $manager->selectServer($rp);
$command = new MongoDB\Driver\Command(array('profile' => 2));
$cursor = $secondary->executeCommand(DATABASE_NAME, $command);
$result = current($cursor->toArray());
printf("Set profile level to 2 successfully: %s\n", (empty($result->ok) ? 'no' : 'yes'));
if (empty($result->ok)) {
exit("Could not set profile level\n");
}
$secondary->executeQuery(NS, new MongoDB\Driver\Query(array("x" => 1)), $rp);
$query = new MongoDB\Driver\Query(
array(
'op' => 'query',
'ns' => NS,
),
array(
'sort' => array('ts' => -1),
'limit' => 1,
)
);
$cursor = $secondary->executeQuery(DATABASE_NAME . '.system.profile', $query, $rp);
$profileEntry = current($cursor->toArray());
-var_dump($profileEntry->query);
+var_dump($profileEntry->command->find);
+var_dump($profileEntry->command->filter);
$command = new MongoDB\Driver\Command(array('profile' => 0));
$cursor = $secondary->executeCommand(DATABASE_NAME, $command);
$result = current($cursor->toArray());
printf("Set profile level to 0 successfully: %s\n", (empty($result->ok) ? 'no' : 'yes'));
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
Set profile level to 2 successfully: yes
-object(stdClass)#%d (%d) {
- ["find"]=>
- string(%d) "%s"
- ["filter"]=>
- object(stdClass)#%d (1) {
- ["x"]=>
- int(1)
- }
+string(%d) "%s"
+object(stdClass)#%d (1) {
+ ["x"]=>
+ int(1)
}
Set profile level to 0 successfully: yes
===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-executeQuery-007.phpt b/mongodb-1.4.2/tests/server/server-executeQuery-007.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeQuery-007.phpt
rename to mongodb-1.4.2/tests/server/server-executeQuery-007.phpt
diff --git a/mongodb-1.3.4/tests/server/server-executeQuery-008.phpt b/mongodb-1.4.2/tests/server/server-executeQuery-008.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-executeQuery-008.phpt
rename to mongodb-1.4.2/tests/server/server-executeQuery-008.phpt
diff --git a/mongodb-1.4.2/tests/server/server-executeQuery-009.phpt b/mongodb-1.4.2/tests/server/server-executeQuery-009.phpt
new file mode 100644
index 00000000..b6966552
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeQuery-009.phpt
@@ -0,0 +1,48 @@
+--TEST--
+MongoDB\Driver\Server::executeQuery() takes a read preference in options array
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+// load fixtures for test
+$bulk = new MongoDB\Driver\BulkWrite();
+$bulk->insert(['_id' => 1, 'x' => 2, 'y' => 3]);
+$manager->executeBulkWrite(NS, $bulk);
+
+$primaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
+$secondaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
+
+$primary = $manager->selectServer($primaryRp);
+$secondary = $manager->selectServer($secondaryRp);
+
+echo "Testing primary:\n";
+$query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]);
+$cursor = $manager->executeQuery(NS, $query, ['readPreference' => $primaryRp]);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+
+echo "Testing secondary:\n";
+$query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]);
+$cursor = $manager->executeQuery(NS, $query, ['readPreference' => $secondaryRp]);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Testing primary:
+is_primary: true
+is_secondary: false
+
+Testing secondary:
+is_primary: false
+is_secondary: true
+
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeQuery-010.phpt b/mongodb-1.4.2/tests/server/server-executeQuery-010.phpt
new file mode 100644
index 00000000..43891bcc
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeQuery-010.phpt
@@ -0,0 +1,48 @@
+--TEST--
+MongoDB\Driver\Server::executeQuery() takes a read preference as legacy option
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+
+// load fixtures for test
+$bulk = new MongoDB\Driver\BulkWrite();
+$bulk->insert(['_id' => 1, 'x' => 2, 'y' => 3]);
+$manager->executeBulkWrite(NS, $bulk);
+
+$primaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
+$secondaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY);
+
+$primary = $manager->selectServer($primaryRp);
+$secondary = $manager->selectServer($secondaryRp);
+
+echo "Testing primary:\n";
+$query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]);
+$cursor = $manager->executeQuery(NS, $query, $primaryRp);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+
+echo "Testing secondary:\n";
+$query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]);
+$cursor = $manager->executeQuery(NS, $query, $secondaryRp);
+
+echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n";
+echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n";
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Testing primary:
+is_primary: true
+is_secondary: false
+
+Testing secondary:
+is_primary: false
+is_secondary: true
+
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeQuery_error-001.phpt b/mongodb-1.4.2/tests/server/server-executeQuery_error-001.phpt
new file mode 100644
index 00000000..a7723b34
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeQuery_error-001.phpt
@@ -0,0 +1,43 @@
+--TEST--
+MongoDB\Driver\Server::executeQuery() with invalid options
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY));
+
+$query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]);
+
+echo throws(function() use ($server, $query) {
+ $server->executeQuery(NS, $query, ['readPreference' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $query) {
+ $server->executeQuery(NS, $query, ['readPreference' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $query) {
+ $server->executeQuery(NS, $query, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $query) {
+ $server->executeQuery(NS, $query, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeReadCommand-001.phpt b/mongodb-1.4.2/tests/server/server-executeReadCommand-001.phpt
new file mode 100644
index 00000000..3e73d760
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeReadCommand-001.phpt
@@ -0,0 +1,43 @@
+--TEST--
+MongoDB\Driver\Server::executeReadCommand()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); NEEDS_STORAGE_ENGINE(STANDALONE, "wiredTiger"); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY));
+
+(new CommandObserver)->observe(
+ function() use ($server) {
+ $command = new MongoDB\Driver\Command( [
+ 'aggregate' => NS,
+ 'pipeline' => [],
+ 'cursor' => new stdClass(),
+ ] );
+ $server->executeReadCommand(
+ DATABASE_NAME,
+ $command,
+ [
+ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_SECONDARY),
+ 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::MAJORITY),
+ ]
+ );
+ },
+ function(stdClass $command) {
+ echo "Read Preference: ", $command->{'$readPreference'}->mode, "\n";
+ echo "Read Concern: ", $command->readConcern->level, "\n";
+ }
+);
+
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Read Preference: secondary
+Read Concern: majority
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeReadCommand_error-001.phpt b/mongodb-1.4.2/tests/server/server-executeReadCommand_error-001.phpt
new file mode 100644
index 00000000..ece8318f
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeReadCommand_error-001.phpt
@@ -0,0 +1,55 @@
+--TEST--
+MongoDB\Driver\Server::executeReadCommand() with invalid options
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY));
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadCommand(DATABASE_NAME, $command, ['readPreference' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadCommand(DATABASE_NAME, $command, ['readPreference' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadCommand(DATABASE_NAME, $command, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadCommand(DATABASE_NAME, $command, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeReadWriteCommand-001.phpt b/mongodb-1.4.2/tests/server/server-executeReadWriteCommand-001.phpt
new file mode 100644
index 00000000..d2ba16d6
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeReadWriteCommand-001.phpt
@@ -0,0 +1,41 @@
+--TEST--
+MongoDB\Driver\Server::executeReadWriteCommand()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY));
+
+(new CommandObserver)->observe(
+ function() use ($server) {
+ $command = new MongoDB\Driver\Command( [
+ 'findAndModify' => NS,
+ 'update' => [ '$set' => [ 'foo' => 'bar' ] ],
+ ] );
+ $server->executeReadWriteCommand(
+ DATABASE_NAME,
+ $command,
+ [
+ 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::LOCAL),
+ 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY),
+ ]
+ );
+ },
+ function(stdClass $command) {
+ echo "Read Concern: ", $command->readConcern->level, "\n";
+ echo "Write Concern: ", $command->writeConcern->w, "\n";
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Read Concern: local
+Write Concern: majority
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeReadWriteCommand_error-001.phpt b/mongodb-1.4.2/tests/server/server-executeReadWriteCommand_error-001.phpt
new file mode 100644
index 00000000..7735cec6
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeReadWriteCommand_error-001.phpt
@@ -0,0 +1,55 @@
+--TEST--
+MongoDB\Driver\Server::executeReadWriteCommand() with invalid options
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY));
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadWriteCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadWriteCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadWriteCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeReadWriteCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeWriteCommand-001.phpt b/mongodb-1.4.2/tests/server/server-executeWriteCommand-001.phpt
new file mode 100644
index 00000000..f003a268
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeWriteCommand-001.phpt
@@ -0,0 +1,41 @@
+--TEST--
+MongoDB\Driver\Server::executeWriteCommand()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY));
+
+$bw = new MongoDB\Driver\BulkWrite();
+$bw->insert(['a' => 1]);
+$manager->executeBulkWrite(NS, $bw);
+
+(new CommandObserver)->observe(
+ function() use ($server) {
+ $command = new MongoDB\Driver\Command([
+ 'drop' => COLLECTION_NAME,
+ ]);
+ $server->executeWriteCommand(
+ DATABASE_NAME,
+ $command,
+ [
+ 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY),
+ ]
+ );
+ },
+ function(stdClass $command) {
+ echo "Write Concern: ", $command->writeConcern->w, "\n";
+ }
+);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Write Concern: majority
+===DONE===
diff --git a/mongodb-1.4.2/tests/server/server-executeWriteCommand_error-001.phpt b/mongodb-1.4.2/tests/server/server-executeWriteCommand_error-001.phpt
new file mode 100644
index 00000000..7ca75926
--- /dev/null
+++ b/mongodb-1.4.2/tests/server/server-executeWriteCommand_error-001.phpt
@@ -0,0 +1,44 @@
+--TEST--
+MongoDB\Driver\Server::executeWriteCommand() with invalid options
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+require_once __DIR__ . "/../utils/observer.php";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY));
+
+$command = new MongoDB\Driver\Command([]);
+
+echo throws(function() use ($server, $command) {
+ $server->executeWriteCommand(DATABASE_NAME, $command, ['session' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeWriteCommand(DATABASE_NAME, $command, ['session' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeWriteCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo throws(function() use ($server, $command) {
+ $server->executeWriteCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "session" option to be MongoDB\Driver\Session, stdClass given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given
+===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-getInfo-001.phpt b/mongodb-1.4.2/tests/server/server-getInfo-001.phpt
similarity index 57%
rename from mongodb-1.3.4/tests/server/server-getInfo-001.phpt
rename to mongodb-1.4.2/tests/server/server-getInfo-001.phpt
index e8c5f335..9d1f9b10 100644
--- a/mongodb-1.3.4/tests/server/server-getInfo-001.phpt
+++ b/mongodb-1.4.2/tests/server/server-getInfo-001.phpt
@@ -1,40 +1,22 @@
--TEST--
MongoDB\Driver\Server::getInfo()
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$manager = new MongoDB\Driver\Manager(STANDALONE);
try{
var_dump($manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY))->getInfo());
} catch (Exception $e) {}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
array(%d) {
- ["ismaster"]=>
- bool(true)
- ["maxBsonObjectSize"]=>
- int(16777216)
- ["maxMessageSizeBytes"]=>
- int(48000000)
- ["maxWriteBatchSize"]=>
- int(1000)
- ["localTime"]=>
- object(%s\UTCDateTime)#%d (%d) {
- ["milliseconds"]=>
- %r(int\(\d+\)|string\(\d+\) "\d+")%r
- }
- ["maxWireVersion"]=>
- int(%d)
- ["minWireVersion"]=>
- int(0)
%a
- float(1)
}
===DONE===
diff --git a/mongodb-1.3.4/tests/server/server-getTags-001.phpt b/mongodb-1.4.2/tests/server/server-getTags-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-getTags-001.phpt
rename to mongodb-1.4.2/tests/server/server-getTags-001.phpt
diff --git a/mongodb-1.3.4/tests/server/server-getTags-002.phpt b/mongodb-1.4.2/tests/server/server-getTags-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server-getTags-002.phpt
rename to mongodb-1.4.2/tests/server/server-getTags-002.phpt
diff --git a/mongodb-1.3.4/tests/server/server_error-001.phpt b/mongodb-1.4.2/tests/server/server_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/server/server_error-001.phpt
rename to mongodb-1.4.2/tests/server/server_error-001.phpt
diff --git a/mongodb-1.4.2/tests/session/session-001.phpt b/mongodb-1.4.2/tests/session/session-001.phpt
new file mode 100644
index 00000000..7516e7e3
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-001.phpt
@@ -0,0 +1,28 @@
+--TEST--
+MongoDB\Driver\Session spec test: Pool is LIFO
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('STANDALONE'); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+
+$firstSession = $manager->startSession();
+$firstSessionId = $firstSession->getLogicalSessionId();
+
+unset($firstSession);
+
+$secondSession = $manager->startSession();
+$secondSessionId = $secondSession->getLogicalSessionId();
+
+var_dump($firstSessionId == $secondSessionId);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+bool(true)
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-002.phpt b/mongodb-1.4.2/tests/session/session-002.phpt
new file mode 100644
index 00000000..9521e7da
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-002.phpt
@@ -0,0 +1,171 @@
+--TEST--
+MongoDB\Driver\Session spec test: $clusterTime in commands
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); CLEANUP(REPLICASET); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class Test implements MongoDB\Driver\Monitoring\CommandSubscriber
+{
+ private $lastSeenClusterTime;
+
+ public function aggregate()
+ {
+ $this->lastSeenClusterTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $command = new MongoDB\Driver\Command([
+ 'aggregate' => COLLECTION_NAME,
+ 'pipeline' => [],
+ 'cursor' => new stdClass(),
+ ]);
+ $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => $session]);
+ $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => $session]);
+
+ printf("Session reports last seen \$clusterTime: %s\n", ($session->getClusterTime() == $this->lastSeenClusterTime) ? 'yes' : 'no');
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function find()
+ {
+ $this->lastSeenClusterTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+
+ printf("Session reports last seen \$clusterTime: %s\n", ($session->getClusterTime() == $this->lastSeenClusterTime) ? 'yes' : 'no');
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function insert()
+ {
+ $this->lastSeenClusterTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 1]);
+ $manager->executeBulkWrite(NS, $bulk, ['session' => $session]);
+
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 2]);
+ $manager->executeBulkWrite(NS, $bulk, ['session' => $session]);
+
+ printf("Session reports last seen \$clusterTime: %s\n", ($session->getClusterTime() == $this->lastSeenClusterTime) ? 'yes' : 'no');
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function ping()
+ {
+ $this->lastSeenClusterTime = null;
+
+ MongoDB\Driver\Monitoring\addSubscriber($this);
+
+ $manager = new MongoDB\Driver\Manager(REPLICASET);
+ $session = $manager->startSession();
+
+ $command = new MongoDB\Driver\Command(['ping' => 1]);
+ $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]);
+ $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]);
+
+ printf("Session reports last seen \$clusterTime: %s\n", ($session->getClusterTime() == $this->lastSeenClusterTime) ? 'yes' : 'no');
+
+ MongoDB\Driver\Monitoring\removeSubscriber($this);
+ }
+
+ public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
+ {
+ $command = $event->getCommand();
+ $hasClusterTime = isset($command->{'$clusterTime'});
+
+ printf("%s command includes \$clusterTime: %s\n", $event->getCommandName(), $hasClusterTime ? 'yes' : 'no');
+
+ if ($hasClusterTime && $this->lastSeenClusterTime !== null) {
+ printf("%s command uses last seen \$clusterTime: %s\n", $event->getCommandName(), ($command->{'$clusterTime'} == $this->lastSeenClusterTime) ? 'yes' : 'no');
+ }
+ }
+
+ public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
+ {
+ $reply = $event->getReply();
+ $hasClusterTime = isset($reply->{'$clusterTime'});
+
+ printf("%s command reply includes \$clusterTime: %s\n", $event->getCommandName(), $hasClusterTime ? 'yes' : 'no');
+
+ if ($hasClusterTime) {
+ $this->lastSeenClusterTime = $reply->{'$clusterTime'};
+ }
+ }
+
+ public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
+ {
+ }
+}
+
+echo "\nTesting aggregate command\n";
+(new Test)->aggregate();
+
+echo "\nTesting find command\n";
+(new Test)->find();
+
+echo "\nTesting insert command\n";
+(new Test)->insert();
+
+echo "\nTesting ping command\n";
+(new Test)->ping();
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing aggregate command
+aggregate command includes $clusterTime: yes
+aggregate command reply includes $clusterTime: yes
+aggregate command includes $clusterTime: yes
+aggregate command uses last seen $clusterTime: yes
+aggregate command reply includes $clusterTime: yes
+Session reports last seen $clusterTime: yes
+
+Testing find command
+find command includes $clusterTime: yes
+find command reply includes $clusterTime: yes
+find command includes $clusterTime: yes
+find command uses last seen $clusterTime: yes
+find command reply includes $clusterTime: yes
+Session reports last seen $clusterTime: yes
+
+Testing insert command
+insert command includes $clusterTime: yes
+insert command reply includes $clusterTime: yes
+insert command includes $clusterTime: yes
+insert command uses last seen $clusterTime: yes
+insert command reply includes $clusterTime: yes
+Session reports last seen $clusterTime: yes
+
+Testing ping command
+ping command includes $clusterTime: yes
+ping command reply includes $clusterTime: yes
+ping command includes $clusterTime: yes
+ping command uses last seen $clusterTime: yes
+ping command reply includes $clusterTime: yes
+Session reports last seen $clusterTime: yes
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-003.phpt b/mongodb-1.4.2/tests/session/session-003.phpt
new file mode 100644
index 00000000..7e638035
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-003.phpt
@@ -0,0 +1,52 @@
+--TEST--
+MongoDB\Driver\Session spec test: session cannot be used for different clients
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('STANDALONE'); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); CLEANUP(STANDALONE); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+// Vary heartbeatFrequencyMS to ensure each Manager gets a different client
+$manager = new MongoDB\Driver\Manager(STANDALONE, ['heartbeatFrequencyMS' => 60000]);
+$otherManager = new MongoDB\Driver\Manager(STANDALONE, ['heartbeatFrequencyMS' => 90000]);
+
+// Create a session with the second Manager (associated with different client)
+$session = $otherManager->startSession();
+
+echo "\nTesting executeBulkWrite()\n";
+echo throws(function() use ($manager, $session) {
+ $bulk = new MongoDB\Driver\BulkWrite();
+ $bulk->insert(['x' => 1]);
+ $manager->executeBulkWrite(NS, $bulk, ['session' => $session]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo "\nTesting executeCommand()\n";
+echo throws(function() use ($manager, $session) {
+ $command = new MongoDB\Driver\Command(['ping' => 1]);
+ $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+echo "\nTesting executeQuery()\n";
+echo throws(function() use ($manager, $session) {
+ $query = new MongoDB\Driver\Query([]);
+ $manager->executeQuery(NS, $query, ['session' => $session]);
+}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Testing executeBulkWrite()
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Cannot use Session started from a different Manager
+
+Testing executeCommand()
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Cannot use Session started from a different Manager
+
+Testing executeQuery()
+OK: Got MongoDB\Driver\Exception\InvalidArgumentException
+Cannot use Session started from a different Manager
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-advanceClusterTime-001.phpt b/mongodb-1.4.2/tests/session/session-advanceClusterTime-001.phpt
new file mode 100644
index 00000000..2cc4f098
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-advanceClusterTime-001.phpt
@@ -0,0 +1,51 @@
+--TEST--
+MongoDB\Driver\Session::advanceClusterTime()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$sessionA = $manager->startSession();
+$sessionB = $manager->startSession();
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$manager->executeCommand(DATABASE_NAME, $command, ['session' => $sessionA]);
+
+echo "Initial cluster time of session B:\n";
+var_dump($sessionB->getClusterTime());
+
+$sessionB->advanceClusterTime($sessionA->getClusterTime());
+
+echo "\nCluster time after advancing session B:\n";
+var_dump($sessionB->getClusterTime());
+
+echo "\nSessions A and B have equivalent cluster times:\n";
+var_dump($sessionA->getClusterTime() == $sessionB->getClusterTime());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Initial cluster time of session B:
+NULL
+
+Cluster time after advancing session B:
+object(stdClass)#%d (%d) {
+ ["clusterTime"]=>
+ object(MongoDB\BSON\Timestamp)#%d (%d) {
+ ["increment"]=>
+ string(%d) "%d"
+ ["timestamp"]=>
+ string(%d) "%d"
+ }
+ ["signature"]=>
+ %a
+}
+
+Sessions A and B have equivalent cluster times:
+bool(true)
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-advanceOperationTime-001.phpt b/mongodb-1.4.2/tests/session/session-advanceOperationTime-001.phpt
new file mode 100644
index 00000000..494a12f9
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-advanceOperationTime-001.phpt
@@ -0,0 +1,46 @@
+--TEST--
+MongoDB\Driver\Session::advanceOperationTime()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$sessionA = $manager->startSession();
+$sessionB = $manager->startSession();
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$manager->executeCommand(DATABASE_NAME, $command, ['session' => $sessionA]);
+
+echo "Initial operation time of session B:\n";
+var_dump($sessionB->getOperationTime());
+
+$sessionB->advanceOperationTime($sessionA->getOperationTime());
+
+echo "\nOperation time after advancing session B:\n";
+var_dump($sessionB->getOperationTime());
+
+echo "\nSessions A and B have equivalent operation times:\n";
+var_dump($sessionA->getOperationTime() == $sessionB->getOperationTime());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Initial operation time of session B:
+NULL
+
+Operation time after advancing session B:
+object(MongoDB\BSON\Timestamp)#%d (%d) {
+ ["increment"]=>
+ string(%d) "%d"
+ ["timestamp"]=>
+ string(%d) "%d"
+}
+
+Sessions A and B have equivalent operation times:
+bool(true)
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-advanceOperationTime-002.phpt b/mongodb-1.4.2/tests/session/session-advanceOperationTime-002.phpt
new file mode 100644
index 00000000..c19e5b8b
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-advanceOperationTime-002.phpt
@@ -0,0 +1,36 @@
+--TEST--
+MongoDB\Driver\Session::advanceOperationTime() with Timestamp
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$session = $manager->startSession();
+
+echo "Initial operation time of session:\n";
+var_dump($session->getOperationTime());
+
+$session->advanceOperationTime(new MongoDB\BSON\Timestamp(5678, 1234));
+
+echo "\nOperation time after advancing session:\n";
+var_dump($session->getOperationTime());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Initial operation time of session:
+NULL
+
+Operation time after advancing session:
+object(MongoDB\BSON\Timestamp)#%d (%d) {
+ ["increment"]=>
+ string(4) "5678"
+ ["timestamp"]=>
+ string(4) "1234"
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-advanceOperationTime-003.phpt b/mongodb-1.4.2/tests/session/session-advanceOperationTime-003.phpt
new file mode 100644
index 00000000..de8a0225
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-advanceOperationTime-003.phpt
@@ -0,0 +1,54 @@
+--TEST--
+MongoDB\Driver\Session::advanceOperationTime() with TimestampInterface
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class MyTimestamp implements MongoDB\BSON\TimestampInterface
+{
+ public function getIncrement()
+ {
+ return 5678;
+ }
+
+ public function getTimestamp()
+ {
+ return 1234;
+ }
+
+ public function __toString()
+ {
+ return sprintf('[%d:%d]', $this->getIncrement(), $this->getTimestamp());
+ }
+}
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$session = $manager->startSession();
+
+echo "Initial operation time of session:\n";
+var_dump($session->getOperationTime());
+
+$session->advanceOperationTime(new MyTimestamp);
+
+echo "\nOperation time after advancing session:\n";
+var_dump($session->getOperationTime());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Initial operation time of session:
+NULL
+
+Operation time after advancing session:
+object(MongoDB\BSON\Timestamp)#%d (%d) {
+ ["increment"]=>
+ string(4) "5678"
+ ["timestamp"]=>
+ string(4) "1234"
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-advanceOperationTime_error-001.phpt b/mongodb-1.4.2/tests/session/session-advanceOperationTime_error-001.phpt
new file mode 100644
index 00000000..deb16db3
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-advanceOperationTime_error-001.phpt
@@ -0,0 +1,91 @@
+--TEST--
+MongoDB\Driver\Session::advanceOperationTime() with TimestampInterface exceptions
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+class MyTimestamp implements MongoDB\BSON\TimestampInterface
+{
+ private $failIncrement;
+ private $failTimestamp;
+
+ public function __construct($failIncrement, $failTimestamp)
+ {
+ $this->failIncrement = $failIncrement;
+ $this->failTimestamp = $failTimestamp;
+ }
+
+ public function getIncrement()
+ {
+ if ($this->failIncrement) {
+ throw new Exception('getIncrement() failed');
+ }
+
+ return 5678;
+ }
+
+ public function getTimestamp()
+ {
+ if ($this->failTimestamp) {
+ throw new Exception('getTimestamp() failed');
+ }
+
+ return 1234;
+ }
+
+ public function __toString()
+ {
+ return sprintf('[%d:%d]', $this->getIncrement(), $this->getTimestamp());
+ }
+}
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$session = $manager->startSession();
+
+echo "Initial operation time of session:\n";
+var_dump($session->getOperationTime());
+
+$timestamps = [
+ new MyTimestamp(true, false),
+ new MyTimestamp(false, true),
+ new MyTimestamp(true, true),
+];
+
+foreach ($timestamps as $timestamp) {
+ echo "\n", throws(function() use ($session, $timestamp) {
+ $session->advanceOperationTime($timestamp);
+ }, 'Exception'), "\n";
+
+ echo "\nOperation time after advancing session fails:\n";
+ var_dump($session->getOperationTime());
+}
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECT--
+Initial operation time of session:
+NULL
+
+OK: Got Exception
+getIncrement() failed
+
+Operation time after advancing session fails:
+NULL
+
+OK: Got Exception
+getTimestamp() failed
+
+Operation time after advancing session fails:
+NULL
+
+OK: Got Exception
+getTimestamp() failed
+
+Operation time after advancing session fails:
+NULL
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-debug-001.phpt b/mongodb-1.4.2/tests/session/session-debug-001.phpt
new file mode 100644
index 00000000..89d95ba0
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-debug-001.phpt
@@ -0,0 +1,38 @@
+--TEST--
+MongoDB\Driver\Session debug output (before an operation)
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('STANDALONE'); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+$session = $manager->startSession();
+
+var_dump($session);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+object(MongoDB\Driver\Session)#%d (%d) {
+ ["logicalSessionId"]=>
+ array(1) {
+ ["id"]=>
+ object(MongoDB\BSON\Binary)#%d (%d) {
+ ["data"]=>
+ string(16) "%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c"
+ ["type"]=>
+ int(4)
+ }
+ }
+ ["clusterTime"]=>
+ NULL
+ ["causalConsistency"]=>
+ bool(true)
+ ["operationTime"]=>
+ NULL
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-debug-002.phpt b/mongodb-1.4.2/tests/session/session-debug-002.phpt
new file mode 100644
index 00000000..0de46fec
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-debug-002.phpt
@@ -0,0 +1,56 @@
+--TEST--
+MongoDB\Driver\Session debug output (after an operation)
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$session = $manager->startSession();
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]);
+
+var_dump($session);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+object(MongoDB\Driver\Session)#%d (%d) {
+ ["logicalSessionId"]=>
+ array(1) {
+ ["id"]=>
+ object(MongoDB\BSON\Binary)#%d (%d) {
+ ["data"]=>
+ string(16) "%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c"
+ ["type"]=>
+ int(4)
+ }
+ }
+ ["clusterTime"]=>
+ array(2) {
+ ["clusterTime"]=>
+ object(MongoDB\BSON\Timestamp)#%d (%d) {
+ ["increment"]=>
+ string(%d) "%d"
+ ["timestamp"]=>
+ string(%d) "%d"
+ }
+ ["signature"]=>
+ %a
+ }
+ ["causalConsistency"]=>
+ bool(true)
+ ["operationTime"]=>
+ object(MongoDB\BSON\Timestamp)#%d (%d) {
+ ["increment"]=>
+ string(%d) "%d"
+ ["timestamp"]=>
+ string(%d) "%d"
+ }
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-debug-003.phpt b/mongodb-1.4.2/tests/session/session-debug-003.phpt
new file mode 100644
index 00000000..eb61cfa6
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-debug-003.phpt
@@ -0,0 +1,38 @@
+--TEST--
+MongoDB\Driver\Session debug output (causalConsistency=false)
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('STANDALONE'); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+$session = $manager->startSession(['causalConsistency' => false]);
+
+var_dump($session);
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+object(MongoDB\Driver\Session)#%d (%d) {
+ ["logicalSessionId"]=>
+ array(1) {
+ ["id"]=>
+ object(MongoDB\BSON\Binary)#%d (%d) {
+ ["data"]=>
+ string(16) "%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c"
+ ["type"]=>
+ int(4)
+ }
+ }
+ ["clusterTime"]=>
+ NULL
+ ["causalConsistency"]=>
+ bool(false)
+ ["operationTime"]=>
+ NULL
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-getClusterTime-001.phpt b/mongodb-1.4.2/tests/session/session-getClusterTime-001.phpt
new file mode 100644
index 00000000..374714f8
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-getClusterTime-001.phpt
@@ -0,0 +1,42 @@
+--TEST--
+MongoDB\Driver\Session::getClusterTime()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$session = $manager->startSession();
+
+echo "Initial cluster time:\n";
+var_dump($session->getClusterTime());
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]);
+
+echo "\nCluster time after command:\n";
+var_dump($session->getClusterTime());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Initial cluster time:
+NULL
+
+Cluster time after command:
+object(stdClass)#%d (%d) {
+ ["clusterTime"]=>
+ object(MongoDB\BSON\Timestamp)#%d (%d) {
+ ["increment"]=>
+ string(%d) "%d"
+ ["timestamp"]=>
+ string(%d) "%d"
+ }
+ ["signature"]=>
+ %a
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-getLogicalSessionId-001.phpt b/mongodb-1.4.2/tests/session/session-getLogicalSessionId-001.phpt
new file mode 100644
index 00000000..ae0ebaed
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-getLogicalSessionId-001.phpt
@@ -0,0 +1,29 @@
+--TEST--
+MongoDB\Driver\Session::getLogicalSessionId()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('STANDALONE'); NEEDS_ATLEAST_MONGODB_VERSION(STANDALONE, "3.6"); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(STANDALONE);
+$session = $manager->startSession();
+
+var_dump($session->getLogicalSessionId());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+object(stdClass)#%d (%d) {
+ ["id"]=>
+ object(MongoDB\BSON\Binary)#%d (%d) {
+ ["data"]=>
+ string(16) "%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c"
+ ["type"]=>
+ int(4)
+ }
+}
+===DONE===
diff --git a/mongodb-1.4.2/tests/session/session-getOperationTime-001.phpt b/mongodb-1.4.2/tests/session/session-getOperationTime-001.phpt
new file mode 100644
index 00000000..0add568d
--- /dev/null
+++ b/mongodb-1.4.2/tests/session/session-getOperationTime-001.phpt
@@ -0,0 +1,37 @@
+--TEST--
+MongoDB\Driver\Session::getOperationTime()
+--SKIPIF--
+<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
+<?php NEEDS_CRYPTO(); ?>
+<?php NEEDS('REPLICASET'); ?>
+--FILE--
+<?php
+require_once __DIR__ . "/../utils/basic.inc";
+
+$manager = new MongoDB\Driver\Manager(REPLICASET);
+$session = $manager->startSession();
+
+echo "Initial operation time:\n";
+var_dump($session->getOperationTime());
+
+$command = new MongoDB\Driver\Command(['ping' => 1]);
+$manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]);
+
+echo "\nOperation time after command:\n";
+var_dump($session->getOperationTime());
+
+?>
+===DONE===
+<?php exit(0); ?>
+--EXPECTF--
+Initial operation time:
+NULL
+
+Operation time after command:
+object(MongoDB\BSON\Timestamp)#%d (%d) {
+ ["increment"]=>
+ string(%d) "%d"
+ ["timestamp"]=>
+ string(%d) "%d"
+}
+===DONE===
diff --git a/mongodb-1.3.4/tests/standalone/bug0166.phpt b/mongodb-1.4.2/tests/standalone/bug0166.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/bug0166.phpt
rename to mongodb-1.4.2/tests/standalone/bug0166.phpt
diff --git a/mongodb-1.3.4/tests/standalone/bug0231.phpt b/mongodb-1.4.2/tests/standalone/bug0231.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/bug0231.phpt
rename to mongodb-1.4.2/tests/standalone/bug0231.phpt
diff --git a/mongodb-1.3.4/tests/standalone/bug0357.phpt b/mongodb-1.4.2/tests/standalone/bug0357.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/bug0357.phpt
rename to mongodb-1.4.2/tests/standalone/bug0357.phpt
diff --git a/mongodb-1.3.4/tests/standalone/bug0545.phpt b/mongodb-1.4.2/tests/standalone/bug0545.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/bug0545.phpt
rename to mongodb-1.4.2/tests/standalone/bug0545.phpt
diff --git a/mongodb-1.3.4/tests/standalone/bug0655.phpt b/mongodb-1.4.2/tests/standalone/bug0655.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/bug0655.phpt
rename to mongodb-1.4.2/tests/standalone/bug0655.phpt
diff --git a/mongodb-1.3.4/tests/standalone/command-aggregate-001.phpt b/mongodb-1.4.2/tests/standalone/command-aggregate-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/command-aggregate-001.phpt
rename to mongodb-1.4.2/tests/standalone/command-aggregate-001.phpt
diff --git a/mongodb-1.3.4/tests/standalone/connectiontimeoutexception-001.phpt b/mongodb-1.4.2/tests/standalone/connectiontimeoutexception-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/connectiontimeoutexception-001.phpt
rename to mongodb-1.4.2/tests/standalone/connectiontimeoutexception-001.phpt
diff --git a/mongodb-1.3.4/tests/standalone/executiontimeoutexception-001.phpt b/mongodb-1.4.2/tests/standalone/executiontimeoutexception-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/executiontimeoutexception-001.phpt
rename to mongodb-1.4.2/tests/standalone/executiontimeoutexception-001.phpt
diff --git a/mongodb-1.3.4/tests/standalone/executiontimeoutexception-002.phpt b/mongodb-1.4.2/tests/standalone/executiontimeoutexception-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/executiontimeoutexception-002.phpt
rename to mongodb-1.4.2/tests/standalone/executiontimeoutexception-002.phpt
diff --git a/mongodb-1.3.4/tests/standalone/manager-as-singleton.phpt b/mongodb-1.4.2/tests/standalone/manager-as-singleton.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/manager-as-singleton.phpt
rename to mongodb-1.4.2/tests/standalone/manager-as-singleton.phpt
diff --git a/mongodb-1.3.4/tests/standalone/query-errors.phpt b/mongodb-1.4.2/tests/standalone/query-errors.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/query-errors.phpt
rename to mongodb-1.4.2/tests/standalone/query-errors.phpt
diff --git a/mongodb-1.3.4/tests/standalone/update-multi-001.phpt b/mongodb-1.4.2/tests/standalone/update-multi-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/update-multi-001.phpt
rename to mongodb-1.4.2/tests/standalone/update-multi-001.phpt
diff --git a/mongodb-1.3.4/tests/standalone/write-error-001.phpt b/mongodb-1.4.2/tests/standalone/write-error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/write-error-001.phpt
rename to mongodb-1.4.2/tests/standalone/write-error-001.phpt
diff --git a/mongodb-1.3.4/tests/standalone/writeresult-isacknowledged-001.phpt b/mongodb-1.4.2/tests/standalone/writeresult-isacknowledged-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/writeresult-isacknowledged-001.phpt
rename to mongodb-1.4.2/tests/standalone/writeresult-isacknowledged-001.phpt
diff --git a/mongodb-1.3.4/tests/standalone/writeresult-isacknowledged-002.phpt b/mongodb-1.4.2/tests/standalone/writeresult-isacknowledged-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/writeresult-isacknowledged-002.phpt
rename to mongodb-1.4.2/tests/standalone/writeresult-isacknowledged-002.phpt
diff --git a/mongodb-1.3.4/tests/standalone/writeresult-isacknowledged-003.phpt b/mongodb-1.4.2/tests/standalone/writeresult-isacknowledged-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/standalone/writeresult-isacknowledged-003.phpt
rename to mongodb-1.4.2/tests/standalone/writeresult-isacknowledged-003.phpt
diff --git a/mongodb-1.3.4/tests/utils/PHONGO-FIXTURES.json.gz b/mongodb-1.4.2/tests/utils/PHONGO-FIXTURES.json.gz
similarity index 100%
rename from mongodb-1.3.4/tests/utils/PHONGO-FIXTURES.json.gz
rename to mongodb-1.4.2/tests/utils/PHONGO-FIXTURES.json.gz
diff --git a/mongodb-1.3.4/tests/utils/basic-skipif.inc b/mongodb-1.4.2/tests/utils/basic-skipif.inc
similarity index 100%
rename from mongodb-1.3.4/tests/utils/basic-skipif.inc
rename to mongodb-1.4.2/tests/utils/basic-skipif.inc
diff --git a/mongodb-1.3.4/tests/utils/basic.inc b/mongodb-1.4.2/tests/utils/basic.inc
similarity index 88%
rename from mongodb-1.3.4/tests/utils/basic.inc
rename to mongodb-1.4.2/tests/utils/basic.inc
index 1292fc65..1c10600f 100644
--- a/mongodb-1.3.4/tests/utils/basic.inc
+++ b/mongodb-1.4.2/tests/utils/basic.inc
@@ -1,39 +1,38 @@
<?php
require __DIR__ . "/" . "tools.php";
if (($FILENAME = getenv("PHONGO_SERVERS")) === false) {
- $FILENAME = sys_get_temp_dir() . "/PHONGO-SERVERS.json";
+ $FILENAME = sys_get_temp_dir() . "/PHONGO-SERVERS.json";
}
$json = file_get_contents($FILENAME);
$config = json_decode($json, true);
if (!$config) {
exit("skip Couldn't json_decode(file_get_contents($FILENAME));");
}
$servers = array(
"STANDALONE" => "",
- "STANDALONE_24" => "",
- "STANDALONE_26" => "",
"STANDALONE_30" => "",
"STANDALONE_SSL" => "",
"STANDALONE_AUTH" => "",
"STANDALONE_X509" => "",
"STANDALONE_PLAIN" => "",
"REPLICASET" => "",
"REPLICASET_30" => "",
+ "REPLICASET_DNS" => "",
);
$servers = array_merge($servers, $config);
def($servers);
$consts = array(
"DATABASE_NAME" => "phongo",
"COLLECTION_NAME" => makeCollectionNameFromFilename($_SERVER["SCRIPT_FILENAME"]),
);
def($consts);
// These use values from constants defined above
$consts = array(
"NS" => DATABASE_NAME . "." . COLLECTION_NAME,
);
def($consts);
diff --git a/mongodb-1.3.4/tests/utils/classes.inc b/mongodb-1.4.2/tests/utils/classes.inc
similarity index 100%
rename from mongodb-1.3.4/tests/utils/classes.inc
rename to mongodb-1.4.2/tests/utils/classes.inc
diff --git a/mongodb-1.4.2/tests/utils/observer.php b/mongodb-1.4.2/tests/utils/observer.php
new file mode 100644
index 00000000..cb424a09
--- /dev/null
+++ b/mongodb-1.4.2/tests/utils/observer.php
@@ -0,0 +1,36 @@
+<?php
+use MongoDB\Driver\Monitoring\CommandFailedEvent;
+use MongoDB\Driver\Monitoring\CommandStartedEvent;
+use MongoDB\Driver\Monitoring\CommandSucceededEvent;
+use MongoDB\Driver\Monitoring\CommandSubscriber;
+/**
+ * Observes command documents using the driver's monitoring API.
+ */
+class CommandObserver implements CommandSubscriber
+{
+ private $commands = [];
+ public function observe(callable $execution, callable $commandCallback)
+ {
+ $this->commands = [];
+ \MongoDB\Driver\Monitoring\addSubscriber($this);
+ try {
+ call_user_func($execution);
+ } finally {
+ \MongoDB\Driver\Monitoring\removeSubscriber($this);
+ foreach ($this->commands as $command) {
+ call_user_func($commandCallback, $command);
+ }
+ }
+ }
+ public function commandStarted(CommandStartedEvent $event)
+ {
+ $this->commands[] = $event->getCommand();
+ }
+ public function commandSucceeded(CommandSucceededEvent $event)
+ {
+ }
+ public function commandFailed(CommandFailedEvent $event)
+ {
+ }
+}
+?>
diff --git a/mongodb-1.3.4/tests/utils/tools.php b/mongodb-1.4.2/tests/utils/tools.php
similarity index 78%
rename from mongodb-1.3.4/tests/utils/tools.php
rename to mongodb-1.4.2/tests/utils/tools.php
index 9e4fa99b..19e0cded 100644
--- a/mongodb-1.3.4/tests/utils/tools.php
+++ b/mongodb-1.4.2/tests/utils/tools.php
@@ -1,418 +1,517 @@
<?php
/**
* Prints a traditional hex dump of byte values and printable characters.
*
* @see http://stackoverflow.com/a/4225813/162228
* @param string $data Binary data
* @param integer $width Bytes displayed per line
*/
function hex_dump($data, $width = 16)
{
static $pad = '.'; // Placeholder for non-printable characters
static $from = '';
static $to = '';
if ($from === '') {
for ($i = 0; $i <= 0xFF; $i++) {
$from .= chr($i);
$to .= ($i >= 0x20 && $i <= 0x7E) ? chr($i) : $pad;
}
}
$hex = str_split(bin2hex($data), $width * 2);
$chars = str_split(strtr($data, $from, $to), $width);
$offset = 0;
$length = $width * 3;
foreach ($hex as $i => $line) {
printf("%6X : %-{$length}s [%s]\n", $offset, implode(' ', str_split($line, 2)), $chars[$i]);
$offset += $width;
}
}
/**
* Canonicalizes a JSON string.
*
* @param string $json
* @return string
*/
function json_canonicalize($json)
{
$json = json_encode(json_decode($json));
/* Versions of PHP before 7.1 replace empty JSON keys with "_empty_" when
* decoding to a stdClass (see: https://bugs.php.net/bug.php?id=46600). Work
* around this by replacing "_empty_" keys before returning.
*/
return str_replace('"_empty_":', '"":', $json);
}
/**
* Return a collection name to use for the test file.
*
* The filename will be stripped of the base path to the test suite (prefix) as
* well as the PHP file extension (suffix). Special characters (including hyphen
* for shell compatibility) will be replaced with underscores.
*
* @param string $filename
* @return string
*/
function makeCollectionNameFromFilename($filename)
{
$filename = realpath($filename);
$prefix = realpath(dirname(__FILE__) . '/..') . DIRECTORY_SEPARATOR;
$replacements = array(
// Strip test path prefix
sprintf('/^%s/', preg_quote($prefix, '/')) => '',
// Strip file extension suffix
'/\.php$/' => '',
// SKIPIFs add ".skip" between base name and extension
'/\.skip$/' => '',
// Replace special characters with underscores
sprintf('/[%s]/', preg_quote('-$/\\', '/')) => '_',
);
return preg_replace(array_keys($replacements), array_values($replacements), $filename);
}
function TESTCOMMANDS($uri) {
$cmd = array(
"configureFailPoint" => 1,
);
$command = new MongoDB\Driver\Command($cmd);
$manager = new MongoDB\Driver\Manager($uri);
try {
$result = $manager->executeCommand("test", $command);
} catch(Exception $e) {
/* command not found */
if ($e->getCode() == 59) {
die("skip this test requires mongod with enableTestCommands");
}
}
}
-function NEEDS($uri) {
- if (!constant($uri)) {
- exit("skip -- need '$uri' defined");
+function NEEDS($configuration) {
+ if (!constant($configuration)) {
+ exit("skip -- need '$configuration' defined");
}
}
function PREDICTABLE() {
global $servers;
foreach($servers as $k => $v) {
if (!defined($k) || !constant($k)) {
exit("skip - needs predictable environment (e.g. vagrant)\n");
}
}
}
function SLOW() {
if (getenv("SKIP_SLOW_TESTS")) {
exit("skip SKIP_SLOW_TESTS");
}
}
function LOAD($uri, $dbname = DATABASE_NAME, $collname = COLLECTION_NAME, $filename = null) {
if (!$filename) {
$filename = "compress.zlib://" . __DIR__ . "/" . "PHONGO-FIXTURES.json.gz";
}
$manager = new MongoDB\Driver\Manager($uri);
$bulk = new MongoDB\Driver\BulkWrite(['ordered' => false]);
$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY));
$data = file_get_contents($filename);
$array = json_decode($data);
foreach($array as $document) {
$bulk->insert($document);
}
$retval = $server->executeBulkWrite("$dbname.$collname", $bulk);
if ($retval->getInsertedCount() !== count($array)) {
exit(sprintf('skip Fixtures were not loaded (expected: %d, actual: %d)', $total, $retval->getInsertedCount()));
}
}
+function NEEDS_ATLEAST_MONGODB_VERSION($uri, $version) {
+ $manager = new MongoDB\Driver\Manager($uri);
+ $cmd = new MongoDB\Driver\Command(["buildInfo" => 1]);
+ $rp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
+
+ try {
+ $cursor = $manager->executeCommand("admin", $cmd, $rp);
+ $cursor->setTypeMap(['root' => 'array', 'document' => 'array']);
+ $document = current($cursor->toArray());
+
+ if (version_compare($document['version'], $version, '<')) {
+ echo "skip Needs version >= $version, but is {$document['version']}";
+ }
+ } catch(Exception $e) {
+ echo "skip (needs version); $uri ($version): " . $e->getCode(), ": ", $e->getMessage();
+ exit(1);
+ }
+}
+
+function NEEDS_STORAGE_ENGINE($uri, $engine) {
+ $manager = new MongoDB\Driver\Manager($uri);
+ $cmd = new MongoDB\Driver\Command(["serverStatus" => 1]);
+ $rp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
+
+ try {
+ $cursor = $manager->executeCommand("admin", $cmd, $rp);
+ $cursor->setTypeMap(['root' => 'array', 'document' => 'array']);
+ $document = current($cursor->toArray());
+
+ if ($document['storageEngine']['name'] != $engine) {
+ echo "skip Needs storage engine '$engine', but is '{$document['storageEngine']['name']}'";
+ }
+ } catch(Exception $e) {
+ echo "skip (needs version); $uri ($version): " . $e->getCode(), ": ", $e->getMessage();
+ exit(1);
+ }
+}
+
+/* Checks that libmongoc supports crypto. If one or more libaries are provided,
+ * additionally check the value of "libmongoc crypto library" as reported by
+ * phpinfo(). Possible values are "libcrypto", "Common Crypto", and "CNG". */
+function NEEDS_CRYPTO(array $libs = [])
+{
+ ob_start();
+ phpinfo(INFO_MODULES);
+ $info = ob_get_clean();
+
+ $pattern = sprintf('/^%s$/m', preg_quote('libmongoc crypto => enabled'));
+
+ if (preg_match($pattern, $info) !== 1) {
+ exit('skip Crypto is not supported');
+ }
+
+ if (empty($libs)) {
+ return;
+ }
+
+ $pattern = sprintf('/^%s([\w ]+)$/m', preg_quote('libmongoc crypto library => '));
+
+ if (preg_match($pattern, $info, $matches) !== 1) {
+ exit('skip Could not determine crypto library');
+ }
+
+ if (!in_array($matches[1], $libs)) {
+ exit('skip Needs crypto library ' . implode(', ', $libs) . ', but found ' . $matches[1]);
+ }
+}
+
+/* Checks that libmongoc supports SSL. If one or more libaries are provided,
+ * additionally check the value of "libmongoc SSL library" as reported by
+ * phpinfo(). Possible values are "OpenSSL", "LibreSSL", "Secure Transport", and
+ * "Secure Channel". */
+function NEEDS_SSL(array $libs = [])
+{
+ ob_start();
+ phpinfo(INFO_MODULES);
+ $info = ob_get_clean();
+
+ $pattern = sprintf('/^%s$/m', preg_quote('libmongoc SSL => enabled'));
+
+ if (preg_match($pattern, $info) !== 1) {
+ exit('skip SSL is not supported');
+ }
+
+ if (empty($libs)) {
+ return;
+ }
+
+ $pattern = sprintf('/^%s([\w ]+)$/m', preg_quote('libmongoc SSL library => '));
+
+ if (preg_match($pattern, $info, $matches) !== 1) {
+ exit('skip Could not determine SSL library');
+ }
+
+ if (!in_array($matches[1], $libs)) {
+ exit('skip Needs SSL library ' . implode(', ', $libs) . ', but found ' . $matches[1]);
+ }
+}
+
function CLEANUP($uri, $dbname = DATABASE_NAME, $collname = COLLECTION_NAME) {
try {
$manager = new MongoDB\Driver\Manager($uri);
$cmd = new MongoDB\Driver\Command(array("drop" => $collname));
$rp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY);
try {
$manager->executeCommand($dbname, $cmd, $rp);
} catch(Exception $e) {
do {
/* ns not found */
if ($e->getCode() == 59 || $e->getCode() == 26) {
continue;
}
throw $e;
} while (0);
}
} catch(Exception $e) {
echo "skip (cleanup); $uri: " . $e->getCode(), ": ", $e->getMessage();
exit(1);
}
}
function START($id, array $options = array()) {
/* starting/stopping servers only works using the Vagrant setup */
PREDICTABLE();
$options += array("name" => "mongod", "id" => $id);
$opts = array(
"http" => array(
"timeout" => 60,
"method" => "PUT",
"header" => "Accept: application/json\r\n" .
"Content-type: application/x-www-form-urlencoded",
"content" => json_encode($options),
"ignore_errors" => true,
),
);
$ctx = stream_context_create($opts);
$json = file_get_contents(getMOUri() . "/servers/$id", false, $ctx);
$result = json_decode($json, true);
/* Failed -- or was already started */
if (!isset($result["mongodb_uri"])) {
DELETE($id);
define($id, false);
} else {
define($id, $result["mongodb_uri"]);
$FILENAME = sys_get_temp_dir() . "/PHONGO-SERVERS.json";
$json = file_get_contents($FILENAME);
$config = json_decode($json, true);
$config[$id] = constant($id);
file_put_contents($FILENAME, json_encode($config, JSON_PRETTY_PRINT));
}
}
function DELETE($id) {
$opts = array(
"http" => array(
"timeout" => 60,
"method" => "DELETE",
"header" => "Accept: application/json\r\n",
"ignore_errors" => true,
),
);
$ctx = stream_context_create($opts);
$json = file_get_contents(getMOUri() . "/servers/$id", false, $ctx);
$FILENAME = sys_get_temp_dir() . "/PHONGO-SERVERS.json";
$json = file_get_contents($FILENAME);
$config = json_decode($json, true);
unset($config[$id]);
file_put_contents($FILENAME, json_encode($config, JSON_PRETTY_PRINT));
}
function severityToString($type) {
switch($type) {
case E_WARNING:
return "E_WARNING";
default:
return "Some other #_$type";
}
}
function raises($function, $type, $infunction = null) {
$errhandler = function($severity, $message, $file, $line, $errcontext) {
throw new ErrorException($message, 0, $severity, $file, $line);
};
set_error_handler($errhandler, $type);
try {
$function();
} catch(Exception $e) {
if ($e instanceof ErrorException && $e->getSeverity() & $type) {
if ($infunction) {
$trace = $e->getTrace();
$function = $trace[0]["function"];
if (strcasecmp($function, $infunction) == 0) {
printf("OK: Got %s thrown from %s\n", $exceptionname, $infunction);
} else {
printf("ALMOST: Got %s - but was thrown in %s, not %s\n", $exceptionname, $function, $infunction);
}
restore_error_handler();
return $e->getMessage();
}
printf("OK: Got %s\n", severityToString($type));
} else {
printf("ALMOST: Got %s - expected %s\n", get_class($e), $exceptionname);
}
restore_error_handler();
return $e->getMessage();
}
echo "FAILED: Expected $exceptionname thrown!\n";
restore_error_handler();
}
function throws($function, $exceptionname, $infunction = null) {
try {
$function();
} catch(Exception $e) {
$message = str_replace(array("\n", "\r"), ' ', $e->getMessage());
if ($e instanceof $exceptionname) {
if ($infunction) {
$trace = $e->getTrace();
$function = $trace[0]["function"];
if (strcasecmp($function, $infunction) == 0) {
printf("OK: Got %s thrown from %s\n", $exceptionname, $infunction);
} else {
printf("ALMOST: Got %s - but was thrown in %s, not %s (%s)\n", $exceptionname, $function, $infunction, $message);
}
return $e->getMessage();
}
printf("OK: Got %s\n", $exceptionname);
} else {
printf("ALMOST: Got %s (%s) - expected %s\n", get_class($e), $message, $exceptionname);
}
return $e->getMessage();
}
echo "FAILED: Expected $exceptionname thrown, but no exception thrown!\n";
}
function printServer(MongoDB\Driver\Server $server)
{
printf("server: %s:%d\n", $server->getHost(), $server->getPort());
}
function printWriteResult(MongoDB\Driver\WriteResult $result, $details = true)
{
printServer($result->getServer());
printf("insertedCount: %d\n", $result->getInsertedCount());
printf("matchedCount: %d\n", $result->getMatchedCount());
printf("modifiedCount: %d\n", $result->getModifiedCount());
printf("upsertedCount: %d\n", $result->getUpsertedCount());
printf("deletedCount: %d\n", $result->getDeletedCount());
foreach ($result->getUpsertedIds() as $index => $id) {
printf("upsertedId[%d]: ", $index);
var_dump($id);
}
$writeConcernError = $result->getWriteConcernError();
printWriteConcernError($writeConcernError ? $writeConcernError : null, $details);
foreach ($result->getWriteErrors() as $writeError) {
printWriteError($writeError);
}
}
function printWriteConcernError(MongoDB\Driver\WriteConcernError $error = null, $details)
{
if ($error) {
/* This stuff is generated by the server, no need for us to test it */
if (!$details) {
printf("writeConcernError: %s (%d)\n", $error->getMessage(), $error->getCode());
return;
}
var_dump($error);
printf("writeConcernError.message: %s\n", $error->getMessage());
printf("writeConcernError.code: %d\n", $error->getCode());
printf("writeConcernError.info: ");
var_dump($error->getInfo());
}
}
function printWriteError(MongoDB\Driver\WriteError $error)
{
var_dump($error);
printf("writeError[%d].message: %s\n", $error->getIndex(), $error->getMessage());
printf("writeError[%d].code: %d\n", $error->getIndex(), $error->getCode());
}
function getInsertCount($retval) {
return $retval->getInsertedCount();
}
function getModifiedCount($retval) {
return $retval->getModifiedCount();
}
function getDeletedCount($retval) {
return $retval->getDeletedCount();
}
function getUpsertedCount($retval) {
return $retval->getUpsertedCount();
}
function getWriteErrors($retval) {
return (array)$retval->getWriteErrors();
}
function def($arr) {
foreach($arr as $const => $value) {
define($const, getenv("PHONGO_TEST_$const") ?: $value);
}
}
function configureFailPoint(MongoDB\Driver\Manager $manager, $failPoint, $mode, $data = array()) {
$doc = array(
"configureFailPoint" => $failPoint,
"mode" => $mode,
);
if ($data) {
$doc["data"] = $data;
}
$cmd = new MongoDB\Driver\Command($doc);
$result = $manager->executeCommand("admin", $cmd);
$arr = current($result->toArray());
if (empty($arr->ok)) {
var_dump($result);
throw new RuntimeException("Failpoint failed");
}
return true;
}
function failMaxTimeMS(MongoDB\Driver\Manager $manager) {
return configureFailPoint($manager, "maxTimeAlwaysTimeOut", array("times" => 1));
}
function getMOUri() {
if (!($HOST = getenv("MONGODB_ORCHESTRATION_HOST"))) {
$HOST = "192.168.112.10";
}
if (!($PORT = getenv("MONGODB_ORCHESTRATION_PORT"))) {
$PORT = "8889";
}
$MO = "http://$HOST:$PORT/v1";
return $MO;
}
function getMOPresetBase() {
if (!($BASE = getenv("mongodb_orchestration_base"))) {
$BASE = "/phongo/";
}
return $BASE;
}
function toPHP($var, $typemap = array()) {
return MongoDB\BSON\toPHP($var, $typemap);
}
function fromPHP($var) {
return MongoDB\BSON\fromPHP($var);
}
function toJSON($var) {
return MongoDB\BSON\toJSON($var);
}
function toCanonicalExtendedJSON($var) {
return MongoDB\BSON\toCanonicalExtendedJSON($var);
}
function toRelaxedExtendedJSON($var) {
return MongoDB\BSON\toRelaxedExtendedJSON($var);
}
function fromJSON($var) {
return MongoDB\BSON\fromJSON($var);
}
/* Note: this fail point may terminate the mongod process, so you may want to
* use this in conjunction with a throwaway server. */
function failGetMore(MongoDB\Driver\Manager $manager) {
return configureFailPoint($manager, "failReceivedGetmore", "alwaysOn");
}
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-bsonserialize-001.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-bsonserialize-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-bsonserialize-001.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-bsonserialize-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-bsonserialize-002.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-bsonserialize-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-bsonserialize-002.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-bsonserialize-002.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-constants.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-constants.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-constants.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-constants.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-ctor-001.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-ctor-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-ctor-001.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-ctor-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-001.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-001.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-002.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-002.phpt
similarity index 95%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-002.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-002.phpt
index 6eb47c60..8c090ebd 100644
--- a/mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-002.phpt
+++ b/mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-002.phpt
@@ -1,37 +1,37 @@
--TEST--
MongoDB\Driver\WriteConcern construction (invalid w type)
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
$tests = array(
1.0,
true,
array(),
new stdClass,
null,
);
foreach ($tests as $test) {
echo throws(function() use ($test) {
new MongoDB\Driver\WriteConcern($test);
}, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n";
}
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected w to be integer or string, %r(double|float)%r given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected w to be integer or string, boolean given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected w to be integer or string, array given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
-Expected w to be integer or string, object given
+Expected w to be integer or string, stdClass given
OK: Got MongoDB\Driver\Exception\InvalidArgumentException
Expected w to be integer or string, %r(null|NULL)%r given
===DONE===
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-003.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-003.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-003.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-004.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-004.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-004.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-004.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-005.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-005.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-ctor_error-005.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-ctor_error-005.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-debug-001.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-debug-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-debug-001.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-debug-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-debug-002.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-debug-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-debug-002.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-debug-002.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-debug-003.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-debug-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-debug-003.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-debug-003.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-getjournal-001.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-getjournal-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-getjournal-001.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-getjournal-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-getw-001.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-getw-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-getw-001.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-getw-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-getwtimeout-001.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-getwtimeout-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-getwtimeout-001.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-getwtimeout-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern-isdefault-001.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern-isdefault-001.phpt
similarity index 95%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern-isdefault-001.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern-isdefault-001.phpt
index a4426c2d..515c6451 100644
--- a/mongodb-1.3.4/tests/writeConcern/writeconcern-isdefault-001.phpt
+++ b/mongodb-1.4.2/tests/writeConcern/writeconcern-isdefault-001.phpt
@@ -1,67 +1,66 @@
--TEST--
MongoDB\Driver\WriteConcern::isDefault()
--FILE--
<?php
$tests = [
new MongoDB\Driver\WriteConcern(-3), // MONGOC_WRITE_CONCERN_W_MAJORITY
new MongoDB\Driver\WriteConcern(-2), // MONGOC_WRITE_CONCERN_W_DEFAULT
new MongoDB\Driver\WriteConcern(-1), // MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED
new MongoDB\Driver\WriteConcern(0), // MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED
new MongoDB\Driver\WriteConcern(1),
new MongoDB\Driver\WriteConcern(2),
new MongoDB\Driver\WriteConcern('tag'),
new MongoDB\Driver\WriteConcern(MongoDB\Driver\WriteConcern::MAJORITY),
// mongoc_uri_parse_option() ignores empty string for w
(new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w='))->getWriteConcern(),
- // Cannot test "w=-3" since libmongoc URI parsing expects integers >= -2
- (new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=-2'))->getWriteConcern(),
+ // Cannot test "w=-3" since libmongoc URI parsing expects integers >= -1
+ // Cannot test "w=-2" since libmongoc URI parsing expects integers >= -1, and throws an error otherwise
(new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=-1'))->getWriteConcern(),
(new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=0'))->getWriteConcern(),
(new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=1'))->getWriteConcern(),
(new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=2'))->getWriteConcern(),
(new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=tag'))->getWriteConcern(),
(new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=majority'))->getWriteConcern(),
// Cannot test ['w' => null] since an integer or string type is expected (PHPC-887)
// Cannot test ['w' => -3] or ['w' => -2] since php_phongo_apply_wc_options_to_uri() expects integers >= -1
(new MongoDB\Driver\Manager(null, ['w' => -1]))->getWriteConcern(),
(new MongoDB\Driver\Manager(null, ['w' => 0]))->getWriteConcern(),
(new MongoDB\Driver\Manager(null, ['w' => 1]))->getWriteConcern(),
(new MongoDB\Driver\Manager(null, ['w' => 2]))->getWriteConcern(),
(new MongoDB\Driver\Manager(null, ['w' => 'tag']))->getWriteConcern(),
(new MongoDB\Driver\Manager(null, ['w' => 'majority']))->getWriteConcern(),
(new MongoDB\Driver\Manager)->getWriteConcern(),
];
foreach ($tests as $wc) {
var_dump($wc->isDefault());
}
?>
===DONE===
<?php exit(0); ?>
--EXPECT--
bool(false)
bool(true)
bool(false)
bool(false)
bool(false)
bool(false)
bool(false)
bool(false)
bool(true)
-bool(true)
bool(false)
bool(false)
bool(false)
bool(false)
bool(false)
bool(false)
bool(false)
bool(false)
bool(false)
bool(false)
bool(false)
bool(false)
bool(true)
===DONE===
diff --git a/mongodb-1.3.4/tests/writeConcern/writeconcern_error-001.phpt b/mongodb-1.4.2/tests/writeConcern/writeconcern_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcern/writeconcern_error-001.phpt
rename to mongodb-1.4.2/tests/writeConcern/writeconcern_error-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcernError/writeconcernerror-debug-001.phpt b/mongodb-1.4.2/tests/writeConcernError/writeconcernerror-debug-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcernError/writeconcernerror-debug-001.phpt
rename to mongodb-1.4.2/tests/writeConcernError/writeconcernerror-debug-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcernError/writeconcernerror-debug-002.phpt b/mongodb-1.4.2/tests/writeConcernError/writeconcernerror-debug-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcernError/writeconcernerror-debug-002.phpt
rename to mongodb-1.4.2/tests/writeConcernError/writeconcernerror-debug-002.phpt
diff --git a/mongodb-1.3.4/tests/writeConcernError/writeconcernerror-getcode-001.phpt b/mongodb-1.4.2/tests/writeConcernError/writeconcernerror-getcode-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcernError/writeconcernerror-getcode-001.phpt
rename to mongodb-1.4.2/tests/writeConcernError/writeconcernerror-getcode-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcernError/writeconcernerror-getinfo-001.phpt b/mongodb-1.4.2/tests/writeConcernError/writeconcernerror-getinfo-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcernError/writeconcernerror-getinfo-001.phpt
rename to mongodb-1.4.2/tests/writeConcernError/writeconcernerror-getinfo-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcernError/writeconcernerror-getinfo-002.phpt b/mongodb-1.4.2/tests/writeConcernError/writeconcernerror-getinfo-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcernError/writeconcernerror-getinfo-002.phpt
rename to mongodb-1.4.2/tests/writeConcernError/writeconcernerror-getinfo-002.phpt
diff --git a/mongodb-1.3.4/tests/writeConcernError/writeconcernerror-getmessage-001.phpt b/mongodb-1.4.2/tests/writeConcernError/writeconcernerror-getmessage-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcernError/writeconcernerror-getmessage-001.phpt
rename to mongodb-1.4.2/tests/writeConcernError/writeconcernerror-getmessage-001.phpt
diff --git a/mongodb-1.3.4/tests/writeConcernError/writeconcernerror_error-001.phpt b/mongodb-1.4.2/tests/writeConcernError/writeconcernerror_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeConcernError/writeconcernerror_error-001.phpt
rename to mongodb-1.4.2/tests/writeConcernError/writeconcernerror_error-001.phpt
diff --git a/mongodb-1.3.4/tests/writeError/writeerror-debug-001.phpt b/mongodb-1.4.2/tests/writeError/writeerror-debug-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeError/writeerror-debug-001.phpt
rename to mongodb-1.4.2/tests/writeError/writeerror-debug-001.phpt
diff --git a/mongodb-1.3.4/tests/writeError/writeerror-getCode-001.phpt b/mongodb-1.4.2/tests/writeError/writeerror-getCode-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeError/writeerror-getCode-001.phpt
rename to mongodb-1.4.2/tests/writeError/writeerror-getCode-001.phpt
diff --git a/mongodb-1.3.4/tests/writeError/writeerror-getIndex-001.phpt b/mongodb-1.4.2/tests/writeError/writeerror-getIndex-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeError/writeerror-getIndex-001.phpt
rename to mongodb-1.4.2/tests/writeError/writeerror-getIndex-001.phpt
diff --git a/mongodb-1.3.4/tests/writeError/writeerror-getInfo-001.phpt b/mongodb-1.4.2/tests/writeError/writeerror-getInfo-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeError/writeerror-getInfo-001.phpt
rename to mongodb-1.4.2/tests/writeError/writeerror-getInfo-001.phpt
diff --git a/mongodb-1.3.4/tests/writeError/writeerror-getMessage-001.phpt b/mongodb-1.4.2/tests/writeError/writeerror-getMessage-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeError/writeerror-getMessage-001.phpt
rename to mongodb-1.4.2/tests/writeError/writeerror-getMessage-001.phpt
diff --git a/mongodb-1.3.4/tests/writeError/writeerror_error-001.phpt b/mongodb-1.4.2/tests/writeError/writeerror_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeError/writeerror_error-001.phpt
rename to mongodb-1.4.2/tests/writeError/writeerror_error-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/bug0671-003.phpt b/mongodb-1.4.2/tests/writeResult/bug0671-003.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/bug0671-003.phpt
rename to mongodb-1.4.2/tests/writeResult/bug0671-003.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-debug-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-debug-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-debug-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-debug-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-debug-002.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-debug-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-debug-002.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-debug-002.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getdeletedcount-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getdeletedcount-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getdeletedcount-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getdeletedcount-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getdeletedcount-002.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getdeletedcount-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getdeletedcount-002.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getdeletedcount-002.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getinsertedcount-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getinsertedcount-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getinsertedcount-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getinsertedcount-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getinsertedcount-002.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getinsertedcount-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getinsertedcount-002.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getinsertedcount-002.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getmatchedcount-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getmatchedcount-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getmatchedcount-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getmatchedcount-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getmatchedcount-002.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getmatchedcount-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getmatchedcount-002.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getmatchedcount-002.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getmodifiedcount-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getmodifiedcount-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getmodifiedcount-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getmodifiedcount-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getmodifiedcount-002.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getmodifiedcount-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getmodifiedcount-002.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getmodifiedcount-002.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getserver-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getserver-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getserver-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getserver-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getupsertedcount-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getupsertedcount-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getupsertedcount-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getupsertedcount-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getupsertedcount-002.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getupsertedcount-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getupsertedcount-002.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getupsertedcount-002.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getupsertedids-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getupsertedids-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getupsertedids-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getupsertedids-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getupsertedids-002.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getupsertedids-002.phpt
similarity index 98%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getupsertedids-002.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getupsertedids-002.phpt
index 5bd57d96..a05e29da 100644
--- a/mongodb-1.3.4/tests/writeResult/writeresult-getupsertedids-002.phpt
+++ b/mongodb-1.4.2/tests/writeResult/writeresult-getupsertedids-002.phpt
@@ -1,99 +1,99 @@
--TEST--
MongoDB\Driver\WriteResult::getUpsertedIds() with client-generated values
--SKIPIF--
<?php require __DIR__ . "/../utils/basic-skipif.inc"; ?>
<?php NEEDS('STANDALONE'); CLEANUP(STANDALONE); ?>
--FILE--
<?php
require_once __DIR__ . "/../utils/basic.inc";
/* Do not test Decimal128, since it is only supported by MongoDB 3.4+.
*
* Do not test array or Regex types, which are not permitted to be used as an
* ID. If a regular expression is used in upsert criteria and does not match an
* existing document, the server generates a new ObjectId. */
$tests = [
null,
true,
1,
- 3.14,
+ 4.125,
'foo',
(object) [],
new MongoDB\BSON\Binary('foo', MongoDB\BSON\Binary::TYPE_GENERIC),
new MongoDB\BSON\Javascript('function(){}'),
new MongoDB\BSON\MaxKey,
new MongoDB\BSON\MinKey,
new MongoDB\BSON\ObjectId('586c18d86118fd6c9012dec1'),
new MongoDB\BSON\Timestamp(1234, 5678),
new MongoDB\BSON\UTCDateTime('1483479256924'),
];
$manager = new MongoDB\Driver\Manager(STANDALONE);
$bulk = new MongoDB\Driver\BulkWrite;
foreach ($tests as $value) {
$bulk->update(['_id' => $value], ['$set' => ['x' => 1]], ['upsert' => true]);
}
$result = $manager->executeBulkWrite(NS, $bulk);
var_dump($result->getUpsertedIds());
?>
===DONE===
<?php exit(0); ?>
--EXPECTF--
array(13) {
[0]=>
NULL
[1]=>
bool(true)
[2]=>
int(1)
[3]=>
- float(3.14)
+ float(4.125)
[4]=>
string(3) "foo"
[5]=>
object(stdClass)#%d (%d) {
}
[6]=>
object(MongoDB\BSON\Binary)#%d (%d) {
["data"]=>
string(3) "foo"
["type"]=>
int(0)
}
[7]=>
object(MongoDB\BSON\Javascript)#%d (%d) {
["code"]=>
string(12) "function(){}"
["scope"]=>
NULL
}
[8]=>
object(MongoDB\BSON\MaxKey)#%d (%d) {
}
[9]=>
object(MongoDB\BSON\MinKey)#%d (%d) {
}
[10]=>
object(MongoDB\BSON\ObjectId)#%d (%d) {
["oid"]=>
string(24) "586c18d86118fd6c9012dec1"
}
[11]=>
object(MongoDB\BSON\Timestamp)#%d (%d) {
["increment"]=>
string(4) "1234"
["timestamp"]=>
string(4) "5678"
}
[12]=>
object(MongoDB\BSON\UTCDateTime)#%d (%d) {
["milliseconds"]=>
string(13) "1483479256924"
}
}
===DONE===
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getwriteconcernerror-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getwriteconcernerror-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getwriteconcernerror-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getwriteconcernerror-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getwriteerrors-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getwriteerrors-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getwriteerrors-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getwriteerrors-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-getwriteerrors-002.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-getwriteerrors-002.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-getwriteerrors-002.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-getwriteerrors-002.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult-isacknowledged-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult-isacknowledged-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult-isacknowledged-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult-isacknowledged-001.phpt
diff --git a/mongodb-1.3.4/tests/writeResult/writeresult_error-001.phpt b/mongodb-1.4.2/tests/writeResult/writeresult_error-001.phpt
similarity index 100%
rename from mongodb-1.3.4/tests/writeResult/writeresult_error-001.phpt
rename to mongodb-1.4.2/tests/writeResult/writeresult_error-001.phpt
diff --git a/package.xml b/package.xml
index 361d0fe1..ed657230 100644
--- a/package.xml
+++ b/package.xml
@@ -1,2028 +1,2219 @@
<?xml version="1.0" encoding="UTF-8"?>
<package packagerversion="1.10.5" version="2.1" xmlns="http://pear.php.net/dtd/package-2.1" xmlns:tasks="http://pear.php.net/dtd/tasks-1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://pear.php.net/dtd/tasks-1.0 http://pear.php.net/dtd/tasks-1.0.xsd http://pear.php.net/dtd/package-2.1 http://pear.php.net/dtd/package-2.1.xsd">
<name>mongodb</name>
<channel>pecl.php.net</channel>
<summary>MongoDB driver for PHP</summary>
<description>The purpose of this driver is to provide exceptionally thin glue between MongoDB
and PHP, implementing only fundamental and performance-critical components
necessary to build a fully-functional MongoDB driver.</description>
<lead>
<name>Jeremy Mikola</name>
<user>jmikola</user>
<email>jmikola@php.net</email>
<active>yes</active>
</lead>
<lead>
<name>Derick Rethans</name>
<user>derick</user>
<email>derick@php.net</email>
<active>yes</active>
</lead>
<lead>
<name>Hannes Magnusson</name>
<user>bjori</user>
<email>bjori@php.net</email>
<active>no</active>
</lead>
- <date>2017-12-01</date>
- <time>18:41:35</time>
+ <developer>
+ <name>Katherine Walker</name>
+ <user>kvwalker</user>
+ <email>kvwalker@php.net</email>
+ <active>yes</active>
+ </developer>
+ <date>2018-03-06</date>
+ <time>20:50:22</time>
<version>
- <release>1.3.4</release>
- <api>1.3.4</api>
+ <release>1.4.2</release>
+ <api>1.4.2</api>
</version>
<stability>
<release>stable</release>
<api>stable</api>
</stability>
<license uri="http://www.apache.org/licenses/LICENSE-2.0">Apache License</license>
<notes>
** Bug
- * [PHPC-1053] - UTCDateTime constructor reports that its single argument is required
+ * [PHPC-1128] - Aggregation pipelines are broken on sharded collections
+
+** Task
+ * [PHPC-962] - Document example of tailable cursor iteration
</notes>
<contents>
<dir name="/">
+ <file md5sum="ad8d52d54e0f97c0e4e385376ea73bc0" name="scripts/build/autotools/m4/pkg.m4" role="src" />
+ <file md5sum="3e75702248de395aa8c75f4c5ef09505" name="scripts/build/autotools/CheckHost.m4" role="src" />
+ <file md5sum="d26fc6e3ada2d01457096f4f29089c87" name="scripts/build/autotools/CheckSSL.m4" role="src" />
<file md5sum="b301ead064e031b76fa08488a1055594" name="scripts/centos/ldap/Domain.ldif" role="test" />
<file md5sum="4daa783214593b6d7deb42f35c6e027c" name="scripts/centos/ldap/Users.ldif" role="test" />
<file md5sum="001e9cfe2c64b4afe413c12da3a75103" name="scripts/centos/ldap/basics.ldif" role="test" />
<file md5sum="624db776065d2ce0e76c3dd252c86c27" name="scripts/centos/ldap/install.sh" role="test" />
<file md5sum="a36639292f69b5a3ed651ce2b91c9e51" name="scripts/centos/ldap/ldapconfig.py" role="test" />
<file md5sum="3373471c13615482fcb5abd156a99013" name="scripts/centos/ldap/mongod.ldif" role="test" />
<file md5sum="79641e9055dc9a4c810cdc580d420ccf" name="scripts/centos/ldap/pw.ldif" role="test" />
<file md5sum="7069ce3fbf9612eb20df4de56e2915e7" name="scripts/centos/ldap/saslauthd.conf" role="test" />
<file md5sum="c2d1c7b3b12d970c295ebceda4bd429f" name="scripts/centos/ldap/users" role="test" />
<file md5sum="9add0018a9ebebb32e6f9d689d53ce14" name="scripts/centos/essentials.sh" role="test" />
<file md5sum="43a925c212fc965e90d89951d04945c1" name="scripts/freebsd/essentials.sh" role="test" />
<file md5sum="18b03fd810bde00c7493002a94e24865" name="scripts/freebsd/phongo.sh" role="test" />
- <file md5sum="0e6d3baeb0ffedee6a4d5500a725331b" name="scripts/presets/replicaset-30.json" role="test" />
- <file md5sum="b5c14f08571fcfe2aa5641a7a18d4c3f" name="scripts/presets/replicaset.json" role="test" />
- <file md5sum="d850cacbf5527fd7b4a46958e66ed5c9" name="scripts/presets/standalone-24.json" role="test" />
- <file md5sum="d9b0cfc2035c3d5c0baae6959a90c36e" name="scripts/presets/standalone-26.json" role="test" />
+ <file md5sum="6f0781ed65c1dac12d95d690f0be44cb" name="scripts/presets/replicaset-30.json" role="test" />
+ <file md5sum="90e229f7bd0523f0dab36965f72c2671" name="scripts/presets/replicaset-dns.json" role="test" />
+ <file md5sum="9909d557e39b2e52aa2b64fb45a33801" name="scripts/presets/replicaset.json" role="test" />
<file md5sum="60c5e499c429eb07e1f96b37baa42adf" name="scripts/presets/standalone-30.json" role="test" />
<file md5sum="b8cc887e5bccc9bc1f715d041d2d657b" name="scripts/presets/standalone-auth.json" role="test" />
<file md5sum="2bc383470aa5dfc60185ef19a8a80276" name="scripts/presets/standalone-plain.json" role="test" />
- <file md5sum="82e65a04030826c41d713787833fb52a" name="scripts/presets/standalone-ssl.json" role="test" />
- <file md5sum="c5acd3fbc060927121ff626f3b730cfa" name="scripts/presets/standalone-x509.json" role="test" />
+ <file md5sum="b871fd425e7acdb38ef72ebb4017ce80" name="scripts/presets/standalone-ssl.json" role="test" />
+ <file md5sum="6bc26d82415e183545849d4b676bf3a2" name="scripts/presets/standalone-x509.json" role="test" />
<file md5sum="17927182a5fb8fb9d1ad8c8571c32b29" name="scripts/presets/standalone.json" role="test" />
<file md5sum="06b59105c25447470ca9f7c589b80b46" name="scripts/ssl/ca.pem" role="test" />
<file md5sum="6bdc883fdbe5a1c736518e5ec5f67964" name="scripts/ssl/client.pem" role="test" />
<file md5sum="94a4db9c47b58aa3d00b70b4bd178601" name="scripts/ssl/crl.pem" role="test" />
<file md5sum="bfccd366ca201a01f8f5bea8c900abc9" name="scripts/ssl/server.pem" role="test" />
<file md5sum="cb636b47cf37dace58be73272a74efc3" name="scripts/ubuntu/ldap/install.sh" role="test" />
<file md5sum="c4c1a6c234d983da8fe0382bcb8c1420" name="scripts/ubuntu/ldap/saslauthd.conf" role="test" />
<file md5sum="b199baa1ee52bc252773485de56b10f6" name="scripts/ubuntu/essentials.sh" role="test" />
- <file md5sum="b4a9eb39b992590f39b56e4fdbabe757" name="scripts/ubuntu/mongo-orchestration.sh" role="test" />
+ <file md5sum="e4b6ac22486545cec3268e13d5be9f0f" name="scripts/ubuntu/mongo-orchestration.sh" role="test" />
<file md5sum="903c38a7e9d59fec770c1138de85d7be" name="scripts/ubuntu/phongo.sh" role="test" />
<file md5sum="a51616233fb311f458391b668e0254e9" name="scripts/vmware/kernel.sh" role="test" />
- <file md5sum="9b14bbe9f6b4ada5e84f478e84805fa8" name="scripts/convert-bson-corpus-tests.php" role="test" />
+ <file md5sum="2592064a5a2770fd15c8f22dddb571ab" name="scripts/convert-bson-corpus-tests.php" role="test" />
<file md5sum="28868809c797d2b57b0f21bfc66b4862" name="scripts/convert-mo-tests.php" role="test" />
<file md5sum="35671ea9b24b02610710ef676bc539a2" name="scripts/list-servers.php" role="test" />
<file md5sum="59b4cf65e9412c174b2641a6a1602d95" name="scripts/run-tests-on.sh" role="test" />
- <file md5sum="43b39f67f12cacf6f21de522b51a2900" name="scripts/start-servers.php" role="test" />
+ <file md5sum="3c22b16954dc3a46f210457be026be52" name="scripts/start-servers.php" role="test" />
<file md5sum="d14a6269eb6e416a771a424f11756f8b" name="src/BSON/Binary.c" role="src" />
<file md5sum="7df993f9c816c4e09a58fc2e1816a7c3" name="src/BSON/BinaryInterface.c" role="src" />
+ <file md5sum="cf51c8132a17195040294badbbe67190" name="src/BSON/DBPointer.c" role="src" />
<file md5sum="55d044b0ff815cbdd0476fc2a734f115" name="src/BSON/Decimal128.c" role="src" />
<file md5sum="16d04e4ef3a1e2b15e69065fad890923" name="src/BSON/Decimal128Interface.c" role="src" />
<file md5sum="1bb08faf36b65492cd0a96659035c361" name="src/BSON/Javascript.c" role="src" />
<file md5sum="cafff6b5cc46e189b1fc888a669ec8be" name="src/BSON/JavascriptInterface.c" role="src" />
<file md5sum="eab435547b4f7c6f346d957b791e095a" name="src/BSON/MaxKey.c" role="src" />
<file md5sum="7a7b804261e8e7379f59766715e501e0" name="src/BSON/MaxKeyInterface.c" role="src" />
<file md5sum="b871a13de35771e56ee1051d9d3cb615" name="src/BSON/MinKey.c" role="src" />
<file md5sum="07ce2a7198ab852f320db1747abdca0f" name="src/BSON/MinKeyInterface.c" role="src" />
- <file md5sum="5ab0cf7a482da9442249640c36e5d329" name="src/BSON/ObjectId.c" role="src" />
+ <file md5sum="f9e48a6c7af62935f13514f2cc0fa741" name="src/BSON/ObjectId.c" role="src" />
<file md5sum="8e3e19e00d2d8b7ce998a965cf46e2b3" name="src/BSON/ObjectIdInterface.c" role="src" />
<file md5sum="0336ec56b66060a844a6b31a216d730c" name="src/BSON/Persistable.c" role="src" />
<file md5sum="843133a6ba2518faaff4fc785d76136b" name="src/BSON/Regex.c" role="src" />
<file md5sum="5180db0f96e6beb29fc3b772adb9158e" name="src/BSON/RegexInterface.c" role="src" />
<file md5sum="fb4b90f3e3f9bd1915bc9c7c4f2a1244" name="src/BSON/Serializable.c" role="src" />
- <file md5sum="41f94edc50bb66df105e4d2b3f1b7baa" name="src/BSON/Timestamp.c" role="src" />
+ <file md5sum="429914a3f3cb030d6cd932fbdb040546" name="src/BSON/Symbol.c" role="src" />
+ <file md5sum="9b41847ae96900bc80d8099a8e18bef9" name="src/BSON/Timestamp.c" role="src" />
<file md5sum="266d3c2484c57bf167265f05f4ad5af3" name="src/BSON/TimestampInterface.c" role="src" />
<file md5sum="17063cbd826534efd4b81788b6201be7" name="src/BSON/Type.c" role="src" />
- <file md5sum="ff48a4039584f325da612d6a4c0e16cc" name="src/BSON/UTCDateTime.c" role="src" />
+ <file md5sum="305d7433a2d5691a7cc0117c9c7d2a13" name="src/BSON/UTCDateTime.c" role="src" />
<file md5sum="a0cbd3acd8640bbd1fd0c1c115ce9fea" name="src/BSON/UTCDateTimeInterface.c" role="src" />
+ <file md5sum="1ace80dae779334beb1a742ce22df12a" name="src/BSON/Undefined.c" role="src" />
<file md5sum="39985e558097d1a920bbf72beacb2e3d" name="src/BSON/Unserializable.c" role="src" />
<file md5sum="0945986beea0d9d0713126d749210084" name="src/BSON/functions.c" role="src" />
<file md5sum="7a1b4345826f34b9f26c93333bf8ad49" name="src/BSON/functions.h" role="src" />
<file md5sum="74f5b1f8252bda44030eb0a86a35e2b7" name="src/MongoDB/Exception/AuthenticationException.c" role="src" />
<file md5sum="a55b2f9706714666458ee0f9064e3267" name="src/MongoDB/Exception/BulkWriteException.c" role="src" />
<file md5sum="3f8c706e90900defbd9ec044a6df0991" name="src/MongoDB/Exception/ConnectionException.c" role="src" />
<file md5sum="2f97221a8446686d69a6bc6f3cf0461a" name="src/MongoDB/Exception/ConnectionTimeoutException.c" role="src" />
<file md5sum="7bec617d060545019ce32c1d32d0d1b5" name="src/MongoDB/Exception/Exception.c" role="src" />
<file md5sum="88cec8fbb76fb9bbba9aa570051d56e7" name="src/MongoDB/Exception/ExecutionTimeoutException.c" role="src" />
<file md5sum="ff92a29b34c9488fa0c4d7fe1ff307e1" name="src/MongoDB/Exception/InvalidArgumentException.c" role="src" />
<file md5sum="49f24f546f72b5a3c42365acdb045021" name="src/MongoDB/Exception/LogicException.c" role="src" />
<file md5sum="c9bfbd21b54c406361be456ab651fff0" name="src/MongoDB/Exception/RuntimeException.c" role="src" />
<file md5sum="c3bef22695e437503db39f96af831b3d" name="src/MongoDB/Exception/SSLConnectionException.c" role="src" />
<file md5sum="ca86241ea9380ba759be1f0e9ed40389" name="src/MongoDB/Exception/UnexpectedValueException.c" role="src" />
<file md5sum="130065c832f252a8b190a8f1ec9cdecb" name="src/MongoDB/Exception/WriteException.c" role="src" />
- <file md5sum="c11a2d655636e86c62fd868156d4d7ae" name="src/MongoDB/Monitoring/CommandFailedEvent.c" role="src" />
+ <file md5sum="8cb50066ed7d907a1a1a385daae7d896" name="src/MongoDB/Monitoring/CommandFailedEvent.c" role="src" />
<file md5sum="c64fc99c9ab6828010adee8f860dfc6c" name="src/MongoDB/Monitoring/CommandStartedEvent.c" role="src" />
<file md5sum="0866a6818df23aece2d32318b6e29655" name="src/MongoDB/Monitoring/CommandSubscriber.c" role="src" />
- <file md5sum="8d51f4c1a530b9bc11566c55d8b95ca2" name="src/MongoDB/Monitoring/CommandSucceededEvent.c" role="src" />
+ <file md5sum="6ea74897444e93b5fff349e4d623d820" name="src/MongoDB/Monitoring/CommandSucceededEvent.c" role="src" />
<file md5sum="faeedc19cbfadcf784a090c5d78916b6" name="src/MongoDB/Monitoring/Subscriber.c" role="src" />
<file md5sum="5b178b398d8e8df37173904878702c2e" name="src/MongoDB/Monitoring/functions.c" role="src" />
<file md5sum="96694180b415de1ecec41c7ae9dc3a44" name="src/MongoDB/Monitoring/functions.h" role="src" />
- <file md5sum="c06fc2f8e7c1bde733f983aa5b4c86bb" name="src/MongoDB/BulkWrite.c" role="src" />
- <file md5sum="77158a16551affce6f281d0d88e350f1" name="src/MongoDB/Command.c" role="src" />
- <file md5sum="82abd413563a99b2ea92b821d4e8dd20" name="src/MongoDB/Cursor.c" role="src" />
+ <file md5sum="a080e8cf1bab6dff209a08e69f8bffda" name="src/MongoDB/BulkWrite.c" role="src" />
+ <file md5sum="68fe0468e54c982bc862c7619f49a33c" name="src/MongoDB/Command.c" role="src" />
+ <file md5sum="eaa08ca3274a377689b8e3a2d84243f8" name="src/MongoDB/Cursor.c" role="src" />
<file md5sum="2e90713814d53608c141a17ad77610c2" name="src/MongoDB/CursorId.c" role="src" />
- <file md5sum="58bd04a563f18dca12e38198215ead02" name="src/MongoDB/Manager.c" role="src" />
- <file md5sum="bdad9c7bc1a96275b4afd01a46ccb2fc" name="src/MongoDB/Query.c" role="src" />
- <file md5sum="09f90c52a90e34666f2a70313c90d71a" name="src/MongoDB/ReadConcern.c" role="src" />
- <file md5sum="7c515359c6e834c3f5e647f029ee97b0" name="src/MongoDB/ReadPreference.c" role="src" />
- <file md5sum="64e8f8a17378421b018bf380e3d9e41f" name="src/MongoDB/Server.c" role="src" />
- <file md5sum="8230944f5109e5ff565bdd4fb88efd93" name="src/MongoDB/WriteConcern.c" role="src" />
+ <file md5sum="1557a3fc2ea8bdb1c181907ad75a3714" name="src/MongoDB/Manager.c" role="src" />
+ <file md5sum="56bf7a148731f00d13f18e9653d7ca78" name="src/MongoDB/Query.c" role="src" />
+ <file md5sum="4f1449f73c2dc3ba49e565df55714417" name="src/MongoDB/ReadConcern.c" role="src" />
+ <file md5sum="62314461edce88f6acf9441d3c85d7d5" name="src/MongoDB/ReadPreference.c" role="src" />
+ <file md5sum="11cca8e6e68cc824d040a67e87a357d4" name="src/MongoDB/Server.c" role="src" />
+ <file md5sum="357dbd43dabe879c9f190cd89e466274" name="src/MongoDB/Session.c" role="src" />
+ <file md5sum="72184b9958399309260d96db026eede6" name="src/MongoDB/WriteConcern.c" role="src" />
<file md5sum="9a8c6bc1bcd40f41c66dbc5ff158eae4" name="src/MongoDB/WriteConcernError.c" role="src" />
<file md5sum="18d550e20c3ee1c61c0c72c3076b507c" name="src/MongoDB/WriteError.c" role="src" />
<file md5sum="f1a59d89c0aed68b9d3f8882f79254af" name="src/MongoDB/WriteResult.c" role="src" />
<file md5sum="d159204be1f00a1da4bfceb3de74127a" name="src/contrib/php_array_api.h" role="src" />
<file md5sum="1066d5c9a05b1b1cb8f4de5a7e291574" name="src/libbson/build/autotools/m4/ac_check_typedef.m4" role="src" />
- <file md5sum="e40a5f02f4d82a68b3df43d99dd92ed6" name="src/libbson/build/autotools/m4/ac_compile_check_sizeof.m4" role="src" />
+ <file md5sum="3b18aa7bc3e6b3154812f31409cfd243" name="src/libbson/build/autotools/m4/ac_compile_check_sizeof.m4" role="src" />
<file md5sum="b96bb8fad4ea5d66c6b3af718186e2e0" name="src/libbson/build/autotools/m4/ac_create_stdint_h.m4" role="src" />
<file md5sum="7b5ee49855c90a9da6a0b1f1f0b8ef8f" name="src/libbson/build/autotools/m4/as-compiler-flag.m4" role="src" />
<file md5sum="8d942f69b5f3c15ecae4b75bb7e80614" name="src/libbson/build/autotools/m4/ax_check_compile_flag.m4" role="src" />
<file md5sum="05bd24609268702a36d50d5edd3661ac" name="src/libbson/build/autotools/m4/ax_check_link_flag.m4" role="src" />
<file md5sum="b5114dfcf027b0f9a47b6e6841015be6" name="src/libbson/build/autotools/m4/ax_pthread.m4" role="src" />
<file md5sum="ad8d52d54e0f97c0e4e385376ea73bc0" name="src/libbson/build/autotools/m4/pkg.m4" role="src" />
<file md5sum="ff788a8b5eea35b1cc226bb2686c3537" name="src/libbson/build/autotools/m4/silent.m4" role="src" />
<file md5sum="c78c5e9b49b67a725e831823492642da" name="src/libbson/build/autotools/CheckAtomics.m4" role="src" />
<file md5sum="4cb0e36b71da6fefc55aaa925a7ed090" name="src/libbson/build/autotools/CheckCompiler.m4" role="src" />
<file md5sum="7321a188b1b870fb49f5a003ef426f7a" name="src/libbson/build/autotools/CheckHeaders.m4" role="src" />
<file md5sum="81b2b641c597d18daa4066ed7c37d7b7" name="src/libbson/build/autotools/CheckHost.m4" role="src" />
<file md5sum="0a6502f6d302fb02d36edbbdda12e869" name="src/libbson/build/autotools/CheckProgs.m4" role="src" />
<file md5sum="fa4a5cc1c9475999c468488eb19e4ffe" name="src/libbson/build/autotools/CheckTarget.m4" role="src" />
<file md5sum="760f59b300da59088cde1fec10892516" name="src/libbson/build/autotools/Coverage.m4" role="src" />
<file md5sum="46768d326c5f994af7e1ffbb0db32a4e" name="src/libbson/build/autotools/Endian.m4" role="src" />
- <file md5sum="690ddc4c25caf6c265a186e491b757dc" name="src/libbson/build/autotools/FindDependencies.m4" role="src" />
+ <file md5sum="17dc7a49c79ce5f44419312740f742fe" name="src/libbson/build/autotools/FindDependencies.m4" role="src" />
<file md5sum="35896e731d3e5cf5c43bbf0f692ff4d0" name="src/libbson/build/autotools/MaintainerFlags.m4" role="src" />
<file md5sum="6a2e174f53c50c3bddfb31444cf24174" name="src/libbson/build/autotools/Optimizations.m4" role="src" />
<file md5sum="98c586cc0577a30efcf22b03eca1e35e" name="src/libbson/build/autotools/PrintBuildConfiguration.m4" role="src" />
<file md5sum="b9f914f2e4297c914e0bc543ff2e752d" name="src/libbson/build/autotools/ReadCommandLineArguments.m4" role="src" />
<file md5sum="2ceb791b5f1e651cf2c9a90fd0745fc6" name="src/libbson/build/autotools/SetupAutomake.m4" role="src" />
<file md5sum="f229599b4333a2cdf5be29136cc6f2e2" name="src/libbson/build/autotools/SetupLibtool.m4" role="src" />
<file md5sum="11f96f71f11600b5fa8c9e51ae5b3e2b" name="src/libbson/build/autotools/Versions.m4" role="src" />
<file md5sum="35179115716a34618f54785afcd1dc2e" name="src/libbson/src/bson/b64_ntop.h" role="src" />
<file md5sum="aca78bd4d4f53250b59f27ceda49c1d3" name="src/libbson/src/bson/b64_pton.h" role="src" />
<file md5sum="99a683c36efadf01a789a04a267e818a" name="src/libbson/src/bson/bcon.c" role="src" />
<file md5sum="1d0147953321dbbbde6a710190a9e750" name="src/libbson/src/bson/bcon.h" role="src" />
<file md5sum="d56b6329106f234baf874a951bac928c" name="src/libbson/src/bson/bson-atomic.c" role="src" />
<file md5sum="5cf09303f44eb31e457bee6909660f45" name="src/libbson/src/bson/bson-atomic.h" role="src" />
<file md5sum="1c2e6bba1496dad7ce787f3f28f018f4" name="src/libbson/src/bson/bson-clock.c" role="src" />
<file md5sum="d21986aed6bc60037d7181385c4309e1" name="src/libbson/src/bson/bson-clock.h" role="src" />
- <file md5sum="d16a9e1a57cf9d8df9d1dd76a501e4cc" name="src/libbson/src/bson/bson-compat.h" role="src" />
- <file md5sum="a1a6423760ec9ae584324ecfa785a254" name="src/libbson/src/bson/bson-config.h" role="src" />
- <file md5sum="3227d326939c834044a05da8f9ff4b98" name="src/libbson/src/bson/bson-config.h.in" role="src" />
- <file md5sum="7c83d5bfb328b194fd2db6bab7258d2d" name="src/libbson/src/bson/bson-context-private.h" role="src" />
- <file md5sum="1a5bdcb82012de030330f1b37f01712a" name="src/libbson/src/bson/bson-context.c" role="src" />
+ <file md5sum="e21daae4e12e4fb8602f2e66a5850d8e" name="src/libbson/src/bson/bson-compat.h" role="src" />
+ <file md5sum="728eede6ab691a526eb073d862ecb6ab" name="src/libbson/src/bson/bson-config.h" role="src" />
+ <file md5sum="5ba0d121b6090ad42acd3038842875ba" name="src/libbson/src/bson/bson-config.h.in" role="src" />
+ <file md5sum="d12747c1a3b3ab5dd6500350efb8c46c" name="src/libbson/src/bson/bson-context-private.h" role="src" />
+ <file md5sum="5a501e0e1bf9b5c74967d25447d70860" name="src/libbson/src/bson/bson-context.c" role="src" />
<file md5sum="0ce7235528898e9386eee49f2fbedf72" name="src/libbson/src/bson/bson-context.h" role="src" />
- <file md5sum="7cd1fd9fd724a8b963f273fe54136fe5" name="src/libbson/src/bson/bson-decimal128.c" role="src" />
+ <file md5sum="ffd07c2cf3711aa6ed2ce6e0f59018d2" name="src/libbson/src/bson/bson-decimal128.c" role="src" />
<file md5sum="defa133b608377cedca59d2a10025861" name="src/libbson/src/bson/bson-decimal128.h" role="src" />
- <file md5sum="f810c3173565d1a4226924fa96cc4fb4" name="src/libbson/src/bson/bson-endian.h" role="src" />
+ <file md5sum="ad8560dbd09529645f9e2468f563bdf5" name="src/libbson/src/bson/bson-endian.h" role="src" />
<file md5sum="4f643560fbb549b900ea02d2a7b34484" name="src/libbson/src/bson/bson-error.c" role="src" />
<file md5sum="5630664f91304e1b8a3045eb643028f1" name="src/libbson/src/bson/bson-error.h" role="src" />
<file md5sum="0e9bf56e26c2c47bcdf903008965d1dc" name="src/libbson/src/bson/bson-iso8601-private.h" role="src" />
<file md5sum="c548fa583ea182327feecccff1305387" name="src/libbson/src/bson/bson-iso8601.c" role="src" />
- <file md5sum="d757d780b373f02085159851ebcceaa1" name="src/libbson/src/bson/bson-iter.c" role="src" />
+ <file md5sum="5e9e5863daed6d977eace14358e76674" name="src/libbson/src/bson/bson-iter.c" role="src" />
<file md5sum="dd83170ce4318311ab54b2b119353a6f" name="src/libbson/src/bson/bson-iter.h" role="src" />
- <file md5sum="29cc4a28ea2371840e2b7dea19446f3a" name="src/libbson/src/bson/bson-json.c" role="src" />
+ <file md5sum="623f1f8e31e5634cfa2763e8b3ff7b84" name="src/libbson/src/bson/bson-json.c" role="src" />
<file md5sum="4abec4ba499934eb616cec886b76ce87" name="src/libbson/src/bson/bson-json.h" role="src" />
<file md5sum="5addb8761faac2e8d52f4ded8ecdc0c0" name="src/libbson/src/bson/bson-keys.c" role="src" />
<file md5sum="48b815b8961a8eb65578415eb1a0713c" name="src/libbson/src/bson/bson-keys.h" role="src" />
- <file md5sum="e67d5f8b8b0e0f36dbfbb44b14b8eab0" name="src/libbson/src/bson/bson-macros.h" role="src" />
+ <file md5sum="f17b60322e01579527eaaa6697de96d4" name="src/libbson/src/bson/bson-macros.h" role="src" />
<file md5sum="655fcba49b9204462e57fdbc1cb68f36" name="src/libbson/src/bson/bson-md5.c" role="src" />
<file md5sum="5185c3fde67a07b5c7b3a7e3645dfa68" name="src/libbson/src/bson/bson-md5.h" role="src" />
<file md5sum="e70000535f99e5069d962d2626d218fc" name="src/libbson/src/bson/bson-memory.c" role="src" />
<file md5sum="68ae6217ff81f89a9828b4f89826bdc6" name="src/libbson/src/bson/bson-memory.h" role="src" />
<file md5sum="26412140d8bb2423e3c237762a54287a" name="src/libbson/src/bson/bson-oid.c" role="src" />
<file md5sum="d00b107347be18f9f734b16f9139ee76" name="src/libbson/src/bson/bson-oid.h" role="src" />
- <file md5sum="df1003919fbc78f442e20f2d67210b3e" name="src/libbson/src/bson/bson-private.h" role="src" />
+ <file md5sum="52b1a33ea7176b3b1a3c8b600330ee03" name="src/libbson/src/bson/bson-private.h" role="src" />
<file md5sum="e3ac042bb90dcc74a1ae9f91ec1e4eab" name="src/libbson/src/bson/bson-reader.c" role="src" />
<file md5sum="f12eccf815f6a0d7b29a8626f8308262" name="src/libbson/src/bson/bson-reader.h" role="src" />
<file md5sum="24ea8bc94dbb3b4fe28454b79c203daa" name="src/libbson/src/bson/bson-stdint-win32.h" role="src" />
- <file md5sum="60c6c3ec0213fa427f04e185942e059d" name="src/libbson/src/bson/bson-stdint.h" role="src" />
+ <file md5sum="192287aa47e5e95b1cd8e6640c0ac459" name="src/libbson/src/bson/bson-stdint.h" role="src" />
<file md5sum="779e4a3ae6dac33b13c6db2aab7f23d6" name="src/libbson/src/bson/bson-string.c" role="src" />
<file md5sum="41165866f48ddd46854141731aa0de4e" name="src/libbson/src/bson/bson-string.h" role="src" />
<file md5sum="3bbd698645cbf18ca5d506143596b9e5" name="src/libbson/src/bson/bson-thread-private.h" role="src" />
<file md5sum="5de0e6d0924140172b760b2eac696d70" name="src/libbson/src/bson/bson-timegm-private.h" role="src" />
<file md5sum="cf1148af5b260d787b5a8de5ba82de77" name="src/libbson/src/bson/bson-timegm.c" role="src" />
- <file md5sum="6dd71b19418e554c1d79a449f9bf9905" name="src/libbson/src/bson/bson-types.h" role="src" />
- <file md5sum="a899768438fe70effd3b3a4bad2d7e15" name="src/libbson/src/bson/bson-utf8.c" role="src" />
+ <file md5sum="6b218491ee85babf32f29c1d1fc5ec5b" name="src/libbson/src/bson/bson-types.h" role="src" />
+ <file md5sum="358a642772ea96fbf58fbd958571f4cf" name="src/libbson/src/bson/bson-utf8.c" role="src" />
<file md5sum="150695a215cba353bcbabe624e99079e" name="src/libbson/src/bson/bson-utf8.h" role="src" />
<file md5sum="7e401956be29c9120b710f29b8025a01" name="src/libbson/src/bson/bson-value.c" role="src" />
<file md5sum="5b35eb0ca414856e3fb63ecc2ac2e93e" name="src/libbson/src/bson/bson-value.h" role="src" />
<file md5sum="0bcfe98e683ae4fa5a6321fd6cafd9cd" name="src/libbson/src/bson/bson-version-functions.c" role="src" />
<file md5sum="db11f3671abe92f321d610ff164bbba3" name="src/libbson/src/bson/bson-version-functions.h" role="src" />
- <file md5sum="938dcda562e819c6637c977d75b7144c" name="src/libbson/src/bson/bson-version.h" role="src" />
+ <file md5sum="2e027e2fb82a7becb858a25824a447fc" name="src/libbson/src/bson/bson-version.h" role="src" />
<file md5sum="9036341d3488d5aec2b028fae66c8c6f" name="src/libbson/src/bson/bson-version.h.in" role="src" />
<file md5sum="64d0f30e1a6a512ce152af3dae1263e1" name="src/libbson/src/bson/bson-writer.c" role="src" />
<file md5sum="ad34cf6fe3c6f7ec508501b7a16b8555" name="src/libbson/src/bson/bson-writer.h" role="src" />
- <file md5sum="fe99d7148748772118e353440c742d5a" name="src/libbson/src/bson/bson.c" role="src" />
+ <file md5sum="6c03f2675ae52a8a76b007c6e7adbc16" name="src/libbson/src/bson/bson.c" role="src" />
<file md5sum="1faa57e8eae6663c9afc8282ffdf353a" name="src/libbson/src/bson/bson.h" role="src" />
<file md5sum="913a26899a259e2365a8d7dce81fae36" name="src/libbson/src/jsonsl/jsonsl.c" role="src" />
<file md5sum="4caafeb1cc1bb7f3561c5b51635d98cc" name="src/libbson/src/jsonsl/jsonsl.h" role="src" />
- <file md5sum="b14b05b0fd5ab1c867397927b67c1bd3" name="src/libbson/VERSION_CURRENT" role="src" />
- <file md5sum="b14b05b0fd5ab1c867397927b67c1bd3" name="src/libbson/VERSION_RELEASED" role="src" />
+ <file md5sum="3f1b82dd2ff6d570bac503930a74e609" name="src/libbson/VERSION_CURRENT" role="src" />
+ <file md5sum="3f1b82dd2ff6d570bac503930a74e609" name="src/libbson/VERSION_RELEASED" role="src" />
<file md5sum="1066d5c9a05b1b1cb8f4de5a7e291574" name="src/libmongoc/build/autotools/m4/ac_check_typedef.m4" role="src" />
<file md5sum="e40a5f02f4d82a68b3df43d99dd92ed6" name="src/libmongoc/build/autotools/m4/ac_compile_check_sizeof.m4" role="src" />
<file md5sum="b96bb8fad4ea5d66c6b3af718186e2e0" name="src/libmongoc/build/autotools/m4/ac_create_stdint_h.m4" role="src" />
<file md5sum="7b5ee49855c90a9da6a0b1f1f0b8ef8f" name="src/libmongoc/build/autotools/m4/as-compiler-flag.m4" role="src" />
<file md5sum="8d942f69b5f3c15ecae4b75bb7e80614" name="src/libmongoc/build/autotools/m4/ax_check_compile_flag.m4" role="src" />
<file md5sum="05bd24609268702a36d50d5edd3661ac" name="src/libmongoc/build/autotools/m4/ax_check_link_flag.m4" role="src" />
<file md5sum="c2221efd4309e58ff7e2ef989c8e8ac4" name="src/libmongoc/build/autotools/m4/ax_prototype.m4" role="src" />
<file md5sum="5535e823ec44002b522873b9c0e2e0bf" name="src/libmongoc/build/autotools/m4/ax_pthread.m4" role="src" />
<file md5sum="ad8d52d54e0f97c0e4e385376ea73bc0" name="src/libmongoc/build/autotools/m4/pkg.m4" role="src" />
<file md5sum="ff788a8b5eea35b1cc226bb2686c3537" name="src/libmongoc/build/autotools/m4/silent.m4" role="src" />
<file md5sum="7c20548ba18d4b99f89203d853809bfc" name="src/libmongoc/build/autotools/AutomaticInitAndCleanup.m4" role="src" />
<file md5sum="3480ac890714989f605b6d61c8708401" name="src/libmongoc/build/autotools/CheckCompiler.m4" role="src" />
<file md5sum="f1f5b2f380cdf6c555f360c234aeca3a" name="src/libmongoc/build/autotools/CheckHost.m4" role="src" />
<file md5sum="35ce193f4cf9dbfcdd1627163c75cc71" name="src/libmongoc/build/autotools/CheckProgs.m4" role="src" />
<file md5sum="70e9ccaf09eb1ec462e3bd431d5a4dcf" name="src/libmongoc/build/autotools/CheckSSL.m4" role="src" />
<file md5sum="4a5dabcbff686b0b144aed72a30a9cd5" name="src/libmongoc/build/autotools/CheckSasl.m4" role="src" />
- <file md5sum="8c49a0bd38c73f5841d0030bfa0f3ef4" name="src/libmongoc/build/autotools/CheckSnappy.m4" role="src" />
+ <file md5sum="aaf9c4e52c4ee1c58acf172ce10e71ca" name="src/libmongoc/build/autotools/CheckSnappy.m4" role="src" />
<file md5sum="fa4a5cc1c9475999c468488eb19e4ffe" name="src/libmongoc/build/autotools/CheckTarget.m4" role="src" />
- <file md5sum="9a3a05a5685775347460840adb9c4486" name="src/libmongoc/build/autotools/CheckZlib.m4" role="src" />
+ <file md5sum="3f4f21d64e5461eeff4e5b9b3d73e5eb" name="src/libmongoc/build/autotools/CheckZlib.m4" role="src" />
<file md5sum="760f59b300da59088cde1fec10892516" name="src/libmongoc/build/autotools/Coverage.m4" role="src" />
<file md5sum="4a237ac84b7e32b71012b7500d625a00" name="src/libmongoc/build/autotools/FindDependencies.m4" role="src" />
+ <file md5sum="9f2d233fe8895be1e5eec46671cdb8fb" name="src/libmongoc/build/autotools/FindResSearch.m4" role="src" />
<file md5sum="a8977370dbf28d606e4a6628bba94fca" name="src/libmongoc/build/autotools/Libbson.m4" role="src" />
<file md5sum="35896e731d3e5cf5c43bbf0f692ff4d0" name="src/libmongoc/build/autotools/MaintainerFlags.m4" role="src" />
<file md5sum="4628165f19e500c1f48dc94ee6442f87" name="src/libmongoc/build/autotools/Optimizations.m4" role="src" />
<file md5sum="46230e2d1a5a66ed0fd493329f453ad3" name="src/libmongoc/build/autotools/PlatformFlags.m4" role="src" />
- <file md5sum="5feb147681d6ac164a81301ad514c041" name="src/libmongoc/build/autotools/PrintBuildConfiguration.m4" role="src" />
- <file md5sum="2af8294d29c13625ff7dd4eece5acfcb" name="src/libmongoc/build/autotools/ReadCommandLineArguments.m4" role="src" />
+ <file md5sum="8addda99bd6e21b41d19fa80d25334f2" name="src/libmongoc/build/autotools/PrintBuildConfiguration.m4" role="src" />
+ <file md5sum="e1cf39a64c221b9998e084bb4f45528d" name="src/libmongoc/build/autotools/ReadCommandLineArguments.m4" role="src" />
<file md5sum="36ecefb0f36ef55cdca6867d27640a70" name="src/libmongoc/build/autotools/SetupAutomake.m4" role="src" />
<file md5sum="f229599b4333a2cdf5be29136cc6f2e2" name="src/libmongoc/build/autotools/SetupLibtool.m4" role="src" />
<file md5sum="7147aed24085b6842f1032bc34e89045" name="src/libmongoc/build/autotools/Versions.m4" role="src" />
<file md5sum="76e2c1a2aa19f5fab3661c992ac603fa" name="src/libmongoc/build/autotools/WeakSymbols.m4" role="src" />
- <file md5sum="ea2e2423008beb81a1cf53f40bdbe16c" name="src/libmongoc/src/mongoc/mongoc-apm-private.h" role="src" />
- <file md5sum="74f1108d26c0f509868694bb493f8a02" name="src/libmongoc/src/mongoc/mongoc-apm.c" role="src" />
+ <file md5sum="9de9a78644f4100f71b9ec49b36196be" name="src/libmongoc/src/mongoc/mongoc-apm-private.h" role="src" />
+ <file md5sum="ce8cdd246f972fc1f662cc8c8202d171" name="src/libmongoc/src/mongoc/mongoc-apm.c" role="src" />
<file md5sum="207fee8a513629bd31565b116747de5c" name="src/libmongoc/src/mongoc/mongoc-apm.h" role="src" />
<file md5sum="bd11e316611f37bf4fd78322cd317583" name="src/libmongoc/src/mongoc/mongoc-array-private.h" role="src" />
<file md5sum="475df6bf5cd4e55e0d4b26db29ea71fc" name="src/libmongoc/src/mongoc/mongoc-array.c" role="src" />
<file md5sum="a3a4317be8be49e3c2b5feedf4baea6d" name="src/libmongoc/src/mongoc/mongoc-async-cmd-private.h" role="src" />
<file md5sum="ca55b22165d67fe66219e4844ca6a35a" name="src/libmongoc/src/mongoc/mongoc-async-cmd.c" role="src" />
<file md5sum="fcd024016dd5159dd106bca4fafe4d1f" name="src/libmongoc/src/mongoc/mongoc-async-private.h" role="src" />
<file md5sum="988dc35e9f8d4fd8e173a7eed16766ba" name="src/libmongoc/src/mongoc/mongoc-async.c" role="src" />
<file md5sum="b53def4c8da742e64018a45a4d15c36b" name="src/libmongoc/src/mongoc/mongoc-b64-private.h" role="src" />
<file md5sum="82a5c521649fe2860c1d3326adc3a826" name="src/libmongoc/src/mongoc/mongoc-b64.c" role="src" />
- <file md5sum="3d51cd816b3ffb6b465f4b7816c1ffbc" name="src/libmongoc/src/mongoc/mongoc-buffer-private.h" role="src" />
- <file md5sum="0025869fd12130777fcafa96cff8588f" name="src/libmongoc/src/mongoc/mongoc-buffer.c" role="src" />
- <file md5sum="722d1792405f3a2c9cde281dc753de54" name="src/libmongoc/src/mongoc/mongoc-bulk-operation-private.h" role="src" />
- <file md5sum="6f28ff3686df3ce60d9955997dab83ae" name="src/libmongoc/src/mongoc/mongoc-bulk-operation.c" role="src" />
- <file md5sum="b18326ba0bbb5122e2ef51563c4d2f0c" name="src/libmongoc/src/mongoc/mongoc-bulk-operation.h" role="src" />
+ <file md5sum="8c7d67dca2b0adb3943c5626e2ab1445" name="src/libmongoc/src/mongoc/mongoc-buffer-private.h" role="src" />
+ <file md5sum="4c40d9be4b083e48429629bbf922d277" name="src/libmongoc/src/mongoc/mongoc-buffer.c" role="src" />
+ <file md5sum="ce7e365452f983071961183b5dd0e1b9" name="src/libmongoc/src/mongoc/mongoc-bulk-operation-private.h" role="src" />
+ <file md5sum="9fb3b5540b465609217db04890e026f9" name="src/libmongoc/src/mongoc/mongoc-bulk-operation.c" role="src" />
+ <file md5sum="370d2e2fd9c1cdc33e4770c24016f7c2" name="src/libmongoc/src/mongoc/mongoc-bulk-operation.h" role="src" />
+ <file md5sum="59d433001a69344ff5ec658c42045680" name="src/libmongoc/src/mongoc/mongoc-change-stream-private.h" role="src" />
+ <file md5sum="3d0beb7924844d5d544d5edeb9f9960d" name="src/libmongoc/src/mongoc/mongoc-change-stream.c" role="src" />
+ <file md5sum="b9911cf65210794de62cf204f5e72b1a" name="src/libmongoc/src/mongoc/mongoc-change-stream.h" role="src" />
<file md5sum="8029211c59091738fac377bb832c4587" name="src/libmongoc/src/mongoc/mongoc-client-pool-private.h" role="src" />
- <file md5sum="5c96ea67c95255f3d7776612f1630faf" name="src/libmongoc/src/mongoc/mongoc-client-pool.c" role="src" />
- <file md5sum="5ed58d3f17f3f9ee6c316e15a2462f7e" name="src/libmongoc/src/mongoc/mongoc-client-pool.h" role="src" />
- <file md5sum="10856dad9ac49bf8c363d6cad4a100d8" name="src/libmongoc/src/mongoc/mongoc-client-private.h" role="src" />
- <file md5sum="60c8defe98b9452e5150c7650bba3fec" name="src/libmongoc/src/mongoc/mongoc-client.c" role="src" />
- <file md5sum="dc0a933b77f6085327f188e47e274010" name="src/libmongoc/src/mongoc/mongoc-client.h" role="src" />
- <file md5sum="0bd9be299caf9f263d94ed7552c3de6f" name="src/libmongoc/src/mongoc/mongoc-cluster-cyrus-private.h" role="src" />
- <file md5sum="c06fdd5e03e44059c8710c3989be445b" name="src/libmongoc/src/mongoc/mongoc-cluster-cyrus.c" role="src" />
- <file md5sum="0ea28874f2586a7776308f24c50f84e2" name="src/libmongoc/src/mongoc/mongoc-cluster-gssapi-private.h" role="src" />
- <file md5sum="d18dc845f402ee97d24fb80d55fb3c80" name="src/libmongoc/src/mongoc/mongoc-cluster-gssapi.c" role="src" />
- <file md5sum="e24006fc3a96d3f2a75bdbcfd28feeb2" name="src/libmongoc/src/mongoc/mongoc-cluster-private.h" role="src" />
- <file md5sum="7472ad5e2a3213da913dbc44ddbd8a5e" name="src/libmongoc/src/mongoc/mongoc-cluster-sasl-private.h" role="src" />
- <file md5sum="27cedac4a829541e02757a926b20fca5" name="src/libmongoc/src/mongoc/mongoc-cluster-sasl.c" role="src" />
- <file md5sum="60a1f3b8eda853e0d9cec76a2da75675" name="src/libmongoc/src/mongoc/mongoc-cluster-sspi-private.h" role="src" />
- <file md5sum="070def04eb71e7f5b7cf62c617342a07" name="src/libmongoc/src/mongoc/mongoc-cluster-sspi.c" role="src" />
- <file md5sum="614991f2da444cb2455d52c74aa8077d" name="src/libmongoc/src/mongoc/mongoc-cluster.c" role="src" />
- <file md5sum="1d1dbda12341f4664ef40a3914d375e1" name="src/libmongoc/src/mongoc/mongoc-cmd-private.h" role="src" />
- <file md5sum="a1dedaa7ea1be26a84a0a337991a9bed" name="src/libmongoc/src/mongoc/mongoc-cmd.c" role="src" />
- <file md5sum="5c498ffb859a099b392ce32633578ab6" name="src/libmongoc/src/mongoc/mongoc-collection-private.h" role="src" />
- <file md5sum="826bcef10c6f5b85ff3c0d1d8b676e16" name="src/libmongoc/src/mongoc/mongoc-collection.c" role="src" />
- <file md5sum="8d6931dbc0b54428419691902f41f7db" name="src/libmongoc/src/mongoc/mongoc-collection.h" role="src" />
+ <file md5sum="41347d42f40998b1f5152a69989f0330" name="src/libmongoc/src/mongoc/mongoc-client-pool.c" role="src" />
+ <file md5sum="a40d76b30d937b0ee28faf6dbec61820" name="src/libmongoc/src/mongoc/mongoc-client-pool.h" role="src" />
+ <file md5sum="e3525b6193720c353e169ae57d7d09dd" name="src/libmongoc/src/mongoc/mongoc-client-private.h" role="src" />
+ <file md5sum="6b7b8729aada78dd70501ba2ffd6dda7" name="src/libmongoc/src/mongoc/mongoc-client-session-private.h" role="src" />
+ <file md5sum="582f76234bf790b160386b936e7301ee" name="src/libmongoc/src/mongoc/mongoc-client-session.c" role="src" />
+ <file md5sum="507ecc13b4b765831f8d0cd808beaa4c" name="src/libmongoc/src/mongoc/mongoc-client-session.h" role="src" />
+ <file md5sum="c219f56bcb2f06812509ca42ca5214b3" name="src/libmongoc/src/mongoc/mongoc-client.c" role="src" />
+ <file md5sum="71a318d4cf8fd9ee0f6ff30976c9a679" name="src/libmongoc/src/mongoc/mongoc-client.h" role="src" />
+ <file md5sum="0f7266845534239e1ca200943e20b203" name="src/libmongoc/src/mongoc/mongoc-cluster-cyrus-private.h" role="src" />
+ <file md5sum="ad7f5316b39288ac93d7fc6b53e89388" name="src/libmongoc/src/mongoc/mongoc-cluster-cyrus.c" role="src" />
+ <file md5sum="95927bceb030f4cbadeb4cae8d91ffc6" name="src/libmongoc/src/mongoc/mongoc-cluster-gssapi-private.h" role="src" />
+ <file md5sum="aa7ed0c6f7762d8f9a72543f21672887" name="src/libmongoc/src/mongoc/mongoc-cluster-gssapi.c" role="src" />
+ <file md5sum="490f4a45a39c15e1a3b56981ff85ff2f" name="src/libmongoc/src/mongoc/mongoc-cluster-private.h" role="src" />
+ <file md5sum="ca236c4b650901b82abdf1692702e566" name="src/libmongoc/src/mongoc/mongoc-cluster-sasl-private.h" role="src" />
+ <file md5sum="14da691f7bd13c067d424245ee9280da" name="src/libmongoc/src/mongoc/mongoc-cluster-sasl.c" role="src" />
+ <file md5sum="8b425eab41ba9ce67e9de23201a08794" name="src/libmongoc/src/mongoc/mongoc-cluster-sspi-private.h" role="src" />
+ <file md5sum="5cc55741170453ae77f875f4a00b1756" name="src/libmongoc/src/mongoc/mongoc-cluster-sspi.c" role="src" />
+ <file md5sum="7e8b4c7ba168a6f890fb206b56e17790" name="src/libmongoc/src/mongoc/mongoc-cluster.c" role="src" />
+ <file md5sum="65291d503906e4b5641e65bc5fadc855" name="src/libmongoc/src/mongoc/mongoc-cmd-private.h" role="src" />
+ <file md5sum="5df1b412434e1893655d22b193bf2e17" name="src/libmongoc/src/mongoc/mongoc-cmd.c" role="src" />
+ <file md5sum="c65e1b8be72426169d1485bafe62ef67" name="src/libmongoc/src/mongoc/mongoc-collection-private.h" role="src" />
+ <file md5sum="53ad91f98dfd4d35e117ac4ddea0cfd5" name="src/libmongoc/src/mongoc/mongoc-collection.c" role="src" />
+ <file md5sum="d6dd8c32d0da3935704c95923fa2308e" name="src/libmongoc/src/mongoc/mongoc-collection.h" role="src" />
<file md5sum="0719eeb539261f693d9f82e7b30ef5a4" name="src/libmongoc/src/mongoc/mongoc-compression-private.h" role="src" />
- <file md5sum="e33f59b808c10538815f6880d9cdfae6" name="src/libmongoc/src/mongoc/mongoc-compression.c" role="src" />
- <file md5sum="f997cfd5a1f8bca51447d55feaeedbaa" name="src/libmongoc/src/mongoc/mongoc-config.h" role="src" />
- <file md5sum="c9ac46af8ebe2580715a7e44f09a610d" name="src/libmongoc/src/mongoc/mongoc-config.h.in" role="src" />
+ <file md5sum="ecc7736accc6830203f5191d53115962" name="src/libmongoc/src/mongoc/mongoc-compression.c" role="src" />
+ <file md5sum="cfede66a4a5a0bc83f759ce247f18be6" name="src/libmongoc/src/mongoc/mongoc-config.h" role="src" />
+ <file md5sum="36496f02e80ef3d91d5e7519554663dc" name="src/libmongoc/src/mongoc/mongoc-config.h.in" role="src" />
<file md5sum="bf841aed8c3d1c149f57c2005027a52c" name="src/libmongoc/src/mongoc/mongoc-counters-private.h" role="src" />
- <file md5sum="7a86bc5362315beeb18c28bf4bde9b72" name="src/libmongoc/src/mongoc/mongoc-counters.c" role="src" />
- <file md5sum="81cd4511fe5a3a29e9c2d4c5d460e711" name="src/libmongoc/src/mongoc/mongoc-counters.defs" role="src" />
+ <file md5sum="3fc2582ce850656117296bf95619aa9a" name="src/libmongoc/src/mongoc/mongoc-counters.c" role="src" />
+ <file md5sum="04d57743da7337aaec4dfe958a5742b7" name="src/libmongoc/src/mongoc/mongoc-counters.defs" role="src" />
<file md5sum="6f3d1bd292923eacad0839df87407ea4" name="src/libmongoc/src/mongoc/mongoc-crypto-cng-private.h" role="src" />
<file md5sum="7024277616218a4c0ebb0a5644a3b6f7" name="src/libmongoc/src/mongoc/mongoc-crypto-cng.c" role="src" />
<file md5sum="1ef407f0f4fae573efe1c75f515b15ed" name="src/libmongoc/src/mongoc/mongoc-crypto-cng.h" role="src" />
<file md5sum="2b8af7462ad5b51fccc8b9916bb5092a" name="src/libmongoc/src/mongoc/mongoc-crypto-common-crypto-private.h" role="src" />
<file md5sum="6b28b2c651acdb41895fefcde92dac47" name="src/libmongoc/src/mongoc/mongoc-crypto-common-crypto.c" role="src" />
<file md5sum="9778d108cdaf179bb33ff6cbd48075d6" name="src/libmongoc/src/mongoc/mongoc-crypto-openssl-private.h" role="src" />
<file md5sum="a5a953a090d4b18f59a82078db934186" name="src/libmongoc/src/mongoc/mongoc-crypto-openssl.c" role="src" />
<file md5sum="75b63d16b8b2d7ded14036f5f073a8c8" name="src/libmongoc/src/mongoc/mongoc-crypto-private.h" role="src" />
<file md5sum="7af81250ea11f0a11ebc5b9f399a31d6" name="src/libmongoc/src/mongoc/mongoc-crypto.c" role="src" />
<file md5sum="a7ad4340af32b36aff7db6e188958865" name="src/libmongoc/src/mongoc/mongoc-cursor-array-private.h" role="src" />
- <file md5sum="991e3711d0cbe2e01536ab716273a8ef" name="src/libmongoc/src/mongoc/mongoc-cursor-array.c" role="src" />
+ <file md5sum="88ff2f5e6ee4d8ce9d6edf0e950df736" name="src/libmongoc/src/mongoc/mongoc-cursor-array.c" role="src" />
<file md5sum="624ed803369a45a77cd0ecf1b3eab701" name="src/libmongoc/src/mongoc/mongoc-cursor-cursorid-private.h" role="src" />
- <file md5sum="66a86a464d7e21bb3edb77744848eac5" name="src/libmongoc/src/mongoc/mongoc-cursor-cursorid.c" role="src" />
- <file md5sum="0dbb992934605032c5f78f9b8fdfafe3" name="src/libmongoc/src/mongoc/mongoc-cursor-private.h" role="src" />
+ <file md5sum="b4909fafce1cf245377a9d453bc84f31" name="src/libmongoc/src/mongoc/mongoc-cursor-cursorid.c" role="src" />
+ <file md5sum="15e1c3b0a0d7bc63b6d1998b05e7d3cf" name="src/libmongoc/src/mongoc/mongoc-cursor-private.h" role="src" />
<file md5sum="401d056e2b6959ffb96b623a6953c704" name="src/libmongoc/src/mongoc/mongoc-cursor-transform-private.h" role="src" />
<file md5sum="496bccd3270ec9f966230cd6a77f221b" name="src/libmongoc/src/mongoc/mongoc-cursor-transform.c" role="src" />
- <file md5sum="a6130077ba3824c55c5fd1d379fb4cc3" name="src/libmongoc/src/mongoc/mongoc-cursor.c" role="src" />
+ <file md5sum="ea1970b13e8843f96914da70df3223d0" name="src/libmongoc/src/mongoc/mongoc-cursor.c" role="src" />
<file md5sum="c8717d70b7dcf64e8351a67b653028c2" name="src/libmongoc/src/mongoc/mongoc-cursor.h" role="src" />
- <file md5sum="46a387f1345687b1179964269a79b7b2" name="src/libmongoc/src/mongoc/mongoc-cyrus-private.h" role="src" />
- <file md5sum="1fcb40a5241982a4c021a13f8060c74d" name="src/libmongoc/src/mongoc/mongoc-cyrus.c" role="src" />
- <file md5sum="06f2def156735127a3b78fbf17bc228c" name="src/libmongoc/src/mongoc/mongoc-database-private.h" role="src" />
- <file md5sum="5c4415d3274e5042c9c3f64003754f6e" name="src/libmongoc/src/mongoc/mongoc-database.c" role="src" />
- <file md5sum="04f47c5fafcc31d8bb3b0f05300e82fb" name="src/libmongoc/src/mongoc/mongoc-database.h" role="src" />
+ <file md5sum="679240b7b4f28544f8548f00f2e3a228" name="src/libmongoc/src/mongoc/mongoc-cyrus-private.h" role="src" />
+ <file md5sum="2dfb3f3d1a48da4ba5b51567ab7baf8e" name="src/libmongoc/src/mongoc/mongoc-cyrus.c" role="src" />
+ <file md5sum="d81f97faaa96cd79eb421558e4e9f593" name="src/libmongoc/src/mongoc/mongoc-database-private.h" role="src" />
+ <file md5sum="2bd24870a849272f49abc589dc2a24d2" name="src/libmongoc/src/mongoc/mongoc-database.c" role="src" />
+ <file md5sum="afd310cf00bd782a6e4df064978e9c80" name="src/libmongoc/src/mongoc/mongoc-database.h" role="src" />
<file md5sum="d078fdada8c42b12a7749291aedabd83" name="src/libmongoc/src/mongoc/mongoc-errno-private.h" role="src" />
- <file md5sum="35076f8368b1657ebdb9335acba96ece" name="src/libmongoc/src/mongoc/mongoc-error.h" role="src" />
+ <file md5sum="da778d569b70c8ad237f8f8b866d37fa" name="src/libmongoc/src/mongoc/mongoc-error.h" role="src" />
<file md5sum="b3f65d48c053b6c41023065364efb459" name="src/libmongoc/src/mongoc/mongoc-find-and-modify-private.h" role="src" />
<file md5sum="57c6671cb89445fd7e5f9948d4215dfe" name="src/libmongoc/src/mongoc/mongoc-find-and-modify.c" role="src" />
<file md5sum="3c4796d115477cbb6359daf8ea5d6d81" name="src/libmongoc/src/mongoc/mongoc-find-and-modify.h" role="src" />
<file md5sum="d525a7c52dc00c28f8e41f0b5eb594af" name="src/libmongoc/src/mongoc/mongoc-flags.h" role="src" />
<file md5sum="11b6f617b95007b70ee8910fd7a4f37f" name="src/libmongoc/src/mongoc/mongoc-gridfs-file-list-private.h" role="src" />
- <file md5sum="55787f0d6d184986d9406d489bad47c8" name="src/libmongoc/src/mongoc/mongoc-gridfs-file-list.c" role="src" />
+ <file md5sum="a9b3079399677b7b33ff67be1ef4ffab" name="src/libmongoc/src/mongoc/mongoc-gridfs-file-list.c" role="src" />
<file md5sum="fd76aff14c8955f18d146a87422e0b0e" name="src/libmongoc/src/mongoc/mongoc-gridfs-file-list.h" role="src" />
<file md5sum="c2a09954b6e6052b64660717828cd510" name="src/libmongoc/src/mongoc/mongoc-gridfs-file-page-private.h" role="src" />
<file md5sum="6f0ed4d860816a02bfd98c9b50340227" name="src/libmongoc/src/mongoc/mongoc-gridfs-file-page.c" role="src" />
<file md5sum="62cee46bc3e899eb3b9cc50b293ec24f" name="src/libmongoc/src/mongoc/mongoc-gridfs-file-page.h" role="src" />
<file md5sum="475f90f518868d60b30e9579f6aa8173" name="src/libmongoc/src/mongoc/mongoc-gridfs-file-private.h" role="src" />
- <file md5sum="0e6e2ea475ec5158e6294e430aaf38ce" name="src/libmongoc/src/mongoc/mongoc-gridfs-file.c" role="src" />
+ <file md5sum="cf1bbf830fd0fab13d35f8cba4c0bfe9" name="src/libmongoc/src/mongoc/mongoc-gridfs-file.c" role="src" />
<file md5sum="41b93c603de61598a286680ae106c445" name="src/libmongoc/src/mongoc/mongoc-gridfs-file.h" role="src" />
<file md5sum="c0d009822182252f718f12bc91831fbd" name="src/libmongoc/src/mongoc/mongoc-gridfs-private.h" role="src" />
- <file md5sum="a3c87ba9220ff8cecdf1d3e6fffee5cc" name="src/libmongoc/src/mongoc/mongoc-gridfs.c" role="src" />
+ <file md5sum="493a046a237865f1cb2196179996f7ba" name="src/libmongoc/src/mongoc/mongoc-gridfs.c" role="src" />
<file md5sum="46346929a8b97c037a7d1d4b9962463f" name="src/libmongoc/src/mongoc/mongoc-gridfs.h" role="src" />
<file md5sum="dece6f3a8c930015061ad0ed6b3c150c" name="src/libmongoc/src/mongoc/mongoc-gssapi-private.h" role="src" />
<file md5sum="f238a7c7de0e7526d1086098ce25b34c" name="src/libmongoc/src/mongoc/mongoc-gssapi.c" role="src" />
<file md5sum="2cd1fa633ba652860b2030e49f0f615d" name="src/libmongoc/src/mongoc/mongoc-handshake-compiler-private.h" role="src" />
<file md5sum="acd3ac65330f6168eb1fc6577122ac1b" name="src/libmongoc/src/mongoc/mongoc-handshake-os-private.h" role="src" />
- <file md5sum="effc4b3efb168e71f109ba477e764540" name="src/libmongoc/src/mongoc/mongoc-handshake-private.h" role="src" />
- <file md5sum="3c8b24aaf69fb443562fec9d575f6b8a" name="src/libmongoc/src/mongoc/mongoc-handshake.c" role="src" />
+ <file md5sum="507e4548f17d948426a838dd90a65aa7" name="src/libmongoc/src/mongoc/mongoc-handshake-private.h" role="src" />
+ <file md5sum="d7aa79eef501125dc1cfbaab5f74edc3" name="src/libmongoc/src/mongoc/mongoc-handshake.c" role="src" />
<file md5sum="388f40f82089b16891d60c725a542de8" name="src/libmongoc/src/mongoc/mongoc-handshake.h" role="src" />
- <file md5sum="332280878107ebdd89acc7cd81db21ef" name="src/libmongoc/src/mongoc/mongoc-host-list-private.h" role="src" />
- <file md5sum="716c96af55e51d223cb6030e088a0eae" name="src/libmongoc/src/mongoc/mongoc-host-list.c" role="src" />
+ <file md5sum="45a5b5cc10ed0c55ffb6a1c332048910" name="src/libmongoc/src/mongoc/mongoc-host-list-private.h" role="src" />
+ <file md5sum="8c3d83891ecf96ae366de7db167d9e15" name="src/libmongoc/src/mongoc/mongoc-host-list.c" role="src" />
<file md5sum="f70e2d7aedaa022afd2e9f23d3d2e76e" name="src/libmongoc/src/mongoc/mongoc-host-list.h" role="src" />
<file md5sum="5ac6b6febc5f47878215fbc74bd44de7" name="src/libmongoc/src/mongoc/mongoc-index.c" role="src" />
<file md5sum="3f3c464648d900306ae7b280eee0bba6" name="src/libmongoc/src/mongoc/mongoc-index.h" role="src" />
- <file md5sum="98ad7e9f7d993be9a4690fccee375e12" name="src/libmongoc/src/mongoc/mongoc-init.c" role="src" />
+ <file md5sum="5c5137f163d63d2a9a02ac46f72acf50" name="src/libmongoc/src/mongoc/mongoc-init.c" role="src" />
<file md5sum="12558b7ab94e2f39099cb8b0624ba968" name="src/libmongoc/src/mongoc/mongoc-init.h" role="src" />
- <file md5sum="b398a2fcabf5b20ec32f81174eeb2335" name="src/libmongoc/src/mongoc/mongoc-iovec.h" role="src" />
+ <file md5sum="74fb569d764dc34b112b6574ca30b76d" name="src/libmongoc/src/mongoc/mongoc-iovec.h" role="src" />
<file md5sum="6f7f31e0d0f1921e0a5fb96ba2cd9661" name="src/libmongoc/src/mongoc/mongoc-libressl-private.h" role="src" />
<file md5sum="81b33112fe46e5160d8e87382b1140c9" name="src/libmongoc/src/mongoc/mongoc-libressl.c" role="src" />
<file md5sum="a73820e036b564f7c8ad21d902d5d66e" name="src/libmongoc/src/mongoc/mongoc-linux-distro-scanner-private.h" role="src" />
<file md5sum="c1234d64e33fd6e685966ee11f39478a" name="src/libmongoc/src/mongoc/mongoc-linux-distro-scanner.c" role="src" />
<file md5sum="f4346bf2b2988b26b88775871bd27328" name="src/libmongoc/src/mongoc/mongoc-list-private.h" role="src" />
<file md5sum="ab4d31b167f229b490faf5b71a714339" name="src/libmongoc/src/mongoc/mongoc-list.c" role="src" />
<file md5sum="107da88d5cdff601a26446095a991a59" name="src/libmongoc/src/mongoc/mongoc-log-private.h" role="src" />
<file md5sum="e73fa576e5a3c04fe0a3a7a7b5cf82b5" name="src/libmongoc/src/mongoc/mongoc-log.c" role="src" />
<file md5sum="76e962ea420c1b911269c797c8a3adf0" name="src/libmongoc/src/mongoc/mongoc-log.h" role="src" />
<file md5sum="e4fac525a93a35492e16098e5270e639" name="src/libmongoc/src/mongoc/mongoc-macros.h" role="src" />
<file md5sum="a2565f41ea906c646e2f54720d1b67ff" name="src/libmongoc/src/mongoc/mongoc-matcher-op-private.h" role="src" />
- <file md5sum="49159694d0e167f5272390d7ff6b834d" name="src/libmongoc/src/mongoc/mongoc-matcher-op.c" role="src" />
+ <file md5sum="a39a14b6784d275b7557ed80e4397519" name="src/libmongoc/src/mongoc/mongoc-matcher-op.c" role="src" />
<file md5sum="bf951a240548f776a50461e051c740ef" name="src/libmongoc/src/mongoc/mongoc-matcher-private.h" role="src" />
<file md5sum="b068f9b5977666b664aab7dd97781a38" name="src/libmongoc/src/mongoc/mongoc-matcher.c" role="src" />
<file md5sum="d2617ff07c67a58cb548245c413382a8" name="src/libmongoc/src/mongoc/mongoc-matcher.h" role="src" />
<file md5sum="bbd4948f47290dc7c9393a51931ca3a6" name="src/libmongoc/src/mongoc/mongoc-memcmp-private.h" role="src" />
<file md5sum="0d8d56f849bb672bf57dbdf29dd52bdf" name="src/libmongoc/src/mongoc/mongoc-memcmp.c" role="src" />
- <file md5sum="c4f004fe5de2467e3b2eba1060deb978" name="src/libmongoc/src/mongoc/mongoc-opcode.h" role="src" />
+ <file md5sum="f260fffacf2ff90cd61a519511ed7dd6" name="src/libmongoc/src/mongoc/mongoc-opcode.h" role="src" />
<file md5sum="5db0e035e9dd68c9df8931b3e0278e01" name="src/libmongoc/src/mongoc/mongoc-openssl-private.h" role="src" />
<file md5sum="51ad1317cbdc11769020b689cae4e77d" name="src/libmongoc/src/mongoc/mongoc-openssl.c" role="src" />
<file md5sum="8b5d953bb66d952e1c7c81186393d714" name="src/libmongoc/src/mongoc/mongoc-queue-private.h" role="src" />
<file md5sum="7da2ed3fabbeee9a88c7ddeab9f11a20" name="src/libmongoc/src/mongoc/mongoc-queue.c" role="src" />
<file md5sum="e57b2ef04f85e5b51aa16fe051c60cf1" name="src/libmongoc/src/mongoc/mongoc-rand-cng.c" role="src" />
<file md5sum="b8818c8c7c155272ab33a04dfb0c464c" name="src/libmongoc/src/mongoc/mongoc-rand-common-crypto.c" role="src" />
<file md5sum="9335fc967215cfdee64e6f884bccf4ac" name="src/libmongoc/src/mongoc/mongoc-rand-openssl.c" role="src" />
<file md5sum="9d0874c28f40e7afe17fc38e52754e26" name="src/libmongoc/src/mongoc/mongoc-rand-private.h" role="src" />
<file md5sum="a9f55f7922990fe356d0bb75cf09901e" name="src/libmongoc/src/mongoc/mongoc-rand.h" role="src" />
<file md5sum="b8f581bb3c78adffb9465f817c623ac6" name="src/libmongoc/src/mongoc/mongoc-read-concern-private.h" role="src" />
- <file md5sum="35e6e223cb33df4be3f412e332d46f63" name="src/libmongoc/src/mongoc/mongoc-read-concern.c" role="src" />
+ <file md5sum="e337035804224ad0516cc22c3f605e06" name="src/libmongoc/src/mongoc/mongoc-read-concern.c" role="src" />
<file md5sum="a9b0cc3201220ce7688aed3babcc2ce7" name="src/libmongoc/src/mongoc/mongoc-read-concern.h" role="src" />
- <file md5sum="d18a126c0d7eec58b097745b00a790c6" name="src/libmongoc/src/mongoc/mongoc-read-prefs-private.h" role="src" />
- <file md5sum="04f214d6b43b33b61613b4c085c164e8" name="src/libmongoc/src/mongoc/mongoc-read-prefs.c" role="src" />
+ <file md5sum="47cbfa07d500e0b98b2754f8ccd9fd86" name="src/libmongoc/src/mongoc/mongoc-read-prefs-private.h" role="src" />
+ <file md5sum="0181970f2eca35bfdd509b97d893c392" name="src/libmongoc/src/mongoc/mongoc-read-prefs.c" role="src" />
<file md5sum="45e90593271f17c82d21513d136dd677" name="src/libmongoc/src/mongoc/mongoc-read-prefs.h" role="src" />
- <file md5sum="e5d014b88b77f2f143f210fa9828cca7" name="src/libmongoc/src/mongoc/mongoc-rpc-private.h" role="src" />
- <file md5sum="d93efda61a2ab6c35de562f44b63e960" name="src/libmongoc/src/mongoc/mongoc-rpc.c" role="src" />
- <file md5sum="be94e6cc68aff52c49079171f661a2e9" name="src/libmongoc/src/mongoc/mongoc-sasl-private.h" role="src" />
- <file md5sum="6383f4dd4160e092f06ecd3080cb270d" name="src/libmongoc/src/mongoc/mongoc-sasl.c" role="src" />
+ <file md5sum="6cf48920f583ad0c4d453ed7f93d70ac" name="src/libmongoc/src/mongoc/mongoc-rpc-private.h" role="src" />
+ <file md5sum="161022e5726068a57a557049f2ba1bf7" name="src/libmongoc/src/mongoc/mongoc-rpc.c" role="src" />
+ <file md5sum="92a533406985d92c9464f2269bbd3bfb" name="src/libmongoc/src/mongoc/mongoc-sasl-private.h" role="src" />
+ <file md5sum="3f1dc134d74dbc09b1dac415b06e731b" name="src/libmongoc/src/mongoc/mongoc-sasl.c" role="src" />
<file md5sum="589fdf85fbae3a7066c606cc6a286970" name="src/libmongoc/src/mongoc/mongoc-scram-private.h" role="src" />
<file md5sum="20d4fe448dd3828ba8fb0483ed1488f3" name="src/libmongoc/src/mongoc/mongoc-scram.c" role="src" />
<file md5sum="cf746217e6db641c303af0df750895b3" name="src/libmongoc/src/mongoc/mongoc-secure-channel-private.h" role="src" />
- <file md5sum="36e16202dd4ee2a8d903c1211ca5cc3b" name="src/libmongoc/src/mongoc/mongoc-secure-channel.c" role="src" />
+ <file md5sum="e98246af49277dab91ba1c4cef556247" name="src/libmongoc/src/mongoc/mongoc-secure-channel.c" role="src" />
<file md5sum="dc624825cfaf5b68664efb7260c93785" name="src/libmongoc/src/mongoc/mongoc-secure-transport-private.h" role="src" />
- <file md5sum="545a95e5ebdbb40767d124b44b4cc1ba" name="src/libmongoc/src/mongoc/mongoc-secure-transport.c" role="src" />
- <file md5sum="3269b97252313b6f17e179f33aa44913" name="src/libmongoc/src/mongoc/mongoc-server-description-private.h" role="src" />
- <file md5sum="39d4b63b970e8658cced4f070a199627" name="src/libmongoc/src/mongoc/mongoc-server-description.c" role="src" />
- <file md5sum="a229b80c0763b7c9d694461dc2159822" name="src/libmongoc/src/mongoc/mongoc-server-description.h" role="src" />
- <file md5sum="8acd8100bec70a16bb6fa98db370e006" name="src/libmongoc/src/mongoc/mongoc-server-stream-private.h" role="src" />
- <file md5sum="0af4a374590f03c480f22e200305f819" name="src/libmongoc/src/mongoc/mongoc-server-stream.c" role="src" />
+ <file md5sum="6fe2e1d3b8efb6ccf91a95a4b79ec126" name="src/libmongoc/src/mongoc/mongoc-secure-transport.c" role="src" />
+ <file md5sum="627612ffff0c4cfe3cf2e0e85e59db90" name="src/libmongoc/src/mongoc/mongoc-server-description-private.h" role="src" />
+ <file md5sum="cad1008a9e537064807d5f7e52c92913" name="src/libmongoc/src/mongoc/mongoc-server-description.c" role="src" />
+ <file md5sum="47ca11693b8ebd32a19097df94b1e4cf" name="src/libmongoc/src/mongoc/mongoc-server-description.h" role="src" />
+ <file md5sum="0d538d076e2320b1b851cd91b6c4fc2f" name="src/libmongoc/src/mongoc/mongoc-server-stream-private.h" role="src" />
+ <file md5sum="ffc10eef8451934a6ff8169a86b6bfa7" name="src/libmongoc/src/mongoc/mongoc-server-stream.c" role="src" />
<file md5sum="1f2f5e3eca34ea61ac9c45d5f46c7bea" name="src/libmongoc/src/mongoc/mongoc-set-private.h" role="src" />
- <file md5sum="8ccf743a528228e45df86f939187691b" name="src/libmongoc/src/mongoc/mongoc-set.c" role="src" />
+ <file md5sum="a8ddc51ca19a29331335f80b31c794f8" name="src/libmongoc/src/mongoc/mongoc-set.c" role="src" />
<file md5sum="ca327e5c252d3ba97d6c9ab032305e5f" name="src/libmongoc/src/mongoc/mongoc-socket-private.h" role="src" />
- <file md5sum="785e74ff6e5d3baafbb67d8f29468877" name="src/libmongoc/src/mongoc/mongoc-socket.c" role="src" />
+ <file md5sum="b863664de969ad00d4924bcc00f27275" name="src/libmongoc/src/mongoc/mongoc-socket.c" role="src" />
<file md5sum="ceab1549dde4d167a800d5a70d9d0b9b" name="src/libmongoc/src/mongoc/mongoc-socket.h" role="src" />
<file md5sum="05bf29935e2bbbc82f96b49c7dec7108" name="src/libmongoc/src/mongoc/mongoc-ssl-private.h" role="src" />
<file md5sum="9dfb38e915946752d1fe28a038b2a606" name="src/libmongoc/src/mongoc/mongoc-ssl.c" role="src" />
<file md5sum="fa7ceac4882b442c346dcafd280675f5" name="src/libmongoc/src/mongoc/mongoc-ssl.h" role="src" />
- <file md5sum="3e08931a7ce2ba0ba059e83008cdd18a" name="src/libmongoc/src/mongoc/mongoc-sspi-private.h" role="src" />
+ <file md5sum="605bae2c58fc982a88fb9c79940e4fb5" name="src/libmongoc/src/mongoc/mongoc-sspi-private.h" role="src" />
<file md5sum="1572cd62c78a88b239c39d1ce9e169d2" name="src/libmongoc/src/mongoc/mongoc-sspi.c" role="src" />
<file md5sum="6e2f1849fbd868392e249b144a1224a9" name="src/libmongoc/src/mongoc/mongoc-stream-buffered.c" role="src" />
<file md5sum="595b533b1988bbdcfa56dff1b5bf3d22" name="src/libmongoc/src/mongoc/mongoc-stream-buffered.h" role="src" />
<file md5sum="131537d12d1fe8d7ee9b63a9f7ce27b7" name="src/libmongoc/src/mongoc/mongoc-stream-file.c" role="src" />
<file md5sum="fd95cee74b0c182bcb1059482b6e20f7" name="src/libmongoc/src/mongoc/mongoc-stream-file.h" role="src" />
<file md5sum="d247dac07fdfffa79422f0682f546a77" name="src/libmongoc/src/mongoc/mongoc-stream-gridfs.c" role="src" />
<file md5sum="4aedf06d31523f5798b5da869d6b96da" name="src/libmongoc/src/mongoc/mongoc-stream-gridfs.h" role="src" />
- <file md5sum="ba59401b4e69b181081395feaf9eca02" name="src/libmongoc/src/mongoc/mongoc-stream-private.h" role="src" />
+ <file md5sum="27877449371933549f8e07e1e4dc0240" name="src/libmongoc/src/mongoc/mongoc-stream-private.h" role="src" />
<file md5sum="e5bc4577d69081841b6ec339e48f3831" name="src/libmongoc/src/mongoc/mongoc-stream-socket.c" role="src" />
<file md5sum="21878b2874ec53ff88034c5fc94a8e36" name="src/libmongoc/src/mongoc/mongoc-stream-socket.h" role="src" />
<file md5sum="a74784297494ff6ba621175a0d2c0173" name="src/libmongoc/src/mongoc/mongoc-stream-tls-libressl-private.h" role="src" />
<file md5sum="3b9a7823f867095f5dddeba72c3eb02d" name="src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.c" role="src" />
<file md5sum="9b23137cec32ffbc50d64eed110f5d64" name="src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.h" role="src" />
<file md5sum="b94abd4f830e770edb4fdcb3de94e899" name="src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio-private.h" role="src" />
<file md5sum="d9b74e3747e535580d49fd84fa968ff2" name="src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio.c" role="src" />
<file md5sum="7c46291f019d55b03fde342c3f8a9787" name="src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-private.h" role="src" />
- <file md5sum="f00969fe6175703089bcbb3f8836ac9d" name="src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.c" role="src" />
+ <file md5sum="6a8980a09fee8eb13303d43175088ff6" name="src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.c" role="src" />
<file md5sum="a139ea68812ffc186864a37ecaeffde4" name="src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.h" role="src" />
<file md5sum="991ad2ed2a006a524126641819075dd0" name="src/libmongoc/src/mongoc/mongoc-stream-tls-private.h" role="src" />
<file md5sum="22c6550c4bda6c672868bb0091873af2" name="src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel-private.h" role="src" />
<file md5sum="3b359e6bb8532231f495824a13608fe4" name="src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.c" role="src" />
<file md5sum="1d8d42150198ee2c3b843b01260d1d59" name="src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.h" role="src" />
<file md5sum="b0a2657fd14bda6ff08dc08028860cd7" name="src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport-private.h" role="src" />
<file md5sum="5dbf8cea2d7a86ad28fd1725ed736b8b" name="src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.c" role="src" />
<file md5sum="3559c23a18e215d035d5232d46b703ab" name="src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.h" role="src" />
<file md5sum="293bc98699ba6e4bc15ca81c45166e19" name="src/libmongoc/src/mongoc/mongoc-stream-tls.c" role="src" />
<file md5sum="2ac41a260b7b5070863ef2c4a7d5b8d0" name="src/libmongoc/src/mongoc/mongoc-stream-tls.h" role="src" />
- <file md5sum="d328e357c4c9754abb3f658a5d3f7c48" name="src/libmongoc/src/mongoc/mongoc-stream.c" role="src" />
+ <file md5sum="f598d5df683df112f317f054b0622bf8" name="src/libmongoc/src/mongoc/mongoc-stream.c" role="src" />
<file md5sum="86440e4657a80a75d4cd6d3823cfc01b" name="src/libmongoc/src/mongoc/mongoc-stream.h" role="src" />
<file md5sum="ceeb38c5d0125ba0ed559e2557f27770" name="src/libmongoc/src/mongoc/mongoc-thread-private.h" role="src" />
<file md5sum="8498688a48396943f0b1a656613a9de4" name="src/libmongoc/src/mongoc/mongoc-topology-description-apm-private.h" role="src" />
- <file md5sum="c29f194ee4342754d0db9c2a3b3f878e" name="src/libmongoc/src/mongoc/mongoc-topology-description-apm.c" role="src" />
- <file md5sum="0676dcdbd3bc02684912f3570d7fd9e4" name="src/libmongoc/src/mongoc/mongoc-topology-description-private.h" role="src" />
- <file md5sum="1b4d2b9d287692dd9e8dfc6fd4c15826" name="src/libmongoc/src/mongoc/mongoc-topology-description.c" role="src" />
+ <file md5sum="8f93b1626480c7c10b077d772ab35314" name="src/libmongoc/src/mongoc/mongoc-topology-description-apm.c" role="src" />
+ <file md5sum="47916a0ed8d74b0c27022797e2ae9fbb" name="src/libmongoc/src/mongoc/mongoc-topology-description-private.h" role="src" />
+ <file md5sum="055769efd1067edb544788720e590fbb" name="src/libmongoc/src/mongoc/mongoc-topology-description.c" role="src" />
<file md5sum="53ba27e26803bb1705b05fd44d19d8b7" name="src/libmongoc/src/mongoc/mongoc-topology-description.h" role="src" />
- <file md5sum="789348fd3fd3a71130349cf6f733d306" name="src/libmongoc/src/mongoc/mongoc-topology-private.h" role="src" />
- <file md5sum="9e5212b7fc03292c96eb8f5af1872029" name="src/libmongoc/src/mongoc/mongoc-topology-scanner-private.h" role="src" />
- <file md5sum="b5a6fefe5644cbfe8294dac1f4c8e00b" name="src/libmongoc/src/mongoc/mongoc-topology-scanner.c" role="src" />
- <file md5sum="8667f2752869853c54740240b47ec574" name="src/libmongoc/src/mongoc/mongoc-topology.c" role="src" />
+ <file md5sum="1594c9800ae9477b71640844a0e71284" name="src/libmongoc/src/mongoc/mongoc-topology-private.h" role="src" />
+ <file md5sum="1d05f380ea283c0240bcdb123a8728dc" name="src/libmongoc/src/mongoc/mongoc-topology-scanner-private.h" role="src" />
+ <file md5sum="e0a488fb6e83c0c8ed84a988cbaec9b9" name="src/libmongoc/src/mongoc/mongoc-topology-scanner.c" role="src" />
+ <file md5sum="1a44627a5f53d51c8ae070a97c5fb430" name="src/libmongoc/src/mongoc/mongoc-topology.c" role="src" />
<file md5sum="9070611a08226982864902ecf668d098" name="src/libmongoc/src/mongoc/mongoc-trace-private.h" role="src" />
- <file md5sum="c4f7c3cef35a4d694c0d5571bfd276a3" name="src/libmongoc/src/mongoc/mongoc-uri-private.h" role="src" />
- <file md5sum="d950b7d412ea51f64f2fb62b38107174" name="src/libmongoc/src/mongoc/mongoc-uri.c" role="src" />
- <file md5sum="ff5e789f40f8c20801915eb459401f5e" name="src/libmongoc/src/mongoc/mongoc-uri.h" role="src" />
- <file md5sum="4cece4a1a6c16868a13a31cedee8cd46" name="src/libmongoc/src/mongoc/mongoc-util-private.h" role="src" />
- <file md5sum="4c23935b28f360913a2f9df54d6f8f3c" name="src/libmongoc/src/mongoc/mongoc-util.c" role="src" />
+ <file md5sum="3654be761daea9a8c30580003f575ef9" name="src/libmongoc/src/mongoc/mongoc-uri-private.h" role="src" />
+ <file md5sum="c2318a9c8cf79be4cadb95de8c1a7947" name="src/libmongoc/src/mongoc/mongoc-uri.c" role="src" />
+ <file md5sum="f80a9783134d5bae220a759b6f1eaa70" name="src/libmongoc/src/mongoc/mongoc-uri.h" role="src" />
+ <file md5sum="0b2c90b737011c3fb9180eab8608cd47" name="src/libmongoc/src/mongoc/mongoc-util-private.h" role="src" />
+ <file md5sum="4570342ac67995d1619d68f33073211d" name="src/libmongoc/src/mongoc/mongoc-util.c" role="src" />
<file md5sum="5c7c2a82e500e9a518fa596ff9c44021" name="src/libmongoc/src/mongoc/mongoc-version-functions.c" role="src" />
<file md5sum="0f37390d6ec78b9b332f7a87924407dc" name="src/libmongoc/src/mongoc/mongoc-version-functions.h" role="src" />
- <file md5sum="3c53a61a5a80066a89bdb7b926881b17" name="src/libmongoc/src/mongoc/mongoc-version.h" role="src" />
+ <file md5sum="27a233d4562789fbdca4b8d21db438fa" name="src/libmongoc/src/mongoc/mongoc-version.h" role="src" />
<file md5sum="2f63b4b1f86910aab6a6e7423af3f7a8" name="src/libmongoc/src/mongoc/mongoc-version.h.in" role="src" />
- <file md5sum="22d9e0f21c5242e7884f6854899edbc7" name="src/libmongoc/src/mongoc/mongoc-write-command-private.h" role="src" />
- <file md5sum="35324995b3b3996c7a4599cb540afbd7" name="src/libmongoc/src/mongoc/mongoc-write-command.c" role="src" />
- <file md5sum="244a3b0c18fcc0580870900f8103073a" name="src/libmongoc/src/mongoc/mongoc-write-concern-private.h" role="src" />
- <file md5sum="2ab1559db960960d9af654ee1d1eea85" name="src/libmongoc/src/mongoc/mongoc-write-concern.c" role="src" />
+ <file md5sum="e20377c8ac871a1ec823c422e4b48059" name="src/libmongoc/src/mongoc/mongoc-write-command-legacy-private.h" role="src" />
+ <file md5sum="2fd1922805e346c85274b6a0726db7d6" name="src/libmongoc/src/mongoc/mongoc-write-command-legacy.c" role="src" />
+ <file md5sum="e4d41c68b44faf86723a08a758f49022" name="src/libmongoc/src/mongoc/mongoc-write-command-private.h" role="src" />
+ <file md5sum="6d5700f478f0128fb6c665739bb60960" name="src/libmongoc/src/mongoc/mongoc-write-command.c" role="src" />
+ <file md5sum="32adce895a9bac3ef9909f738febdb96" name="src/libmongoc/src/mongoc/mongoc-write-concern-private.h" role="src" />
+ <file md5sum="1811b49837746092dd020e53937f04de" name="src/libmongoc/src/mongoc/mongoc-write-concern.c" role="src" />
<file md5sum="c508009d926802beaef08602b99094c8" name="src/libmongoc/src/mongoc/mongoc-write-concern.h" role="src" />
- <file md5sum="28f12f34bd8577a12914ee4dedef3edf" name="src/libmongoc/src/mongoc/mongoc.h" role="src" />
+ <file md5sum="abb28d5583924c9d81751e4ecbc5289c" name="src/libmongoc/src/mongoc/mongoc.h" role="src" />
<file md5sum="f3e25b8f4865b4ff5ce5830870d8cec5" name="src/libmongoc/src/mongoc/op-compressed.def" role="src" />
<file md5sum="ed1890accd7d9a1426fdac121ed42ad4" name="src/libmongoc/src/mongoc/op-delete.def" role="src" />
<file md5sum="9af88adf2ef432761bbe81db2f9bdce7" name="src/libmongoc/src/mongoc/op-get-more.def" role="src" />
<file md5sum="242fff22640f143ae262e363e8552a7e" name="src/libmongoc/src/mongoc/op-header.def" role="src" />
<file md5sum="2efe34631dd0d540d0f1d2fdb2d57813" name="src/libmongoc/src/mongoc/op-insert.def" role="src" />
<file md5sum="79ab986cb49a47e7c9bccdc4005a9697" name="src/libmongoc/src/mongoc/op-kill-cursors.def" role="src" />
- <file md5sum="25dd34248b0000465c7e2ac769fc509a" name="src/libmongoc/src/mongoc/op-msg.def" role="src" />
+ <file md5sum="9194db408ea52f87fa0cf6169c78eee9" name="src/libmongoc/src/mongoc/op-msg.def" role="src" />
<file md5sum="a986d22cb495d652dc6059d722bb3266" name="src/libmongoc/src/mongoc/op-query.def" role="src" />
<file md5sum="f82bc931404ce00b318675126572d667" name="src/libmongoc/src/mongoc/op-reply-header.def" role="src" />
<file md5sum="dd22bb15cb70d35fe25192f0f304871b" name="src/libmongoc/src/mongoc/op-reply.def" role="src" />
<file md5sum="03c6179a4fe8b51c606b03a763529d55" name="src/libmongoc/src/mongoc/op-update.def" role="src" />
<file md5sum="28abcc30025e7d451b99261aa88f218f" name="src/libmongoc/src/mongoc/utlist.h" role="src" />
- <file md5sum="b14b05b0fd5ab1c867397927b67c1bd3" name="src/libmongoc/VERSION_CURRENT" role="src" />
- <file md5sum="b14b05b0fd5ab1c867397927b67c1bd3" name="src/libmongoc/VERSION_RELEASED" role="src" />
- <file md5sum="b1e6eeec6d7738306d501e0998881349" name="src/bson-encode.c" role="src" />
- <file md5sum="cdaeec6f974f315d109533dd9e05d94d" name="src/bson.c" role="src" />
+ <file md5sum="4dc406b7d1b6e49c22eee2e01177c195" name="src/libmongoc/src/zlib-1.2.11/adler32.c" role="src" />
+ <file md5sum="bca8dc2f982dfce8944aec9f4a83626c" name="src/libmongoc/src/zlib-1.2.11/compress.c" role="src" />
+ <file md5sum="f9a17af8e4efe8019fca94827ea1c0db" name="src/libmongoc/src/zlib-1.2.11/crc32.c" role="src" />
+ <file md5sum="f28d16b67efecdfafa0d816a7d982124" name="src/libmongoc/src/zlib-1.2.11/crc32.h" role="src" />
+ <file md5sum="9e645d2cc17f3e324028b90d25edc969" name="src/libmongoc/src/zlib-1.2.11/deflate.c" role="src" />
+ <file md5sum="c5839b3f66d79c5aa0daa5062de59bd5" name="src/libmongoc/src/zlib-1.2.11/deflate.h" role="src" />
+ <file md5sum="29d02cff161bde3e4e717b25a2ab7050" name="src/libmongoc/src/zlib-1.2.11/gzclose.c" role="src" />
+ <file md5sum="de472a3069a84c6e6b1eb083c3f91b53" name="src/libmongoc/src/zlib-1.2.11/gzguts.h" role="src" />
+ <file md5sum="2c08acd5014596272031fdd1a36d0f1c" name="src/libmongoc/src/zlib-1.2.11/gzlib.c" role="src" />
+ <file md5sum="a2a2f3a65c2891b1a8cf98be9b499e96" name="src/libmongoc/src/zlib-1.2.11/gzread.c" role="src" />
+ <file md5sum="92c3520553ac47aaa67f36ac9a571761" name="src/libmongoc/src/zlib-1.2.11/gzwrite.c" role="src" />
+ <file md5sum="f2adcadab3504b146be9c4928821f233" name="src/libmongoc/src/zlib-1.2.11/infback.c" role="src" />
+ <file md5sum="20d7b26f5ae64f4e8501f846beab550c" name="src/libmongoc/src/zlib-1.2.11/inffast.c" role="src" />
+ <file md5sum="f3669099d3f571dbc0426401ed5f50e3" name="src/libmongoc/src/zlib-1.2.11/inffast.h" role="src" />
+ <file md5sum="7fa3e91804601b6618c915b76a8dc332" name="src/libmongoc/src/zlib-1.2.11/inffixed.h" role="src" />
+ <file md5sum="c892c303a1be104ed0efb628e18ed85a" name="src/libmongoc/src/zlib-1.2.11/inflate.c" role="src" />
+ <file md5sum="12c1f3adaf005c8a4cfb629f2e266d30" name="src/libmongoc/src/zlib-1.2.11/inflate.h" role="src" />
+ <file md5sum="34a634c4c6e1de2e357f18c170e6b96c" name="src/libmongoc/src/zlib-1.2.11/inftrees.c" role="src" />
+ <file md5sum="ec87be89b9bcca8ced80a70f857e823b" name="src/libmongoc/src/zlib-1.2.11/inftrees.h" role="src" />
+ <file md5sum="fddcbf441ed0140dec23927ee34de9a7" name="src/libmongoc/src/zlib-1.2.11/trees.c" role="src" />
+ <file md5sum="51fdcb3e2ccf60ca13c06920c89296a3" name="src/libmongoc/src/zlib-1.2.11/trees.h" role="src" />
+ <file md5sum="dc210f08738914519d15edf1bb6e5141" name="src/libmongoc/src/zlib-1.2.11/uncompr.c" role="src" />
+ <file md5sum="66c6b3953f574eca8b709dd5f6865745" name="src/libmongoc/src/zlib-1.2.11/zconf.h" role="src" />
+ <file md5sum="66c6b3953f574eca8b709dd5f6865745" name="src/libmongoc/src/zlib-1.2.11/zconf.h.in" role="src" />
+ <file md5sum="0338828e9d00c94645648b1517108324" name="src/libmongoc/src/zlib-1.2.11/zlib.h" role="src" />
+ <file md5sum="60674eee456b5cab09b25a4bfd55d533" name="src/libmongoc/src/zlib-1.2.11/zlib.pc.in" role="src" />
+ <file md5sum="69d0e0950d3ab5c1938d8566257c33a3" name="src/libmongoc/src/zlib-1.2.11/zutil.c" role="src" />
+ <file md5sum="b8a47cd8873cbfa8daf689f88dd62f75" name="src/libmongoc/src/zlib-1.2.11/zutil.h" role="src" />
+ <file md5sum="3f1b82dd2ff6d570bac503930a74e609" name="src/libmongoc/VERSION_CURRENT" role="src" />
+ <file md5sum="3f1b82dd2ff6d570bac503930a74e609" name="src/libmongoc/VERSION_RELEASED" role="src" />
+ <file md5sum="af89425d3087e443aa5dfe722a797b27" name="src/bson-encode.c" role="src" />
+ <file md5sum="772381a14c32e6d34fb3d4c6c58af231" name="src/bson.c" role="src" />
<file md5sum="7f3a9da81c74468d7af2bffb76986405" name="tests/apm/bug0950-001.phpt" role="test" />
<file md5sum="00654229722c57a064ffbc41b7b10f9b" name="tests/apm/bug0950-002.phpt" role="test" />
- <file md5sum="e6dad23928aa72a82a9cba345231c801" name="tests/apm/monitoring-addSubscriber-001.phpt" role="test" />
- <file md5sum="c1e1a66749316549daff5419a8bb3df9" name="tests/apm/monitoring-addSubscriber-002.phpt" role="test" />
- <file md5sum="1badcfc1818659e4d1af67a51cb263f8" name="tests/apm/monitoring-addSubscriber-003.phpt" role="test" />
- <file md5sum="5c04573b079dc9dc005ec29e63387647" name="tests/apm/monitoring-addSubscriber-004.phpt" role="test" />
- <file md5sum="2beac8ca4b1014f0ccb6e8bde43a00dc" name="tests/apm/monitoring-commandFailed-001.phpt" role="test" />
- <file md5sum="f8e1d6a32a7bace6b9440f0974da0ceb" name="tests/apm/monitoring-commandFailed-002.phpt" role="test" />
- <file md5sum="da13417b2206284c51edb30bf64afb9b" name="tests/apm/monitoring-commandStarted-001.phpt" role="test" />
- <file md5sum="d6756585d0fe7cb9a9f468114aeb01e2" name="tests/apm/monitoring-commandSucceeded-001.phpt" role="test" />
- <file md5sum="65076ba653d62e35a5d41fcdd6261ba0" name="tests/apm/monitoring-commandSucceeded-002.phpt" role="test" />
- <file md5sum="742aa8efe0ffd875d19108a4cb86266e" name="tests/apm/monitoring-removeSubscriber-001.phpt" role="test" />
- <file md5sum="a837ac17deb3648e16366c6160dc4f97" name="tests/apm/monitoring-removeSubscriber-002.phpt" role="test" />
- <file md5sum="3f28a3815007332f6e7f0bc8b94b0627" name="tests/apm/overview.phpt" role="test" />
+ <file md5sum="904794b888d35edc486b059bb6b84c49" name="tests/apm/monitoring-addSubscriber-001.phpt" role="test" />
+ <file md5sum="297650828572036da72763c1e48eeffd" name="tests/apm/monitoring-addSubscriber-002.phpt" role="test" />
+ <file md5sum="41d6fed11ebf11ff7516308a2088897f" name="tests/apm/monitoring-addSubscriber-003.phpt" role="test" />
+ <file md5sum="45fd272b923e4bc8fe186a6fd67bbfc0" name="tests/apm/monitoring-addSubscriber-004.phpt" role="test" />
+ <file md5sum="3e697a05b7e695b4ff9330b0e9abdc1f" name="tests/apm/monitoring-commandFailed-001.phpt" role="test" />
+ <file md5sum="1706a8dfdf7c60885b61f74f67d77fe5" name="tests/apm/monitoring-commandFailed-002.phpt" role="test" />
+ <file md5sum="7d9d45ab1728f2f1dbfae2426848b390" name="tests/apm/monitoring-commandStarted-001.phpt" role="test" />
+ <file md5sum="fd113e2f45c9eaed0d4a499ca0b8305f" name="tests/apm/monitoring-commandSucceeded-001.phpt" role="test" />
+ <file md5sum="f1c413d42be4819a68205497acefd204" name="tests/apm/monitoring-commandSucceeded-002.phpt" role="test" />
+ <file md5sum="a2aab148ec3cdec4ba7674c255fb71fc" name="tests/apm/monitoring-removeSubscriber-001.phpt" role="test" />
+ <file md5sum="75b448f24fb654f6a977af18d0073b10" name="tests/apm/monitoring-removeSubscriber-002.phpt" role="test" />
+ <file md5sum="4a6fcdd7b99a38d40f12fab1d3facfe1" name="tests/apm/overview.phpt" role="test" />
<file md5sum="b6ee01ca55973b188b7d83d79884c946" name="tests/bson-corpus/array-decodeError-001.phpt" role="test" />
<file md5sum="83f8434bb60797b4c6de3a675082b2ee" name="tests/bson-corpus/array-decodeError-002.phpt" role="test" />
<file md5sum="a185b9e13f33a7a5e33deed198aa5c56" name="tests/bson-corpus/array-decodeError-003.phpt" role="test" />
<file md5sum="5f7904f3ebaf38bb540b0b17e5ecd4d1" name="tests/bson-corpus/array-valid-001.phpt" role="test" />
<file md5sum="e664e3621ec2bad95c47047ce680b611" name="tests/bson-corpus/array-valid-002.phpt" role="test" />
<file md5sum="f7c25ec5028f6c436884b1160863fd0f" name="tests/bson-corpus/array-valid-003.phpt" role="test" />
<file md5sum="c74ab67c202ff570c5019da81993c5ba" name="tests/bson-corpus/array-valid-004.phpt" role="test" />
<file md5sum="8e413f5cb807cde854a34cb13b3386fc" name="tests/bson-corpus/binary-decodeError-001.phpt" role="test" />
<file md5sum="634976ba21553985d6ad1b33c3c7a94d" name="tests/bson-corpus/binary-decodeError-002.phpt" role="test" />
<file md5sum="c3d1081f87b20c7eccc634cc1fbe64dc" name="tests/bson-corpus/binary-decodeError-003.phpt" role="test" />
<file md5sum="029e9562d5745fcc40f68fa1f418ec3b" name="tests/bson-corpus/binary-decodeError-004.phpt" role="test" />
<file md5sum="4b2fb55983642f27a35f5044b0636d91" name="tests/bson-corpus/binary-decodeError-005.phpt" role="test" />
<file md5sum="42973b41103468bfafd3066be4bb7b48" name="tests/bson-corpus/binary-valid-001.phpt" role="test" />
<file md5sum="f6e30233405b16c9b436ea008a85bfd6" name="tests/bson-corpus/binary-valid-002.phpt" role="test" />
<file md5sum="3b4b3909abda591b158dd45ebdbe166b" name="tests/bson-corpus/binary-valid-003.phpt" role="test" />
<file md5sum="7292866378520a3727a8e126bffa9ae3" name="tests/bson-corpus/binary-valid-004.phpt" role="test" />
<file md5sum="2d3ff45f820fbc5b9338b5459de97866" name="tests/bson-corpus/binary-valid-005.phpt" role="test" />
<file md5sum="11933cd51cd1cdcc3cd134464e0896cf" name="tests/bson-corpus/binary-valid-006.phpt" role="test" />
<file md5sum="44569d1c0bf30bce454994a195f7823e" name="tests/bson-corpus/binary-valid-007.phpt" role="test" />
<file md5sum="05a35df52abd6765a5189c128284fcb4" name="tests/bson-corpus/binary-valid-008.phpt" role="test" />
<file md5sum="f84598efb7037482b132ac4fca5857e3" name="tests/bson-corpus/binary-valid-009.phpt" role="test" />
<file md5sum="6c4f6905bb8e6863c96588bd3a645177" name="tests/bson-corpus/binary-valid-010.phpt" role="test" />
<file md5sum="9a5f5e5d38089aaff6164dbc671e2246" name="tests/bson-corpus/binary-valid-011.phpt" role="test" />
<file md5sum="6acf60b8609adcb02a080fc2f2742acb" name="tests/bson-corpus/boolean-decodeError-001.phpt" role="test" />
<file md5sum="1b94fe0b7a11495dceaff6d1d2076082" name="tests/bson-corpus/boolean-decodeError-002.phpt" role="test" />
<file md5sum="2e89a679f02e12daac8311602f83c58b" name="tests/bson-corpus/boolean-valid-001.phpt" role="test" />
<file md5sum="fd64b1f664576a8cb9d5a8e96f883cdb" name="tests/bson-corpus/boolean-valid-002.phpt" role="test" />
<file md5sum="cb2c63091ce618abb5033167e4759ad3" name="tests/bson-corpus/code-decodeError-001.phpt" role="test" />
<file md5sum="d31b0b0e2e4979a9b9ec4e4d5bbca6ab" name="tests/bson-corpus/code-decodeError-002.phpt" role="test" />
<file md5sum="def5a9221b25131693a1a53d31535fef" name="tests/bson-corpus/code-decodeError-003.phpt" role="test" />
<file md5sum="a5047a340325e20232ad5ed40afc870b" name="tests/bson-corpus/code-decodeError-004.phpt" role="test" />
<file md5sum="3b5e60f96e1a28845decd47a24d9e1c8" name="tests/bson-corpus/code-decodeError-005.phpt" role="test" />
<file md5sum="5a5f978ebc00be2e2fd3f05d3544ad78" name="tests/bson-corpus/code-decodeError-006.phpt" role="test" />
<file md5sum="67b521b59cf0cee700b27ad76ad3f97f" name="tests/bson-corpus/code-decodeError-007.phpt" role="test" />
<file md5sum="4dd62a2936dc0d2a8b89d1e11727071c" name="tests/bson-corpus/code-valid-001.phpt" role="test" />
<file md5sum="fbbeca71baad82db682358fe22b40f92" name="tests/bson-corpus/code-valid-002.phpt" role="test" />
<file md5sum="39db4713192b7cf86eafaa9abc7fa35d" name="tests/bson-corpus/code-valid-003.phpt" role="test" />
<file md5sum="d56a95f4ddc720c825a8af0a14e65112" name="tests/bson-corpus/code-valid-004.phpt" role="test" />
<file md5sum="2fdd4dedb78278d264e22c3a059bae09" name="tests/bson-corpus/code-valid-005.phpt" role="test" />
<file md5sum="72e0600d4fa803ed685315248b0f864d" name="tests/bson-corpus/code-valid-006.phpt" role="test" />
<file md5sum="7e7b00cd6ae321cd06b04696558b93f9" name="tests/bson-corpus/code_w_scope-decodeError-001.phpt" role="test" />
<file md5sum="f0cba92ca6f26e073d988f2748d8e776" name="tests/bson-corpus/code_w_scope-decodeError-002.phpt" role="test" />
<file md5sum="eb2b50f7a920cc3442398f8e19f67119" name="tests/bson-corpus/code_w_scope-decodeError-003.phpt" role="test" />
<file md5sum="589dbd0b51bbc31700dd9a2edfef75c3" name="tests/bson-corpus/code_w_scope-decodeError-004.phpt" role="test" />
<file md5sum="7a02aa94adfd1007367d25ad001455a7" name="tests/bson-corpus/code_w_scope-decodeError-005.phpt" role="test" />
<file md5sum="2f8df9fe573147aa9b4e8416ce52b678" name="tests/bson-corpus/code_w_scope-decodeError-006.phpt" role="test" />
<file md5sum="8c9dbdc9c9b15b05859e87fc720483e9" name="tests/bson-corpus/code_w_scope-decodeError-007.phpt" role="test" />
<file md5sum="1b538522ec683297f76a6f53d9bfc559" name="tests/bson-corpus/code_w_scope-decodeError-008.phpt" role="test" />
<file md5sum="722497f92c19465fef10be41e1cbe89e" name="tests/bson-corpus/code_w_scope-decodeError-009.phpt" role="test" />
<file md5sum="7122940f8a8170b4129a8cb164bdd069" name="tests/bson-corpus/code_w_scope-decodeError-010.phpt" role="test" />
<file md5sum="64c54371cd7354cd57abb68d9e4bd9e0" name="tests/bson-corpus/code_w_scope-decodeError-011.phpt" role="test" />
<file md5sum="e51d958466aeeb7dbfd76347c3ac21e8" name="tests/bson-corpus/code_w_scope-valid-001.phpt" role="test" />
<file md5sum="27bc43506ee0a8112ccebf1e6020eb17" name="tests/bson-corpus/code_w_scope-valid-002.phpt" role="test" />
<file md5sum="a921c315930c5e272c2cd0ef6767bba2" name="tests/bson-corpus/code_w_scope-valid-003.phpt" role="test" />
<file md5sum="841870f527f9a55d8b49271fe6fd1c59" name="tests/bson-corpus/code_w_scope-valid-004.phpt" role="test" />
<file md5sum="5f945ba46fa191c856e73b76fd5ff451" name="tests/bson-corpus/code_w_scope-valid-005.phpt" role="test" />
<file md5sum="ce7213c4e3aae833d56812e57ca28cfa" name="tests/bson-corpus/datetime-decodeError-001.phpt" role="test" />
<file md5sum="f7bfa0b5340ca943ff152c50b0e1bf04" name="tests/bson-corpus/datetime-valid-001.phpt" role="test" />
<file md5sum="7b4a43c6d6f90fef8b7bca0a8c9a61e0" name="tests/bson-corpus/datetime-valid-002.phpt" role="test" />
<file md5sum="471939bcb3717b9d784cbb41dff0b8b4" name="tests/bson-corpus/datetime-valid-003.phpt" role="test" />
<file md5sum="0f753526142aad6abed8345951f3f90f" name="tests/bson-corpus/datetime-valid-004.phpt" role="test" />
+ <file md5sum="093f74a124695331852c20f6ba4236ee" name="tests/bson-corpus/dbpointer-decodeError-001.phpt" role="test" />
+ <file md5sum="21c522b8b544870ad4982ca50dd1e7ae" name="tests/bson-corpus/dbpointer-decodeError-002.phpt" role="test" />
+ <file md5sum="0399ddebcb1fd2dd5764bd81425f6265" name="tests/bson-corpus/dbpointer-decodeError-003.phpt" role="test" />
+ <file md5sum="72e19565405e17ddd67b58a12f7fcde2" name="tests/bson-corpus/dbpointer-decodeError-004.phpt" role="test" />
+ <file md5sum="7579629d6cbaac8e1b3a0f5dddbea518" name="tests/bson-corpus/dbpointer-decodeError-005.phpt" role="test" />
+ <file md5sum="5f3222fe704bc140610288700005bbf1" name="tests/bson-corpus/dbpointer-decodeError-006.phpt" role="test" />
+ <file md5sum="4fe16bf89fe5df9ccc78fe065d635d97" name="tests/bson-corpus/dbpointer-valid-001.phpt" role="test" />
+ <file md5sum="2e8314da3bf0d89a6cc59aa05beab8d5" name="tests/bson-corpus/dbpointer-valid-002.phpt" role="test" />
+ <file md5sum="03f8c4c2d67c9836b2bea4f3e6e01878" name="tests/bson-corpus/dbpointer-valid-003.phpt" role="test" />
<file md5sum="f6a0d646f5f59227ab1a2a2d94fdf180" name="tests/bson-corpus/dbref-valid-001.phpt" role="test" />
<file md5sum="0a9d842217ab322bffa873e492687737" name="tests/bson-corpus/dbref-valid-002.phpt" role="test" />
<file md5sum="35045645543825eb1964601f4fe86cae" name="tests/bson-corpus/dbref-valid-003.phpt" role="test" />
<file md5sum="6d08c11b156e71399412d2271dcc31ae" name="tests/bson-corpus/dbref-valid-004.phpt" role="test" />
<file md5sum="c8bea39a89b0a90b84842ad6d3a96a11" name="tests/bson-corpus/dbref-valid-005.phpt" role="test" />
<file md5sum="1be9664e30513a74507db5a31493d6ab" name="tests/bson-corpus/decimal128-1-valid-001.phpt" role="test" />
<file md5sum="6211d74f1ce5b93637cb0a0649071866" name="tests/bson-corpus/decimal128-1-valid-002.phpt" role="test" />
<file md5sum="6c3cc1a8b4342f226e53614aee46c850" name="tests/bson-corpus/decimal128-1-valid-003.phpt" role="test" />
<file md5sum="1258eedd19348a86ddd31f21d1115d9c" name="tests/bson-corpus/decimal128-1-valid-004.phpt" role="test" />
<file md5sum="ba2b9cf011c89650faa41c46503d6201" name="tests/bson-corpus/decimal128-1-valid-005.phpt" role="test" />
<file md5sum="13646f18b50308a4816c3a2adb583433" name="tests/bson-corpus/decimal128-1-valid-006.phpt" role="test" />
<file md5sum="e61f4ac46d6d11c7ff1e3eff3fead1bb" name="tests/bson-corpus/decimal128-1-valid-007.phpt" role="test" />
<file md5sum="ed121eaa293ae98345e2632ce931d87e" name="tests/bson-corpus/decimal128-1-valid-008.phpt" role="test" />
<file md5sum="38799d1515616bb61426710833d27a33" name="tests/bson-corpus/decimal128-1-valid-009.phpt" role="test" />
<file md5sum="675915a04d0ccbe5e33c8cd191c49009" name="tests/bson-corpus/decimal128-1-valid-010.phpt" role="test" />
<file md5sum="da13a23d8a366f64173469a48d34bf75" name="tests/bson-corpus/decimal128-1-valid-011.phpt" role="test" />
<file md5sum="e53db0b3304cf22b40bd88648905b8b0" name="tests/bson-corpus/decimal128-1-valid-012.phpt" role="test" />
<file md5sum="e0ea8c1c0af2018097e277766178af4d" name="tests/bson-corpus/decimal128-1-valid-013.phpt" role="test" />
<file md5sum="683e01fbd575c916cf8318f79abe3e24" name="tests/bson-corpus/decimal128-1-valid-014.phpt" role="test" />
<file md5sum="14e8fd8b30319c05b7b11bb36ca1d653" name="tests/bson-corpus/decimal128-1-valid-015.phpt" role="test" />
<file md5sum="75c92b5d14312891ab5c3e16055743fa" name="tests/bson-corpus/decimal128-1-valid-016.phpt" role="test" />
<file md5sum="8a7fa49d165fda74aa161fe86a462b76" name="tests/bson-corpus/decimal128-1-valid-017.phpt" role="test" />
<file md5sum="ac80ba6121ddaa84a249b30ca2ea1872" name="tests/bson-corpus/decimal128-1-valid-018.phpt" role="test" />
<file md5sum="5d01624977a5c30b7d119314f3aff184" name="tests/bson-corpus/decimal128-1-valid-019.phpt" role="test" />
<file md5sum="8c3fcbbc3a7aa1640fe397c67a5cf899" name="tests/bson-corpus/decimal128-1-valid-020.phpt" role="test" />
<file md5sum="53f02848dfab7990fd60b1e9d38b67ad" name="tests/bson-corpus/decimal128-1-valid-021.phpt" role="test" />
<file md5sum="4361a2779601eba39c3636fe4083e6aa" name="tests/bson-corpus/decimal128-1-valid-022.phpt" role="test" />
<file md5sum="aa5c274f7340aa5640db649637a2b598" name="tests/bson-corpus/decimal128-1-valid-023.phpt" role="test" />
<file md5sum="cd2d62f86eefa21ed3515cf7331abebc" name="tests/bson-corpus/decimal128-1-valid-024.phpt" role="test" />
<file md5sum="5bc00004cb90af3b3c96d94f874ee0d2" name="tests/bson-corpus/decimal128-1-valid-025.phpt" role="test" />
<file md5sum="fcb960fc23592fae8d2ab1f043fe6d96" name="tests/bson-corpus/decimal128-1-valid-026.phpt" role="test" />
<file md5sum="7b4e7a0b389b795a77b30f955488581d" name="tests/bson-corpus/decimal128-1-valid-027.phpt" role="test" />
<file md5sum="03e0925896672089326dccfb5cdf3bb7" name="tests/bson-corpus/decimal128-1-valid-028.phpt" role="test" />
<file md5sum="7e4e2525f124be41a89a73d26b7918a0" name="tests/bson-corpus/decimal128-1-valid-029.phpt" role="test" />
<file md5sum="975e4fae01096e30dd2f99d8fae7e5ba" name="tests/bson-corpus/decimal128-1-valid-030.phpt" role="test" />
<file md5sum="9082185cf22808c7e69dcb1da3c423d9" name="tests/bson-corpus/decimal128-1-valid-031.phpt" role="test" />
<file md5sum="434f43477c58954f210d1af1fb6588c1" name="tests/bson-corpus/decimal128-1-valid-032.phpt" role="test" />
<file md5sum="a052b5e7678dd60a7c7e7de744c87017" name="tests/bson-corpus/decimal128-1-valid-033.phpt" role="test" />
<file md5sum="613ab4ac14503406b50607c19d273cb8" name="tests/bson-corpus/decimal128-1-valid-034.phpt" role="test" />
<file md5sum="5c0d2131e7acbfaedc525a8f4c4f2d45" name="tests/bson-corpus/decimal128-1-valid-035.phpt" role="test" />
<file md5sum="3fe40e8d661ebc168a849a2c45472477" name="tests/bson-corpus/decimal128-1-valid-036.phpt" role="test" />
<file md5sum="ed25faa820a283907a32e179616f5334" name="tests/bson-corpus/decimal128-1-valid-037.phpt" role="test" />
<file md5sum="8b29111ce0810339305ebb10f2bef961" name="tests/bson-corpus/decimal128-1-valid-038.phpt" role="test" />
<file md5sum="b7a91a4e418cad9b65d56549fadbadc1" name="tests/bson-corpus/decimal128-1-valid-039.phpt" role="test" />
<file md5sum="6bd56aaf565aa7aad486a0c6dd65e65a" name="tests/bson-corpus/decimal128-1-valid-040.phpt" role="test" />
<file md5sum="e676d6b392e18ca3c128e53fcef060d7" name="tests/bson-corpus/decimal128-1-valid-041.phpt" role="test" />
<file md5sum="442a9f5586ada55d3f140dffa1877de6" name="tests/bson-corpus/decimal128-1-valid-042.phpt" role="test" />
<file md5sum="84ea678317f307cb585a66b5cc692a02" name="tests/bson-corpus/decimal128-1-valid-043.phpt" role="test" />
<file md5sum="95d97fe1c10db4b55b011c14eef8a39d" name="tests/bson-corpus/decimal128-1-valid-044.phpt" role="test" />
<file md5sum="403ac53597d14f2ccdede8493c078d2e" name="tests/bson-corpus/decimal128-1-valid-045.phpt" role="test" />
<file md5sum="820b7afdb1489889b589672526912ba4" name="tests/bson-corpus/decimal128-1-valid-046.phpt" role="test" />
<file md5sum="80ee524599f9f7e0642c7b37fd897e09" name="tests/bson-corpus/decimal128-1-valid-047.phpt" role="test" />
<file md5sum="d8f2a7e25e4008b5d7819cd8b678fcb9" name="tests/bson-corpus/decimal128-1-valid-048.phpt" role="test" />
<file md5sum="c238495574eeedc4cf8fd00573ea905e" name="tests/bson-corpus/decimal128-1-valid-049.phpt" role="test" />
<file md5sum="0b0973d7ea834026c163a2011fbeb080" name="tests/bson-corpus/decimal128-1-valid-050.phpt" role="test" />
<file md5sum="ec24828ca55a2bda0a2d66505b64936d" name="tests/bson-corpus/decimal128-1-valid-051.phpt" role="test" />
<file md5sum="729441ba04ae3012c56930d4096717c5" name="tests/bson-corpus/decimal128-1-valid-052.phpt" role="test" />
<file md5sum="8f95866d1d16ab43e97c7f2faa57ea28" name="tests/bson-corpus/decimal128-1-valid-053.phpt" role="test" />
<file md5sum="c37788e500a3ffd5e4fe9de2c7beb8d2" name="tests/bson-corpus/decimal128-1-valid-054.phpt" role="test" />
<file md5sum="75284b0772f6c51e3ddfac7b54b690fc" name="tests/bson-corpus/decimal128-1-valid-055.phpt" role="test" />
<file md5sum="195bb4db3fe142ea2a5c18f42511fb98" name="tests/bson-corpus/decimal128-1-valid-056.phpt" role="test" />
<file md5sum="ee3b5cfa0a47ab7816934bb3d910f865" name="tests/bson-corpus/decimal128-2-valid-001.phpt" role="test" />
<file md5sum="5e408cdeb8c9bd64821c2036e297074b" name="tests/bson-corpus/decimal128-2-valid-002.phpt" role="test" />
<file md5sum="14d41a31977b7f3f3117846057ba2601" name="tests/bson-corpus/decimal128-2-valid-003.phpt" role="test" />
<file md5sum="b18a1b7373ede0724f2419af2589ba2c" name="tests/bson-corpus/decimal128-2-valid-004.phpt" role="test" />
<file md5sum="66ff234efe9a92930a955012376426a8" name="tests/bson-corpus/decimal128-2-valid-005.phpt" role="test" />
<file md5sum="2ebc1553235eb6940c8eb52511ffaa20" name="tests/bson-corpus/decimal128-2-valid-006.phpt" role="test" />
<file md5sum="5b3c244258cc82c1518d8921c5968629" name="tests/bson-corpus/decimal128-2-valid-007.phpt" role="test" />
<file md5sum="37c5c0137ab212c29589fbc3f933b6e8" name="tests/bson-corpus/decimal128-2-valid-008.phpt" role="test" />
<file md5sum="0161b23d856375b66b5c609a0c11ab40" name="tests/bson-corpus/decimal128-2-valid-009.phpt" role="test" />
<file md5sum="b9a07e0062873f860c2bc4883ef61bea" name="tests/bson-corpus/decimal128-2-valid-010.phpt" role="test" />
<file md5sum="0d2c193c752e22363784d7f3fe8223cd" name="tests/bson-corpus/decimal128-2-valid-011.phpt" role="test" />
<file md5sum="2bbf62a4f860531f268664ccc1ff04a7" name="tests/bson-corpus/decimal128-2-valid-012.phpt" role="test" />
<file md5sum="8c7793815244ce46beb792fff2d5d9b2" name="tests/bson-corpus/decimal128-2-valid-013.phpt" role="test" />
<file md5sum="a018ded9ca70adae810dd2a6e1e73ac0" name="tests/bson-corpus/decimal128-2-valid-014.phpt" role="test" />
<file md5sum="33c3d1896d9a523f0dc3a6d17c7cd672" name="tests/bson-corpus/decimal128-2-valid-015.phpt" role="test" />
<file md5sum="cee62aa6daacce199f4bcff29baaa770" name="tests/bson-corpus/decimal128-2-valid-016.phpt" role="test" />
<file md5sum="8fb1a596f198f8dca07ad897ba785c44" name="tests/bson-corpus/decimal128-2-valid-017.phpt" role="test" />
<file md5sum="0446235e9a03299e5056cc02af33710c" name="tests/bson-corpus/decimal128-2-valid-018.phpt" role="test" />
<file md5sum="18f5d9e1b47d4a616382b538a8c96748" name="tests/bson-corpus/decimal128-2-valid-019.phpt" role="test" />
<file md5sum="7687301937038736b52fabce10c4318d" name="tests/bson-corpus/decimal128-2-valid-020.phpt" role="test" />
<file md5sum="f4e793063b75e6bdc63417fda9693333" name="tests/bson-corpus/decimal128-2-valid-021.phpt" role="test" />
<file md5sum="9402b1bb065a1cd7e86b4926bf66b465" name="tests/bson-corpus/decimal128-2-valid-022.phpt" role="test" />
<file md5sum="419293525c66ae26d2b20bb543814df4" name="tests/bson-corpus/decimal128-2-valid-023.phpt" role="test" />
<file md5sum="2175c57d8e8cb92126c4c92b0ccfc488" name="tests/bson-corpus/decimal128-2-valid-024.phpt" role="test" />
<file md5sum="0b1133406d9c82e15b84b85237be2bed" name="tests/bson-corpus/decimal128-2-valid-025.phpt" role="test" />
<file md5sum="d466e879d9363723795783b4c4eb47ff" name="tests/bson-corpus/decimal128-2-valid-026.phpt" role="test" />
<file md5sum="af2cd17bc30e18cbf53f942d8f362b47" name="tests/bson-corpus/decimal128-2-valid-027.phpt" role="test" />
<file md5sum="6fbbd48bb5eb9f930e85fad5bead1519" name="tests/bson-corpus/decimal128-2-valid-028.phpt" role="test" />
<file md5sum="15798456fa5e0d6452f1083b1fc5807b" name="tests/bson-corpus/decimal128-2-valid-029.phpt" role="test" />
<file md5sum="de2f22e66df859f1013a9cfadfd6ba98" name="tests/bson-corpus/decimal128-2-valid-030.phpt" role="test" />
<file md5sum="7eea0cf3baacbf61664f9d4daebcaba0" name="tests/bson-corpus/decimal128-2-valid-031.phpt" role="test" />
<file md5sum="8854ca1f4536973749715dd8fc29c9da" name="tests/bson-corpus/decimal128-2-valid-032.phpt" role="test" />
<file md5sum="7804c313c8e262224e2786cd658c5dfb" name="tests/bson-corpus/decimal128-2-valid-033.phpt" role="test" />
<file md5sum="ecfcceb6d572ecc43d8f51f7b461052a" name="tests/bson-corpus/decimal128-2-valid-034.phpt" role="test" />
<file md5sum="795cb7a559a455790f48b8d7099657e2" name="tests/bson-corpus/decimal128-2-valid-035.phpt" role="test" />
<file md5sum="5294b8700c5b184ceeab7e3fdac9ac09" name="tests/bson-corpus/decimal128-2-valid-036.phpt" role="test" />
<file md5sum="fa8c1f8e2471b1e88f70494e3d43a2a3" name="tests/bson-corpus/decimal128-2-valid-037.phpt" role="test" />
<file md5sum="b26ceb09c9c9fb679ce2ae070b46e440" name="tests/bson-corpus/decimal128-2-valid-038.phpt" role="test" />
<file md5sum="930c0a170ba388984e21ccc62befaa17" name="tests/bson-corpus/decimal128-2-valid-039.phpt" role="test" />
<file md5sum="1fc738e64b9cadd5508a6eef087e9e6a" name="tests/bson-corpus/decimal128-2-valid-040.phpt" role="test" />
<file md5sum="fd7e38aa78b293c936e6e30916154be2" name="tests/bson-corpus/decimal128-2-valid-041.phpt" role="test" />
<file md5sum="f1a6c263b9f19ca118927987a3a3e960" name="tests/bson-corpus/decimal128-2-valid-042.phpt" role="test" />
<file md5sum="dce7e447bd22e0e1b6ca368a0126f6c9" name="tests/bson-corpus/decimal128-2-valid-043.phpt" role="test" />
<file md5sum="5250c400c16366276e85dd3a84a68464" name="tests/bson-corpus/decimal128-2-valid-044.phpt" role="test" />
<file md5sum="d8ed4f32c9f21737c3db57d2ff9f517d" name="tests/bson-corpus/decimal128-2-valid-045.phpt" role="test" />
<file md5sum="8da310bcd5248f1a4c788e57292fcbf3" name="tests/bson-corpus/decimal128-2-valid-046.phpt" role="test" />
<file md5sum="b78100f032f9b07917f03f6f43b27032" name="tests/bson-corpus/decimal128-2-valid-047.phpt" role="test" />
<file md5sum="fb9958f291c703f5c29a7f5572db3bcc" name="tests/bson-corpus/decimal128-2-valid-048.phpt" role="test" />
<file md5sum="39f6a9f651550909f54aba9ce62ee4c5" name="tests/bson-corpus/decimal128-2-valid-049.phpt" role="test" />
<file md5sum="de192bf93dc79e402aa334f45ba18b1e" name="tests/bson-corpus/decimal128-2-valid-050.phpt" role="test" />
<file md5sum="3d67788e6781992107655e299446eddb" name="tests/bson-corpus/decimal128-2-valid-051.phpt" role="test" />
<file md5sum="6e480e0a1b326ce956f40b40fb5ed094" name="tests/bson-corpus/decimal128-2-valid-052.phpt" role="test" />
<file md5sum="b41ce0b1219941d0432219f37a710aa5" name="tests/bson-corpus/decimal128-2-valid-053.phpt" role="test" />
<file md5sum="91b106e513924c9a611bc7a27919ce4b" name="tests/bson-corpus/decimal128-2-valid-054.phpt" role="test" />
<file md5sum="4dfbe9e6347d72e4ba4403e722314b53" name="tests/bson-corpus/decimal128-2-valid-055.phpt" role="test" />
<file md5sum="caf747e3fd2e28b322612199074487dd" name="tests/bson-corpus/decimal128-2-valid-056.phpt" role="test" />
<file md5sum="30424dfa94cdaf355ca52920a787fd10" name="tests/bson-corpus/decimal128-2-valid-057.phpt" role="test" />
<file md5sum="cf94d76866c57ff87907cafeb8fbe7e8" name="tests/bson-corpus/decimal128-2-valid-058.phpt" role="test" />
<file md5sum="48e42590a32c360bb1861b691884c2a0" name="tests/bson-corpus/decimal128-2-valid-059.phpt" role="test" />
<file md5sum="5eb9b793969b6fbf7adc222f285ad250" name="tests/bson-corpus/decimal128-2-valid-060.phpt" role="test" />
<file md5sum="2365af956b837469db89cdbdc8778421" name="tests/bson-corpus/decimal128-2-valid-061.phpt" role="test" />
<file md5sum="91d59d84d2ba39e6ba24ff68221d74f6" name="tests/bson-corpus/decimal128-2-valid-062.phpt" role="test" />
<file md5sum="753a7e1af8e0a2d9975335e8ea3f140a" name="tests/bson-corpus/decimal128-2-valid-063.phpt" role="test" />
<file md5sum="944e77c82029d92d3ff7140cf5dc5018" name="tests/bson-corpus/decimal128-2-valid-064.phpt" role="test" />
<file md5sum="ea6e3558be75757726e0dd2e970c023c" name="tests/bson-corpus/decimal128-2-valid-065.phpt" role="test" />
<file md5sum="ab5c5c8d5cdf13c31212a8b122a1f7d6" name="tests/bson-corpus/decimal128-2-valid-066.phpt" role="test" />
<file md5sum="76a085cd5533ccb41e25c04ce208404d" name="tests/bson-corpus/decimal128-2-valid-067.phpt" role="test" />
<file md5sum="4c038c0004299008f8530ee1905db50a" name="tests/bson-corpus/decimal128-2-valid-068.phpt" role="test" />
<file md5sum="3f5cc49cc6ba45b816028b3cc92d264f" name="tests/bson-corpus/decimal128-2-valid-069.phpt" role="test" />
<file md5sum="a7e88590454ce68e11ae15005f23082e" name="tests/bson-corpus/decimal128-2-valid-070.phpt" role="test" />
<file md5sum="5bae02a95b54fd348d174eb3d85f04eb" name="tests/bson-corpus/decimal128-2-valid-071.phpt" role="test" />
<file md5sum="ebd162d6f711b331e7accc7fa5c1d835" name="tests/bson-corpus/decimal128-2-valid-072.phpt" role="test" />
<file md5sum="d235b6936bfecf314267d9c14953a7d4" name="tests/bson-corpus/decimal128-2-valid-073.phpt" role="test" />
<file md5sum="c5ec17d328828052df60b7584bf38757" name="tests/bson-corpus/decimal128-2-valid-074.phpt" role="test" />
<file md5sum="9ba36e27db6ecb0a0aa936a5b492b0b9" name="tests/bson-corpus/decimal128-2-valid-075.phpt" role="test" />
<file md5sum="fb767d85a2edeae3c8bffa7d5760c10b" name="tests/bson-corpus/decimal128-2-valid-076.phpt" role="test" />
<file md5sum="c6957d607d662ed2cc67330eb10f2402" name="tests/bson-corpus/decimal128-2-valid-077.phpt" role="test" />
<file md5sum="681b2a2936c14f2004c7dec85d01b2e2" name="tests/bson-corpus/decimal128-2-valid-078.phpt" role="test" />
<file md5sum="1b1f27774210373d6afcf151bbb07bca" name="tests/bson-corpus/decimal128-2-valid-079.phpt" role="test" />
<file md5sum="e22ea399a7e6b673efbd0dc4c0888699" name="tests/bson-corpus/decimal128-2-valid-080.phpt" role="test" />
<file md5sum="2788019eba5db7c958a9a1f2535c8c0d" name="tests/bson-corpus/decimal128-2-valid-081.phpt" role="test" />
<file md5sum="e26769d5a2bde67c35f09c10a9de9dbf" name="tests/bson-corpus/decimal128-2-valid-082.phpt" role="test" />
<file md5sum="5dcc949efe0bd05699a5600c8c7171b0" name="tests/bson-corpus/decimal128-2-valid-083.phpt" role="test" />
<file md5sum="0c0e987a3230f753fe95372c7f90a32b" name="tests/bson-corpus/decimal128-2-valid-084.phpt" role="test" />
<file md5sum="66c6528d392928c73aaee101bab0d4e7" name="tests/bson-corpus/decimal128-2-valid-085.phpt" role="test" />
<file md5sum="4b4c6033d1cc69584ddc02d2cb61679c" name="tests/bson-corpus/decimal128-2-valid-086.phpt" role="test" />
<file md5sum="9fef33369778b3b31af02f918bc940d1" name="tests/bson-corpus/decimal128-2-valid-087.phpt" role="test" />
<file md5sum="83571d78ab83eb7d6f26039c06392f61" name="tests/bson-corpus/decimal128-2-valid-088.phpt" role="test" />
<file md5sum="6f3abc25b06b28dd814748aefe26fbee" name="tests/bson-corpus/decimal128-2-valid-089.phpt" role="test" />
<file md5sum="4bdd4a38167e036b0edc16fef1d0d7c0" name="tests/bson-corpus/decimal128-2-valid-090.phpt" role="test" />
<file md5sum="f70773da403b020c400324ad86a7cef0" name="tests/bson-corpus/decimal128-2-valid-091.phpt" role="test" />
<file md5sum="3b10869e9ebff2e3fe99adeb0932fb1c" name="tests/bson-corpus/decimal128-2-valid-092.phpt" role="test" />
<file md5sum="0341f9cca69ca88f914acfa9c0b11dcd" name="tests/bson-corpus/decimal128-2-valid-093.phpt" role="test" />
<file md5sum="5dd6ebcbd16e2d783fb924bc0168ced7" name="tests/bson-corpus/decimal128-2-valid-094.phpt" role="test" />
<file md5sum="be4a8b9bec6e5e94d77ff089ff1d0ee1" name="tests/bson-corpus/decimal128-2-valid-095.phpt" role="test" />
<file md5sum="442195cc8ef85c91e2fbef49122e628e" name="tests/bson-corpus/decimal128-2-valid-096.phpt" role="test" />
<file md5sum="7b92d8695368b4974328d3bf221e46be" name="tests/bson-corpus/decimal128-2-valid-097.phpt" role="test" />
<file md5sum="2e3ff697a3a2abd7b9ec13e52358ebb3" name="tests/bson-corpus/decimal128-2-valid-098.phpt" role="test" />
<file md5sum="ec685a2e20a0ec15f6bf3ff73f360d85" name="tests/bson-corpus/decimal128-2-valid-099.phpt" role="test" />
<file md5sum="4201a63bfd136d7e662938f67dd05254" name="tests/bson-corpus/decimal128-2-valid-100.phpt" role="test" />
<file md5sum="2d08081f184061e053ebc445e5b8cd24" name="tests/bson-corpus/decimal128-2-valid-101.phpt" role="test" />
<file md5sum="3a6ff963db42fcf64206e2de4ee780cc" name="tests/bson-corpus/decimal128-2-valid-102.phpt" role="test" />
<file md5sum="540d5b91603c7357e8d89824d5719737" name="tests/bson-corpus/decimal128-2-valid-103.phpt" role="test" />
<file md5sum="796fc26fd08560613f19d9083e020338" name="tests/bson-corpus/decimal128-2-valid-104.phpt" role="test" />
<file md5sum="49df24fb744715760e55787d0957b5de" name="tests/bson-corpus/decimal128-2-valid-105.phpt" role="test" />
<file md5sum="66855ccf81d802eb68391b0807feedaf" name="tests/bson-corpus/decimal128-2-valid-106.phpt" role="test" />
<file md5sum="bbba9d352a373a719ccbe406d96ca835" name="tests/bson-corpus/decimal128-2-valid-107.phpt" role="test" />
<file md5sum="52d7e75eec143dd82f2c90ef5d20dd73" name="tests/bson-corpus/decimal128-2-valid-108.phpt" role="test" />
<file md5sum="b111889617eac3398d98dc2847efa454" name="tests/bson-corpus/decimal128-2-valid-109.phpt" role="test" />
<file md5sum="25202c94a851caaa78be7cc01d338083" name="tests/bson-corpus/decimal128-2-valid-110.phpt" role="test" />
<file md5sum="ac6afaac7a362f5418c78734fd34e8c0" name="tests/bson-corpus/decimal128-2-valid-111.phpt" role="test" />
<file md5sum="b5900c5ed1adec986f86fa21bd0da84f" name="tests/bson-corpus/decimal128-2-valid-112.phpt" role="test" />
<file md5sum="9cefd97e411d3ca7612875f1dc156f90" name="tests/bson-corpus/decimal128-2-valid-113.phpt" role="test" />
<file md5sum="8aae5fee09f7e8a17a69c47bd1fee75e" name="tests/bson-corpus/decimal128-2-valid-114.phpt" role="test" />
<file md5sum="e65ebb960f782a5013d39baaed48d41e" name="tests/bson-corpus/decimal128-2-valid-115.phpt" role="test" />
<file md5sum="cac712953a826fb899b728b30e3cea8d" name="tests/bson-corpus/decimal128-2-valid-116.phpt" role="test" />
<file md5sum="eff59a7472ca181c00839c6c7d87bd82" name="tests/bson-corpus/decimal128-2-valid-117.phpt" role="test" />
<file md5sum="9320d637ed82864d61562da53a374d0a" name="tests/bson-corpus/decimal128-2-valid-118.phpt" role="test" />
<file md5sum="5850b1ff7d5c26da1468534752e98ea3" name="tests/bson-corpus/decimal128-2-valid-119.phpt" role="test" />
<file md5sum="5dcf443d0ccc15b32aa834069fa6c412" name="tests/bson-corpus/decimal128-2-valid-120.phpt" role="test" />
<file md5sum="f85faa8be07f8d8173eb072a0570dc0e" name="tests/bson-corpus/decimal128-2-valid-121.phpt" role="test" />
<file md5sum="573c4ccdfad6c9f0f59360c986ebd2c9" name="tests/bson-corpus/decimal128-2-valid-122.phpt" role="test" />
<file md5sum="82c51a7042011e90499bf8170f28952d" name="tests/bson-corpus/decimal128-2-valid-123.phpt" role="test" />
<file md5sum="b0d78129713054d1c026f070c1ba0dad" name="tests/bson-corpus/decimal128-2-valid-124.phpt" role="test" />
<file md5sum="971f60d5191b863bd465b8cfe99fbb81" name="tests/bson-corpus/decimal128-2-valid-125.phpt" role="test" />
<file md5sum="008c8b511828b32f4345e74f432f57a1" name="tests/bson-corpus/decimal128-2-valid-126.phpt" role="test" />
<file md5sum="84014477bceff0f8d732a34e91400663" name="tests/bson-corpus/decimal128-2-valid-127.phpt" role="test" />
<file md5sum="aa3df20a486284805f6633f613f4ff9c" name="tests/bson-corpus/decimal128-2-valid-128.phpt" role="test" />
<file md5sum="8a0bb497bfac2faa7cee23cdc8d8a2c3" name="tests/bson-corpus/decimal128-2-valid-129.phpt" role="test" />
<file md5sum="e880542c707e7fc3a0bb82ff519213ac" name="tests/bson-corpus/decimal128-2-valid-130.phpt" role="test" />
<file md5sum="553bf9e872800102f95acd398629af64" name="tests/bson-corpus/decimal128-2-valid-131.phpt" role="test" />
<file md5sum="77a7bcbcba9bd363342d9c7b830fae34" name="tests/bson-corpus/decimal128-2-valid-132.phpt" role="test" />
<file md5sum="cff1328a6288da15f9350b68f8c5d489" name="tests/bson-corpus/decimal128-2-valid-133.phpt" role="test" />
<file md5sum="0ee8ae38952b950651a7b43a57b8c60b" name="tests/bson-corpus/decimal128-2-valid-134.phpt" role="test" />
<file md5sum="1ea5f4d4114b0706a6b8bd4ad46179c6" name="tests/bson-corpus/decimal128-2-valid-135.phpt" role="test" />
<file md5sum="6330096fc3283f7ef5136caafa57b1bd" name="tests/bson-corpus/decimal128-2-valid-136.phpt" role="test" />
<file md5sum="e6c4f50ff19a3e501ca6b573f3f47825" name="tests/bson-corpus/decimal128-2-valid-137.phpt" role="test" />
<file md5sum="e3a7756627e40c90117bbd3e7e8d17a2" name="tests/bson-corpus/decimal128-2-valid-138.phpt" role="test" />
<file md5sum="ba8acc01b63d6e8dd109c4c7997e2c6c" name="tests/bson-corpus/decimal128-2-valid-139.phpt" role="test" />
<file md5sum="7fab66eb8d2f53b33375a0013a955d26" name="tests/bson-corpus/decimal128-2-valid-140.phpt" role="test" />
<file md5sum="9f169b5e4371cbe24c056973cde84831" name="tests/bson-corpus/decimal128-2-valid-141.phpt" role="test" />
<file md5sum="d5dfc064d6f4e5e448f42bd730fed054" name="tests/bson-corpus/decimal128-2-valid-142.phpt" role="test" />
<file md5sum="fe97ba097b1d594eb1a09cd106a11854" name="tests/bson-corpus/decimal128-2-valid-143.phpt" role="test" />
<file md5sum="867c2c7530dd452ba36f0fa0d7022d91" name="tests/bson-corpus/decimal128-2-valid-144.phpt" role="test" />
<file md5sum="0bbc6e5d9a03298d265a72bddb8076b0" name="tests/bson-corpus/decimal128-2-valid-145.phpt" role="test" />
<file md5sum="b4ba690f01910bca2af1d00954d80a9d" name="tests/bson-corpus/decimal128-2-valid-146.phpt" role="test" />
<file md5sum="ea231b7006051408759096f712705fd7" name="tests/bson-corpus/decimal128-2-valid-147.phpt" role="test" />
<file md5sum="df997d34321d8620ff8a35d810fd9b02" name="tests/bson-corpus/decimal128-2-valid-148.phpt" role="test" />
<file md5sum="d4479dfa2cc526a133799b5e625181c3" name="tests/bson-corpus/decimal128-2-valid-149.phpt" role="test" />
<file md5sum="a6a1e66f9c931b707885f8d094500ac9" name="tests/bson-corpus/decimal128-2-valid-150.phpt" role="test" />
<file md5sum="3c92721f8d215a7c3fea8ac667706fba" name="tests/bson-corpus/decimal128-2-valid-151.phpt" role="test" />
<file md5sum="8fd7c1533b61fe6eca0e58ea4a017cab" name="tests/bson-corpus/decimal128-2-valid-152.phpt" role="test" />
<file md5sum="9748fe4d36fc0ceb9e72eb929748d760" name="tests/bson-corpus/decimal128-2-valid-153.phpt" role="test" />
<file md5sum="8d6cb5244dd36b26cf92648de850307b" name="tests/bson-corpus/decimal128-2-valid-154.phpt" role="test" />
<file md5sum="d9f95167b0f45e34bf472a405bb2cf11" name="tests/bson-corpus/decimal128-2-valid-155.phpt" role="test" />
<file md5sum="d8a54fba958ab819f07e0addec0b977f" name="tests/bson-corpus/decimal128-2-valid-156.phpt" role="test" />
<file md5sum="070b469361b80b8229357d23555aaf63" name="tests/bson-corpus/decimal128-2-valid-157.phpt" role="test" />
<file md5sum="34ce63d06e50aa4944679b48de0de335" name="tests/bson-corpus/decimal128-3-valid-001.phpt" role="test" />
<file md5sum="df328c1d2158b658942dce1a4c5bdfce" name="tests/bson-corpus/decimal128-3-valid-002.phpt" role="test" />
<file md5sum="9bcf947760c6bb9a958234bc41afd4f4" name="tests/bson-corpus/decimal128-3-valid-003.phpt" role="test" />
<file md5sum="180a4302dd151247a9a37cb5ce9d5d47" name="tests/bson-corpus/decimal128-3-valid-004.phpt" role="test" />
<file md5sum="54b812360f1de0b104455a39c8889c9e" name="tests/bson-corpus/decimal128-3-valid-005.phpt" role="test" />
<file md5sum="9f7c80841aacca871904857d6710cfa3" name="tests/bson-corpus/decimal128-3-valid-006.phpt" role="test" />
<file md5sum="0293d8c4c2123f53c819c27730d86197" name="tests/bson-corpus/decimal128-3-valid-007.phpt" role="test" />
<file md5sum="60e423b7c4a59f10532f6e10caf6e0ac" name="tests/bson-corpus/decimal128-3-valid-008.phpt" role="test" />
<file md5sum="e19c736e10533f9fe6998ae25081d21c" name="tests/bson-corpus/decimal128-3-valid-009.phpt" role="test" />
<file md5sum="965acf5ad70ef683da051811f71ad0bd" name="tests/bson-corpus/decimal128-3-valid-010.phpt" role="test" />
<file md5sum="628c029a7d5c3d51b9d669f11741703e" name="tests/bson-corpus/decimal128-3-valid-011.phpt" role="test" />
<file md5sum="05198752b5da78889234ed05f1be6993" name="tests/bson-corpus/decimal128-3-valid-012.phpt" role="test" />
<file md5sum="2a19fe2d928be479d4da0d7b5fe8005f" name="tests/bson-corpus/decimal128-3-valid-013.phpt" role="test" />
<file md5sum="3af90adbafbaa424d7b9f9318ff6db94" name="tests/bson-corpus/decimal128-3-valid-014.phpt" role="test" />
<file md5sum="52114773cfeb41974cefc54e33ffa9fa" name="tests/bson-corpus/decimal128-3-valid-015.phpt" role="test" />
<file md5sum="7076c929dbc5f535627e251811cb7187" name="tests/bson-corpus/decimal128-3-valid-016.phpt" role="test" />
<file md5sum="e17cda3744c02f6a62114f0b9227ab49" name="tests/bson-corpus/decimal128-3-valid-017.phpt" role="test" />
<file md5sum="61c8d58565c8d1224a9c0676d5513b4b" name="tests/bson-corpus/decimal128-3-valid-018.phpt" role="test" />
<file md5sum="3baacae5cfebaa97ea82b6b86ca7f67f" name="tests/bson-corpus/decimal128-3-valid-019.phpt" role="test" />
<file md5sum="6f0d7886b289c38462f5d73bab69429d" name="tests/bson-corpus/decimal128-3-valid-020.phpt" role="test" />
<file md5sum="31733bfe4a02fd610b308f6ec295e011" name="tests/bson-corpus/decimal128-3-valid-021.phpt" role="test" />
<file md5sum="848c7632ba9b923bf87fc62bd9f7d950" name="tests/bson-corpus/decimal128-3-valid-022.phpt" role="test" />
<file md5sum="318f52f20af58648b597e5a0a84cb502" name="tests/bson-corpus/decimal128-3-valid-023.phpt" role="test" />
<file md5sum="601d7464591e31244bf866bb08ed22b7" name="tests/bson-corpus/decimal128-3-valid-024.phpt" role="test" />
<file md5sum="e8fff68562159a22cb3dd1cc18da37ae" name="tests/bson-corpus/decimal128-3-valid-025.phpt" role="test" />
<file md5sum="1b3494487358e1a4f21d63a246e4d1d1" name="tests/bson-corpus/decimal128-3-valid-026.phpt" role="test" />
<file md5sum="890ab1774515db8761dc156430f8d85c" name="tests/bson-corpus/decimal128-3-valid-027.phpt" role="test" />
<file md5sum="e3eadea00725f55269ffca85c5f8c008" name="tests/bson-corpus/decimal128-3-valid-028.phpt" role="test" />
<file md5sum="8fbb4f5ca491b489e81a7ce09d3949cb" name="tests/bson-corpus/decimal128-3-valid-029.phpt" role="test" />
<file md5sum="f47494ab7e679e8818e1d6859bd5f127" name="tests/bson-corpus/decimal128-3-valid-030.phpt" role="test" />
<file md5sum="2684997e4eb6f61274bf887accfeed4a" name="tests/bson-corpus/decimal128-3-valid-031.phpt" role="test" />
<file md5sum="994338eb57c4179dd912901548d1f8ec" name="tests/bson-corpus/decimal128-3-valid-032.phpt" role="test" />
<file md5sum="819721414a29a06d9955b0cad0f2125d" name="tests/bson-corpus/decimal128-3-valid-033.phpt" role="test" />
<file md5sum="365a51312afc80e0fb501150c29e6459" name="tests/bson-corpus/decimal128-3-valid-034.phpt" role="test" />
<file md5sum="e7a0116a4a1cc5505da6a5d9b57dabac" name="tests/bson-corpus/decimal128-3-valid-035.phpt" role="test" />
<file md5sum="367042b8bc7cbc069a12e467b4ee808f" name="tests/bson-corpus/decimal128-3-valid-036.phpt" role="test" />
<file md5sum="81a942f5107c4ff31de391acb675238a" name="tests/bson-corpus/decimal128-3-valid-037.phpt" role="test" />
<file md5sum="18a0462849355e6152bfe9730c85bd62" name="tests/bson-corpus/decimal128-3-valid-038.phpt" role="test" />
<file md5sum="9ac0dbafb449b20c8d4577134b3426b6" name="tests/bson-corpus/decimal128-3-valid-039.phpt" role="test" />
<file md5sum="90ebcac3ae67c235aeb5238e1f4ec133" name="tests/bson-corpus/decimal128-3-valid-040.phpt" role="test" />
<file md5sum="6251fd1a5f9e2c7fd74dfa63bbd62d7a" name="tests/bson-corpus/decimal128-3-valid-041.phpt" role="test" />
<file md5sum="cca47ea381a8738e7b0099f460f2f8c0" name="tests/bson-corpus/decimal128-3-valid-042.phpt" role="test" />
<file md5sum="f637d2f9517908f27030e3660196515b" name="tests/bson-corpus/decimal128-3-valid-043.phpt" role="test" />
<file md5sum="bde96730654b79fd7b4078b07d282eef" name="tests/bson-corpus/decimal128-3-valid-044.phpt" role="test" />
<file md5sum="ee71d0f43cd2f467813ca6a77dcf2d30" name="tests/bson-corpus/decimal128-3-valid-045.phpt" role="test" />
<file md5sum="ff0e3aad7b641672842c802c49a958d6" name="tests/bson-corpus/decimal128-3-valid-046.phpt" role="test" />
<file md5sum="94b54b1fb76f4b0eff34f4eb0e9f4405" name="tests/bson-corpus/decimal128-3-valid-047.phpt" role="test" />
<file md5sum="52044d796ece83c2352f65673b6c1350" name="tests/bson-corpus/decimal128-3-valid-048.phpt" role="test" />
<file md5sum="9ce69dba579892069c0e85917a7c8974" name="tests/bson-corpus/decimal128-3-valid-049.phpt" role="test" />
<file md5sum="d8e88496d162ff5715f5501ef3c18b83" name="tests/bson-corpus/decimal128-3-valid-050.phpt" role="test" />
<file md5sum="bac8d2811e73164fb379d31c0f913247" name="tests/bson-corpus/decimal128-3-valid-051.phpt" role="test" />
<file md5sum="9e5fa37875831ef40eadf3c56ce823f2" name="tests/bson-corpus/decimal128-3-valid-052.phpt" role="test" />
<file md5sum="798796c56a13185581d5ae58a64dbd46" name="tests/bson-corpus/decimal128-3-valid-053.phpt" role="test" />
<file md5sum="29745eb4e3008772f3eb96a8eb9cee74" name="tests/bson-corpus/decimal128-3-valid-054.phpt" role="test" />
<file md5sum="0d53d0dca5f8ef56e07f74b9961c9de5" name="tests/bson-corpus/decimal128-3-valid-055.phpt" role="test" />
<file md5sum="369f39cc9176e7e5bf7771a5f41ee635" name="tests/bson-corpus/decimal128-3-valid-056.phpt" role="test" />
<file md5sum="a5755fd2f1fd412503a52a8e71822f58" name="tests/bson-corpus/decimal128-3-valid-057.phpt" role="test" />
<file md5sum="e70940764cd396ac7ba9ceca6869ae6c" name="tests/bson-corpus/decimal128-3-valid-058.phpt" role="test" />
<file md5sum="81f941b52e9fb46279be381b17cd3382" name="tests/bson-corpus/decimal128-3-valid-059.phpt" role="test" />
<file md5sum="34a9bfac2faf455c77e2eacff967eeac" name="tests/bson-corpus/decimal128-3-valid-060.phpt" role="test" />
<file md5sum="359126abd17081eff11ce85b77b520f7" name="tests/bson-corpus/decimal128-3-valid-061.phpt" role="test" />
<file md5sum="a648faf4b3d80ba1cfdc9babc6d1f2f4" name="tests/bson-corpus/decimal128-3-valid-062.phpt" role="test" />
<file md5sum="ca9b3b5c19c652c7458b47d5d31d993c" name="tests/bson-corpus/decimal128-3-valid-063.phpt" role="test" />
<file md5sum="476611f8de079cfd73a8ad165827b1e3" name="tests/bson-corpus/decimal128-3-valid-064.phpt" role="test" />
<file md5sum="bb4d8b42a95eeff6952cf123aa8c50f7" name="tests/bson-corpus/decimal128-3-valid-065.phpt" role="test" />
<file md5sum="1e49a8bddd4fb5d856a8946bfddedc72" name="tests/bson-corpus/decimal128-3-valid-066.phpt" role="test" />
<file md5sum="63b3f358f5e98d0b14d8533570b94fbe" name="tests/bson-corpus/decimal128-3-valid-067.phpt" role="test" />
<file md5sum="d7c9b5b0837ed0c9072a43c23f0ea79d" name="tests/bson-corpus/decimal128-3-valid-068.phpt" role="test" />
<file md5sum="91642a109e973cb5ae8a6e73b8b383e5" name="tests/bson-corpus/decimal128-3-valid-069.phpt" role="test" />
<file md5sum="2315a5f9acac305bd8c5b95b3e0504f1" name="tests/bson-corpus/decimal128-3-valid-070.phpt" role="test" />
<file md5sum="d9d4d7744a2f7fd8a1503a3052db4ced" name="tests/bson-corpus/decimal128-3-valid-071.phpt" role="test" />
<file md5sum="bf0656069ff0056699c12f9efafd1fdc" name="tests/bson-corpus/decimal128-3-valid-072.phpt" role="test" />
<file md5sum="43fdb9db13d73a584253c80048aca85e" name="tests/bson-corpus/decimal128-3-valid-073.phpt" role="test" />
<file md5sum="7f01cc48402ce9aa550f47f0085f5be6" name="tests/bson-corpus/decimal128-3-valid-074.phpt" role="test" />
<file md5sum="56e888b90bb6d68d36a3420ab40fa2a2" name="tests/bson-corpus/decimal128-3-valid-075.phpt" role="test" />
<file md5sum="c4747e234d72b0e9cf83de75e6e4596b" name="tests/bson-corpus/decimal128-3-valid-076.phpt" role="test" />
<file md5sum="253e370c0292e398e2e2df3722030754" name="tests/bson-corpus/decimal128-3-valid-077.phpt" role="test" />
<file md5sum="09da81d16a25a265188a09240668d394" name="tests/bson-corpus/decimal128-3-valid-078.phpt" role="test" />
<file md5sum="9cf3e496ab05c2adfc02c39330a7445a" name="tests/bson-corpus/decimal128-3-valid-079.phpt" role="test" />
<file md5sum="0518d4e8c881748990645a167f4f36cf" name="tests/bson-corpus/decimal128-3-valid-080.phpt" role="test" />
<file md5sum="08eb0d3cad6adbb15a79456bfb2f026b" name="tests/bson-corpus/decimal128-3-valid-081.phpt" role="test" />
<file md5sum="d6cb12e7bda8e841448e93b789b52dd6" name="tests/bson-corpus/decimal128-3-valid-082.phpt" role="test" />
<file md5sum="bac369bafd38e7fafa2ba592e5d30489" name="tests/bson-corpus/decimal128-3-valid-083.phpt" role="test" />
<file md5sum="7a48888dc58dbf811db159d2c10c843c" name="tests/bson-corpus/decimal128-3-valid-084.phpt" role="test" />
<file md5sum="8e23f78cdac0b401de8262c3af50e356" name="tests/bson-corpus/decimal128-3-valid-085.phpt" role="test" />
<file md5sum="be50c0aebd85dcbef025970c46cd913d" name="tests/bson-corpus/decimal128-3-valid-086.phpt" role="test" />
<file md5sum="6751c5d768d4d52661269ece902566e1" name="tests/bson-corpus/decimal128-3-valid-087.phpt" role="test" />
<file md5sum="9878d291035d1dc914bc82e1b5d5c584" name="tests/bson-corpus/decimal128-3-valid-088.phpt" role="test" />
<file md5sum="0542decc55be115b2398094ae114ccc8" name="tests/bson-corpus/decimal128-3-valid-089.phpt" role="test" />
<file md5sum="11e1ae724b83903286609d49928fd9d2" name="tests/bson-corpus/decimal128-3-valid-090.phpt" role="test" />
<file md5sum="69676108995b60008c71a704eede26ce" name="tests/bson-corpus/decimal128-3-valid-091.phpt" role="test" />
<file md5sum="73e811163a049f29878895044bc62a9d" name="tests/bson-corpus/decimal128-3-valid-092.phpt" role="test" />
<file md5sum="f6ac7fd403b04684561c9fad043fc8ea" name="tests/bson-corpus/decimal128-3-valid-093.phpt" role="test" />
<file md5sum="a8313d3d58d2d86ba3bc65e6ce8a4d26" name="tests/bson-corpus/decimal128-3-valid-094.phpt" role="test" />
<file md5sum="2b5f4bf73e72166bdeeeadee235524c4" name="tests/bson-corpus/decimal128-3-valid-095.phpt" role="test" />
<file md5sum="702c6144e3119b48c504f400434b9937" name="tests/bson-corpus/decimal128-3-valid-096.phpt" role="test" />
<file md5sum="f1956970c8ad095e2121cababaad4374" name="tests/bson-corpus/decimal128-3-valid-097.phpt" role="test" />
<file md5sum="c0d39b7a047fb6e79db8e8499acf7975" name="tests/bson-corpus/decimal128-3-valid-098.phpt" role="test" />
<file md5sum="cd6605d45f8b50e1b912601c225b5aaa" name="tests/bson-corpus/decimal128-3-valid-099.phpt" role="test" />
<file md5sum="3ae675145aa2e4fab775942feee47abd" name="tests/bson-corpus/decimal128-3-valid-100.phpt" role="test" />
<file md5sum="3b464a872ae34247bdc420a98e8b0ed0" name="tests/bson-corpus/decimal128-3-valid-101.phpt" role="test" />
<file md5sum="9ad48a9a8204c467f69b4513974b424d" name="tests/bson-corpus/decimal128-3-valid-102.phpt" role="test" />
<file md5sum="ad81004f6f6dde066f37f2518d4276ad" name="tests/bson-corpus/decimal128-3-valid-103.phpt" role="test" />
<file md5sum="8605780af705eed39e0a63687fbfabf8" name="tests/bson-corpus/decimal128-3-valid-104.phpt" role="test" />
<file md5sum="03bd099aae2cd95304891a2024078d95" name="tests/bson-corpus/decimal128-3-valid-105.phpt" role="test" />
<file md5sum="9b8753f1a0c682afc798e26f9bb7438c" name="tests/bson-corpus/decimal128-3-valid-106.phpt" role="test" />
<file md5sum="287d867a9177b14f4a79675fe7de3a63" name="tests/bson-corpus/decimal128-3-valid-107.phpt" role="test" />
<file md5sum="1fe640b7c1ce4fb9f1434668f924d291" name="tests/bson-corpus/decimal128-3-valid-108.phpt" role="test" />
<file md5sum="04f4c79344465d3b97bf06af70f50fc9" name="tests/bson-corpus/decimal128-3-valid-109.phpt" role="test" />
<file md5sum="135e27eb2013c1020a281dc274b5c5ad" name="tests/bson-corpus/decimal128-3-valid-110.phpt" role="test" />
<file md5sum="b5b913cd09fa92954ef0f733ea75d034" name="tests/bson-corpus/decimal128-3-valid-111.phpt" role="test" />
<file md5sum="5450dae06e8644a656e50f63aca7a59d" name="tests/bson-corpus/decimal128-3-valid-112.phpt" role="test" />
<file md5sum="95d5dd6366600afad79399eadbf02d45" name="tests/bson-corpus/decimal128-3-valid-113.phpt" role="test" />
<file md5sum="804eefe67a94dc093096379149d830ef" name="tests/bson-corpus/decimal128-3-valid-114.phpt" role="test" />
<file md5sum="b2216f79aa2963b8125dc9255c216e9c" name="tests/bson-corpus/decimal128-3-valid-115.phpt" role="test" />
<file md5sum="bcb72d39f0bbf6be1232811ada8b5452" name="tests/bson-corpus/decimal128-3-valid-116.phpt" role="test" />
<file md5sum="9294200386f7d81ade24785c6adc6a4e" name="tests/bson-corpus/decimal128-3-valid-117.phpt" role="test" />
<file md5sum="c2bba821bc6d48e512d277a8f9641e4e" name="tests/bson-corpus/decimal128-3-valid-118.phpt" role="test" />
<file md5sum="6d0de9a32d83136fc3a6db0ca8783c6f" name="tests/bson-corpus/decimal128-3-valid-119.phpt" role="test" />
<file md5sum="f870f22e48817f120623476ff39a9d5c" name="tests/bson-corpus/decimal128-3-valid-120.phpt" role="test" />
<file md5sum="2b71fd8e044c4aa5874e789b4f8cacb6" name="tests/bson-corpus/decimal128-3-valid-121.phpt" role="test" />
<file md5sum="4edc5e6640c225b79832bc2e0f48c726" name="tests/bson-corpus/decimal128-3-valid-122.phpt" role="test" />
<file md5sum="d660ff8273614aaa01123cb5319d2fce" name="tests/bson-corpus/decimal128-3-valid-123.phpt" role="test" />
<file md5sum="76ed3bc8e675cc584d1ecfd81874f0da" name="tests/bson-corpus/decimal128-3-valid-124.phpt" role="test" />
<file md5sum="0e118b247fe378b22933eabce71addd5" name="tests/bson-corpus/decimal128-3-valid-125.phpt" role="test" />
<file md5sum="ea4a57092893d65ebc6c10bd845f73da" name="tests/bson-corpus/decimal128-3-valid-126.phpt" role="test" />
<file md5sum="033bd199b375c8d8edfc99557efb664c" name="tests/bson-corpus/decimal128-3-valid-127.phpt" role="test" />
<file md5sum="b39431446dd3024d4586c5ee7a53a9c7" name="tests/bson-corpus/decimal128-3-valid-128.phpt" role="test" />
<file md5sum="a5c5465e913593e71ebbdb9ddda2b7e0" name="tests/bson-corpus/decimal128-3-valid-129.phpt" role="test" />
<file md5sum="48e9c801bc4e85c1b645c337484969dc" name="tests/bson-corpus/decimal128-3-valid-130.phpt" role="test" />
<file md5sum="ff1fd438092fec37c43f5581074a8654" name="tests/bson-corpus/decimal128-3-valid-131.phpt" role="test" />
<file md5sum="7a49e15bbe61c0f44f77032a0590dd08" name="tests/bson-corpus/decimal128-3-valid-132.phpt" role="test" />
<file md5sum="41b5c0a8d472f19f445f18b1d2415103" name="tests/bson-corpus/decimal128-3-valid-133.phpt" role="test" />
<file md5sum="ceedef671302bcfb2139ce9ec6f3d76d" name="tests/bson-corpus/decimal128-3-valid-134.phpt" role="test" />
<file md5sum="102f119348c5131e67511ca4a826958f" name="tests/bson-corpus/decimal128-3-valid-135.phpt" role="test" />
<file md5sum="93a232d6dd61f6b9c4c8385393ebdef4" name="tests/bson-corpus/decimal128-3-valid-136.phpt" role="test" />
<file md5sum="f01d124d3b1b4174de15fa782ed5a8a6" name="tests/bson-corpus/decimal128-3-valid-137.phpt" role="test" />
<file md5sum="919ce4fdde473d6479e43e9dfdb0e186" name="tests/bson-corpus/decimal128-3-valid-138.phpt" role="test" />
<file md5sum="53b86c3935f6cd6af126b8218d94e160" name="tests/bson-corpus/decimal128-3-valid-139.phpt" role="test" />
<file md5sum="7bd66fd3bffdeff24d9dd3ccd5b877a8" name="tests/bson-corpus/decimal128-3-valid-140.phpt" role="test" />
<file md5sum="b6b0407bba7493ada8641c3ea20cb0f3" name="tests/bson-corpus/decimal128-3-valid-141.phpt" role="test" />
<file md5sum="1f753ad00b8f28fbc904442a4a1c00d2" name="tests/bson-corpus/decimal128-3-valid-142.phpt" role="test" />
<file md5sum="9c56975628864c07cdb1a987d32af308" name="tests/bson-corpus/decimal128-3-valid-143.phpt" role="test" />
<file md5sum="46e1f60aea42cb626a6a8c6e2e8aed24" name="tests/bson-corpus/decimal128-3-valid-144.phpt" role="test" />
<file md5sum="dfb1c9f2a6677a54b38e15a00a076206" name="tests/bson-corpus/decimal128-3-valid-145.phpt" role="test" />
<file md5sum="483b6f3b8adbdffe6b2822d45ac27fdd" name="tests/bson-corpus/decimal128-3-valid-146.phpt" role="test" />
<file md5sum="34280ee40ad731d5fd74e2896627bcfa" name="tests/bson-corpus/decimal128-3-valid-147.phpt" role="test" />
<file md5sum="f50691f53242893865a5e7e8167efdf3" name="tests/bson-corpus/decimal128-3-valid-148.phpt" role="test" />
<file md5sum="6caa2f259d2a4d4fec8bfae1c189cd29" name="tests/bson-corpus/decimal128-3-valid-149.phpt" role="test" />
<file md5sum="a174e644ef93e3bbf40b9b1b22de211f" name="tests/bson-corpus/decimal128-3-valid-150.phpt" role="test" />
<file md5sum="d5cdcd6aa65dff7129d65080a273b28d" name="tests/bson-corpus/decimal128-3-valid-151.phpt" role="test" />
<file md5sum="aa21bd5fb048c7dcd081e666b2658a32" name="tests/bson-corpus/decimal128-3-valid-152.phpt" role="test" />
<file md5sum="06d138aad451e764f3c5160374acc456" name="tests/bson-corpus/decimal128-3-valid-153.phpt" role="test" />
<file md5sum="60ed7e43676c63a4f05eed0e23b97f21" name="tests/bson-corpus/decimal128-3-valid-154.phpt" role="test" />
<file md5sum="698c194680b1a261a9040d05bcfdb6f0" name="tests/bson-corpus/decimal128-3-valid-155.phpt" role="test" />
<file md5sum="8fca768c3428aea430c5307c80e72d7d" name="tests/bson-corpus/decimal128-3-valid-156.phpt" role="test" />
<file md5sum="7ccffdab18207ff6ac931876e8819158" name="tests/bson-corpus/decimal128-3-valid-157.phpt" role="test" />
<file md5sum="ff3d7be550b35a00d313cb74a1d2ae94" name="tests/bson-corpus/decimal128-3-valid-158.phpt" role="test" />
<file md5sum="6313e9b84336ea9a22034df9f5ae4278" name="tests/bson-corpus/decimal128-3-valid-159.phpt" role="test" />
<file md5sum="af457cac1c796d177c0ddced3cb7b507" name="tests/bson-corpus/decimal128-3-valid-160.phpt" role="test" />
<file md5sum="5a3c16d9b930348d2f25feb9174b630d" name="tests/bson-corpus/decimal128-3-valid-161.phpt" role="test" />
<file md5sum="78af9a90018a3174bd3225f806abc98c" name="tests/bson-corpus/decimal128-3-valid-162.phpt" role="test" />
<file md5sum="bab6fe8749117af054525b4b3148b59b" name="tests/bson-corpus/decimal128-3-valid-163.phpt" role="test" />
<file md5sum="e80be94522fe70a126623c4e347439fc" name="tests/bson-corpus/decimal128-3-valid-164.phpt" role="test" />
<file md5sum="b1c1243a7a52aea8e15f685d03afefad" name="tests/bson-corpus/decimal128-3-valid-165.phpt" role="test" />
<file md5sum="74d49b5f3409db69768e4daa20da44c6" name="tests/bson-corpus/decimal128-3-valid-166.phpt" role="test" />
<file md5sum="12fc4d315fddfc77dac54e611710e35c" name="tests/bson-corpus/decimal128-3-valid-167.phpt" role="test" />
<file md5sum="cf310a36aae9825fdd782acf3c16ba8a" name="tests/bson-corpus/decimal128-3-valid-168.phpt" role="test" />
<file md5sum="453122b2d589ce01ad556a47bf49c83b" name="tests/bson-corpus/decimal128-3-valid-169.phpt" role="test" />
<file md5sum="d741c0b4b531ca79835fbe40ac29f274" name="tests/bson-corpus/decimal128-3-valid-170.phpt" role="test" />
<file md5sum="bb0ce34b58847ffa1c78d4146f7418a0" name="tests/bson-corpus/decimal128-3-valid-171.phpt" role="test" />
<file md5sum="69f67ca42b68a6de8f8c3b1d40646dfc" name="tests/bson-corpus/decimal128-3-valid-172.phpt" role="test" />
<file md5sum="b65a2e3284509b9c5e462a264627bcb9" name="tests/bson-corpus/decimal128-3-valid-173.phpt" role="test" />
<file md5sum="c9bd868906728cabd730205804a164ab" name="tests/bson-corpus/decimal128-3-valid-174.phpt" role="test" />
<file md5sum="b305dcd7417ad764dd2957e1027287c5" name="tests/bson-corpus/decimal128-3-valid-175.phpt" role="test" />
<file md5sum="d5af17d98c8c607b0d5282bbf6182920" name="tests/bson-corpus/decimal128-3-valid-176.phpt" role="test" />
<file md5sum="ea5fc2dd9f80691d17cac7f090f71d90" name="tests/bson-corpus/decimal128-3-valid-177.phpt" role="test" />
<file md5sum="7952b43a5122c26a8ca2246fad1bb2ba" name="tests/bson-corpus/decimal128-3-valid-178.phpt" role="test" />
<file md5sum="d85eb123995adc0d78b1740884ef2685" name="tests/bson-corpus/decimal128-3-valid-179.phpt" role="test" />
<file md5sum="c728812d21d551ab4dff6408af92be0f" name="tests/bson-corpus/decimal128-3-valid-180.phpt" role="test" />
<file md5sum="c76e2b2c5bdfe687573356e00a97fbdf" name="tests/bson-corpus/decimal128-3-valid-181.phpt" role="test" />
<file md5sum="829e8f36fa61407de01d27bd9eeef27c" name="tests/bson-corpus/decimal128-3-valid-182.phpt" role="test" />
<file md5sum="d3ccc5c3981cd94f355639c378e4677b" name="tests/bson-corpus/decimal128-3-valid-183.phpt" role="test" />
<file md5sum="9504c6aa7b80b8db9184cb52474f6c85" name="tests/bson-corpus/decimal128-3-valid-184.phpt" role="test" />
<file md5sum="2a9eeffd0beef95d467064de10f02de9" name="tests/bson-corpus/decimal128-3-valid-185.phpt" role="test" />
<file md5sum="f47be02dfa343467cb9e6ee3417c3dd7" name="tests/bson-corpus/decimal128-3-valid-186.phpt" role="test" />
<file md5sum="96d35bfcaa77ef683578e6c270758790" name="tests/bson-corpus/decimal128-3-valid-187.phpt" role="test" />
<file md5sum="47e5c49caf081b6129518717e0963882" name="tests/bson-corpus/decimal128-3-valid-188.phpt" role="test" />
<file md5sum="7a90df09c7c531d3faf54764b9033ccb" name="tests/bson-corpus/decimal128-3-valid-189.phpt" role="test" />
<file md5sum="2564d7a605f7c459b57ed68b6e9a4fe0" name="tests/bson-corpus/decimal128-3-valid-190.phpt" role="test" />
<file md5sum="164acbaf5bd1fb0285b44c1bbf869cf3" name="tests/bson-corpus/decimal128-3-valid-191.phpt" role="test" />
<file md5sum="69f2c6581d8069f7eec36c31ee852d9c" name="tests/bson-corpus/decimal128-3-valid-192.phpt" role="test" />
<file md5sum="347b7859ac548820b806f92a9baac4d8" name="tests/bson-corpus/decimal128-3-valid-193.phpt" role="test" />
<file md5sum="6cace49760bbafb3fb3741e3e0de8b46" name="tests/bson-corpus/decimal128-3-valid-194.phpt" role="test" />
<file md5sum="7d54c24b910eae152d58eb12fa151c32" name="tests/bson-corpus/decimal128-3-valid-195.phpt" role="test" />
<file md5sum="429a273911a483c2c57755ad8e001d7b" name="tests/bson-corpus/decimal128-3-valid-196.phpt" role="test" />
<file md5sum="2832812b1b6ce8d2fd9b5daac398ee49" name="tests/bson-corpus/decimal128-3-valid-197.phpt" role="test" />
<file md5sum="27874826657d1ba50af6b3a8fffca76c" name="tests/bson-corpus/decimal128-3-valid-198.phpt" role="test" />
<file md5sum="8b71ad45296b088c0dd63330a23ecd4f" name="tests/bson-corpus/decimal128-3-valid-199.phpt" role="test" />
<file md5sum="92f0aa39dc57b06ac3c3ce0ce7ad610e" name="tests/bson-corpus/decimal128-3-valid-200.phpt" role="test" />
<file md5sum="3d152e470dfc642c980ad1a432d0349f" name="tests/bson-corpus/decimal128-3-valid-201.phpt" role="test" />
<file md5sum="c6f013f11312804349500ecc1e8a3351" name="tests/bson-corpus/decimal128-3-valid-202.phpt" role="test" />
<file md5sum="6848f1bc8eca604b55c47fc8aa38fe75" name="tests/bson-corpus/decimal128-3-valid-203.phpt" role="test" />
<file md5sum="d3caea6474c2e1bd8f654955fc64aa2e" name="tests/bson-corpus/decimal128-3-valid-204.phpt" role="test" />
<file md5sum="0f8fc5ab722aa03bc3fc7e3860c9a077" name="tests/bson-corpus/decimal128-3-valid-205.phpt" role="test" />
<file md5sum="8e24ccc73ba8342df00c84d031148604" name="tests/bson-corpus/decimal128-3-valid-206.phpt" role="test" />
<file md5sum="ec75b7a5cffe375b83dd42d27a9e2d1b" name="tests/bson-corpus/decimal128-3-valid-207.phpt" role="test" />
<file md5sum="f7c54531b297db29bb31515d63682bc1" name="tests/bson-corpus/decimal128-3-valid-208.phpt" role="test" />
<file md5sum="a7f1b30a6a5b7c66173a83d7ee961929" name="tests/bson-corpus/decimal128-3-valid-209.phpt" role="test" />
<file md5sum="c7684068a7e28cb83f38fbe8fa839817" name="tests/bson-corpus/decimal128-3-valid-210.phpt" role="test" />
<file md5sum="f4b13a599e9a1472afc2e8d3fa9024ec" name="tests/bson-corpus/decimal128-3-valid-211.phpt" role="test" />
<file md5sum="00958f5dc092ab03b9416901f2ca420f" name="tests/bson-corpus/decimal128-3-valid-212.phpt" role="test" />
<file md5sum="bf9dad19e8fa0f2d0263a882a7bd8ff0" name="tests/bson-corpus/decimal128-3-valid-213.phpt" role="test" />
<file md5sum="b56f4a4785368c98157a59d654511b09" name="tests/bson-corpus/decimal128-3-valid-214.phpt" role="test" />
<file md5sum="ce4fdc35eb9250c5b9393f6bb8f3a1df" name="tests/bson-corpus/decimal128-3-valid-215.phpt" role="test" />
<file md5sum="4f5f37a624b349a30d5d7db07bd7f8f1" name="tests/bson-corpus/decimal128-3-valid-216.phpt" role="test" />
<file md5sum="0cc395f87f42a8aa0dcc4b12815d2be9" name="tests/bson-corpus/decimal128-3-valid-217.phpt" role="test" />
<file md5sum="b930c8c6c6e6585035a1c66e22aa93eb" name="tests/bson-corpus/decimal128-3-valid-218.phpt" role="test" />
<file md5sum="25b18102a0465f8eb725d60ed7804aae" name="tests/bson-corpus/decimal128-3-valid-219.phpt" role="test" />
<file md5sum="9cdc0a1bae62b587da3359e7835df88c" name="tests/bson-corpus/decimal128-3-valid-220.phpt" role="test" />
<file md5sum="e7507fadc34ba39c785caf3afb433124" name="tests/bson-corpus/decimal128-3-valid-221.phpt" role="test" />
<file md5sum="e051c0f931bc32024c89135fd968a1eb" name="tests/bson-corpus/decimal128-3-valid-222.phpt" role="test" />
<file md5sum="1323db4f58f17eff6aa0ed1d3e00fb30" name="tests/bson-corpus/decimal128-3-valid-223.phpt" role="test" />
<file md5sum="45bdd12b7abcb5f8f89a63777da88f43" name="tests/bson-corpus/decimal128-3-valid-224.phpt" role="test" />
<file md5sum="4396d4763a727c77b2f9a0b0863346b2" name="tests/bson-corpus/decimal128-3-valid-225.phpt" role="test" />
<file md5sum="99b4a3961fbf77dc7acd6800ebca9c75" name="tests/bson-corpus/decimal128-3-valid-226.phpt" role="test" />
<file md5sum="2369608b5fed1db23661d54259da490b" name="tests/bson-corpus/decimal128-3-valid-227.phpt" role="test" />
<file md5sum="947d659f83807f67e858859a6b033ec4" name="tests/bson-corpus/decimal128-3-valid-228.phpt" role="test" />
<file md5sum="30ae76f8a7f60a2398c3f510cb207f25" name="tests/bson-corpus/decimal128-3-valid-229.phpt" role="test" />
<file md5sum="86f36843eca5e1ced0ef3444694aef0b" name="tests/bson-corpus/decimal128-3-valid-230.phpt" role="test" />
<file md5sum="664bbaa8c107f0fce2767a088a12b791" name="tests/bson-corpus/decimal128-3-valid-231.phpt" role="test" />
<file md5sum="0c4f8fb0b5c2a24119261dd8cb7c611c" name="tests/bson-corpus/decimal128-3-valid-232.phpt" role="test" />
<file md5sum="d75b88a50714e5b2dde675bbecb5af67" name="tests/bson-corpus/decimal128-3-valid-233.phpt" role="test" />
<file md5sum="50031e356b25cc6bcdc6ec6feabdbaaa" name="tests/bson-corpus/decimal128-3-valid-234.phpt" role="test" />
<file md5sum="a25eb0e75c2af7bd90aada71bddd9320" name="tests/bson-corpus/decimal128-3-valid-235.phpt" role="test" />
<file md5sum="89482d2a83c13c53346f59a5b5694125" name="tests/bson-corpus/decimal128-3-valid-236.phpt" role="test" />
<file md5sum="4c9877820e4836a88496db7de7321f33" name="tests/bson-corpus/decimal128-3-valid-237.phpt" role="test" />
<file md5sum="67cd9f43beb81a49ec625d536a271127" name="tests/bson-corpus/decimal128-3-valid-238.phpt" role="test" />
<file md5sum="75cf91ed5d8caa16a200f8471c468f0f" name="tests/bson-corpus/decimal128-3-valid-239.phpt" role="test" />
<file md5sum="1862658ccdb34e863796b317ce135d5d" name="tests/bson-corpus/decimal128-3-valid-240.phpt" role="test" />
<file md5sum="95d2c1258eda3145275a8f042232d821" name="tests/bson-corpus/decimal128-3-valid-241.phpt" role="test" />
<file md5sum="6f2a12b7c3346e02d8d9df70dbf24443" name="tests/bson-corpus/decimal128-3-valid-242.phpt" role="test" />
<file md5sum="ca041a60616750c6fd631e785aecf06a" name="tests/bson-corpus/decimal128-3-valid-243.phpt" role="test" />
<file md5sum="182f0e57202ed38f3b52deba31b7830a" name="tests/bson-corpus/decimal128-3-valid-244.phpt" role="test" />
<file md5sum="8a00aadf85b1dd97fb7d432886d045aa" name="tests/bson-corpus/decimal128-3-valid-245.phpt" role="test" />
<file md5sum="0461bbc97f686051b08e6a5bbb53b959" name="tests/bson-corpus/decimal128-3-valid-246.phpt" role="test" />
<file md5sum="a4621259bd5fa3d8799066a099f83ee8" name="tests/bson-corpus/decimal128-3-valid-247.phpt" role="test" />
<file md5sum="02d0781517ee131ac6824feda1ee3d06" name="tests/bson-corpus/decimal128-3-valid-248.phpt" role="test" />
<file md5sum="70e8b607541f4be7e319abc6e7851d23" name="tests/bson-corpus/decimal128-3-valid-249.phpt" role="test" />
<file md5sum="87ed40582612e5fb716b79cb35f4fd3f" name="tests/bson-corpus/decimal128-3-valid-250.phpt" role="test" />
<file md5sum="8df6fd7da9c803f09e2123d8e908cf6f" name="tests/bson-corpus/decimal128-3-valid-251.phpt" role="test" />
<file md5sum="303eca9da56f6fc952c57a349886a047" name="tests/bson-corpus/decimal128-3-valid-252.phpt" role="test" />
<file md5sum="d5219e0fe85d9e20ee278e8b0df66164" name="tests/bson-corpus/decimal128-3-valid-253.phpt" role="test" />
<file md5sum="c392834bd6a188890ca34ba35e3d1da5" name="tests/bson-corpus/decimal128-3-valid-254.phpt" role="test" />
<file md5sum="74bff884a0f11350fda93122266b47de" name="tests/bson-corpus/decimal128-3-valid-255.phpt" role="test" />
<file md5sum="a8aae300a1bbea5a59f982dd1d04a7d8" name="tests/bson-corpus/decimal128-3-valid-256.phpt" role="test" />
<file md5sum="64d3badff7261c4f8c396180228e9263" name="tests/bson-corpus/decimal128-3-valid-257.phpt" role="test" />
<file md5sum="72863a6b340184d2efc06bec4b0b1b52" name="tests/bson-corpus/decimal128-3-valid-258.phpt" role="test" />
<file md5sum="1a18988ad9f4645ec716c5728de5123d" name="tests/bson-corpus/decimal128-3-valid-259.phpt" role="test" />
<file md5sum="217fb1e340d68c7ebeed3b1812e75007" name="tests/bson-corpus/decimal128-3-valid-260.phpt" role="test" />
<file md5sum="479050beb76594438da5e46d5b11ab1f" name="tests/bson-corpus/decimal128-3-valid-261.phpt" role="test" />
<file md5sum="642266524c9f30a9883d406db25f962f" name="tests/bson-corpus/decimal128-3-valid-262.phpt" role="test" />
<file md5sum="90c6819ad16d227ddc9bd08874a1a38f" name="tests/bson-corpus/decimal128-3-valid-263.phpt" role="test" />
<file md5sum="9edb6624bcfcee4ab0ab3c70861e6429" name="tests/bson-corpus/decimal128-3-valid-264.phpt" role="test" />
<file md5sum="865052b3dc114e727e94fe3ea35fe374" name="tests/bson-corpus/decimal128-3-valid-265.phpt" role="test" />
<file md5sum="db8cedb3eb82fe29bbaa90e1971380ee" name="tests/bson-corpus/decimal128-3-valid-266.phpt" role="test" />
<file md5sum="9f466e601b029010cc581ff0b5f6611b" name="tests/bson-corpus/decimal128-3-valid-267.phpt" role="test" />
<file md5sum="390e8afe134fee7238646a72d5984ba1" name="tests/bson-corpus/decimal128-3-valid-268.phpt" role="test" />
<file md5sum="a26942ae9397497efc6645fec446948f" name="tests/bson-corpus/decimal128-3-valid-269.phpt" role="test" />
<file md5sum="73f302c571efdfa0718b66c8f0586e49" name="tests/bson-corpus/decimal128-3-valid-270.phpt" role="test" />
<file md5sum="788c2ae67bbb0fe0ac1f184b2fad9b8f" name="tests/bson-corpus/decimal128-3-valid-271.phpt" role="test" />
<file md5sum="a96eef0690ad802e3fd7055ce15dc8f2" name="tests/bson-corpus/decimal128-3-valid-272.phpt" role="test" />
<file md5sum="670c9c570cd11238d78939b84fd3495d" name="tests/bson-corpus/decimal128-3-valid-273.phpt" role="test" />
<file md5sum="d087bd4fb582aaf15cac1f6487e85f1b" name="tests/bson-corpus/decimal128-3-valid-274.phpt" role="test" />
<file md5sum="bffaad4ce838bd0f05572805f4a792b3" name="tests/bson-corpus/decimal128-3-valid-275.phpt" role="test" />
<file md5sum="69df3124a8fc7c86f5d6a8872a9ea53b" name="tests/bson-corpus/decimal128-3-valid-276.phpt" role="test" />
<file md5sum="8c5513d50689ca1d35d1fb2c9df7bf2f" name="tests/bson-corpus/decimal128-3-valid-277.phpt" role="test" />
<file md5sum="ee2b602524107eff28c4d72402773ba4" name="tests/bson-corpus/decimal128-3-valid-278.phpt" role="test" />
<file md5sum="03db443213b04fa200cf3b7507796f32" name="tests/bson-corpus/decimal128-3-valid-279.phpt" role="test" />
<file md5sum="ca8fbc712096a759a73bf075780e647f" name="tests/bson-corpus/decimal128-3-valid-280.phpt" role="test" />
<file md5sum="5385fc73f83bbcfa4201c7783f496b72" name="tests/bson-corpus/decimal128-3-valid-281.phpt" role="test" />
<file md5sum="a816d6cbd31c7707200bdf7eb936bff7" name="tests/bson-corpus/decimal128-3-valid-282.phpt" role="test" />
<file md5sum="2f4497bacc3ff1d2b02833b0a295eaf4" name="tests/bson-corpus/decimal128-3-valid-283.phpt" role="test" />
<file md5sum="5c661b0156dc44a6b91f314c4b0f2509" name="tests/bson-corpus/decimal128-3-valid-284.phpt" role="test" />
<file md5sum="009d8193db88b5b9d866d712847fc389" name="tests/bson-corpus/decimal128-3-valid-285.phpt" role="test" />
<file md5sum="f528d64653839930b2725b0295074e3d" name="tests/bson-corpus/decimal128-3-valid-286.phpt" role="test" />
<file md5sum="e08f16db3d1d683b3e42f8b719c9a3ec" name="tests/bson-corpus/decimal128-3-valid-287.phpt" role="test" />
<file md5sum="a2723797d475df77bfa469aafcd1c23f" name="tests/bson-corpus/decimal128-3-valid-288.phpt" role="test" />
<file md5sum="7adc561fcc55c39f9b3952c1e642bc35" name="tests/bson-corpus/decimal128-3-valid-289.phpt" role="test" />
<file md5sum="d9e1e6515ace82b87d62155d6f690302" name="tests/bson-corpus/decimal128-3-valid-290.phpt" role="test" />
<file md5sum="56dc7d83c6e060c7b4307d0b7b9a6c0a" name="tests/bson-corpus/decimal128-3-valid-291.phpt" role="test" />
<file md5sum="92c4a118b61e0f354fe29e2e3a190a30" name="tests/bson-corpus/decimal128-3-valid-292.phpt" role="test" />
<file md5sum="74385ffc43126c1f584a31d022803ecf" name="tests/bson-corpus/decimal128-3-valid-293.phpt" role="test" />
<file md5sum="6d7e8945e86aa6f43e34849ae7ca019f" name="tests/bson-corpus/decimal128-3-valid-294.phpt" role="test" />
<file md5sum="19cc48ff2f3d7c0ac7463d051d546e5d" name="tests/bson-corpus/decimal128-3-valid-295.phpt" role="test" />
<file md5sum="046aea2e28cffc3c5ebc17d570c93303" name="tests/bson-corpus/decimal128-3-valid-296.phpt" role="test" />
<file md5sum="4a82366da632db80ae0d4ce74967849e" name="tests/bson-corpus/decimal128-3-valid-297.phpt" role="test" />
<file md5sum="9be11a5077f99d2e967124da5be6966d" name="tests/bson-corpus/decimal128-3-valid-298.phpt" role="test" />
<file md5sum="b952249c6ff1df0a12cf97167dea0761" name="tests/bson-corpus/decimal128-3-valid-299.phpt" role="test" />
<file md5sum="1dbd6aa53f3fd22835c60700d768cc64" name="tests/bson-corpus/decimal128-3-valid-300.phpt" role="test" />
<file md5sum="4a4333c5beba793ea2f9da8f3eead9cb" name="tests/bson-corpus/decimal128-3-valid-301.phpt" role="test" />
<file md5sum="504ce3bd1b863d0a304e1bec32b16964" name="tests/bson-corpus/decimal128-3-valid-302.phpt" role="test" />
<file md5sum="ddaef1de98c84dc697ebd60ee31d7c01" name="tests/bson-corpus/decimal128-3-valid-303.phpt" role="test" />
<file md5sum="1b84be6a6b81415243b515b8606f38b4" name="tests/bson-corpus/decimal128-3-valid-304.phpt" role="test" />
<file md5sum="1582fb1c5fcdeb152698567ce67c598e" name="tests/bson-corpus/decimal128-3-valid-305.phpt" role="test" />
<file md5sum="18db5afa1316af3b9de7d839153df780" name="tests/bson-corpus/decimal128-3-valid-306.phpt" role="test" />
<file md5sum="e66e488feb111d4d69ce77360329c898" name="tests/bson-corpus/decimal128-3-valid-307.phpt" role="test" />
<file md5sum="ef802778b9b3518610ace5dd8ea6b74c" name="tests/bson-corpus/decimal128-3-valid-308.phpt" role="test" />
<file md5sum="9ec3b1d0453a41981144d85b0f3eb575" name="tests/bson-corpus/decimal128-4-parseError-001.phpt" role="test" />
<file md5sum="506cd6ad521dacb23091429867c77f69" name="tests/bson-corpus/decimal128-4-parseError-002.phpt" role="test" />
<file md5sum="4d1c8bb08975e5f5689fddaeec8e8402" name="tests/bson-corpus/decimal128-4-parseError-003.phpt" role="test" />
<file md5sum="09a3731b634372e305ab1d20459a38b1" name="tests/bson-corpus/decimal128-4-parseError-004.phpt" role="test" />
<file md5sum="e6c661208cdfce2b594b7e438a5cb586" name="tests/bson-corpus/decimal128-4-parseError-005.phpt" role="test" />
<file md5sum="7abd53b9fd4b63103042469f1e712c51" name="tests/bson-corpus/decimal128-4-parseError-006.phpt" role="test" />
<file md5sum="a414cd52ef74a681a04bd08d174e07a4" name="tests/bson-corpus/decimal128-4-parseError-007.phpt" role="test" />
<file md5sum="93f7b286ece0c2cdc8ca80fb6deb3c29" name="tests/bson-corpus/decimal128-4-parseError-008.phpt" role="test" />
<file md5sum="a48bd5fa08ac0fd11524f9ed4349ea00" name="tests/bson-corpus/decimal128-4-parseError-009.phpt" role="test" />
<file md5sum="59db6e06cb969d386d2c4aff43789350" name="tests/bson-corpus/decimal128-4-parseError-010.phpt" role="test" />
<file md5sum="d5e93f9615e8a86ebb8406a5f06554d3" name="tests/bson-corpus/decimal128-4-parseError-011.phpt" role="test" />
<file md5sum="da29e2793f0f33cde882297f3ad93d16" name="tests/bson-corpus/decimal128-4-parseError-012.phpt" role="test" />
<file md5sum="bfa1678cd7627d4ffb33da8a97bc3291" name="tests/bson-corpus/decimal128-4-parseError-013.phpt" role="test" />
<file md5sum="e5ceaa080a31506dc370cb4eb584e5ce" name="tests/bson-corpus/decimal128-4-parseError-014.phpt" role="test" />
<file md5sum="7cc0cb86e19dacbd5a1950a36253bb9e" name="tests/bson-corpus/decimal128-4-parseError-015.phpt" role="test" />
<file md5sum="a0b16a86367ed7b6b0e708145d800cdf" name="tests/bson-corpus/decimal128-4-parseError-016.phpt" role="test" />
<file md5sum="fea774bacaf0b19b966a1cbf4eb7e495" name="tests/bson-corpus/decimal128-4-parseError-017.phpt" role="test" />
<file md5sum="0769502b551a8f55b21986d410a89bc4" name="tests/bson-corpus/decimal128-4-parseError-018.phpt" role="test" />
<file md5sum="f700e859506a827495b206157d0e79c1" name="tests/bson-corpus/decimal128-4-parseError-019.phpt" role="test" />
<file md5sum="15c4a2681de9e3e17634e0456dbb81f9" name="tests/bson-corpus/decimal128-4-parseError-020.phpt" role="test" />
<file md5sum="3d6c296dd577cf2f6d2d6fe52a07be20" name="tests/bson-corpus/decimal128-4-valid-001.phpt" role="test" />
<file md5sum="3b1e53207bff49057a875a76ceab1058" name="tests/bson-corpus/decimal128-4-valid-002.phpt" role="test" />
<file md5sum="c32fa8265ce7a165b28270e91c6afcbb" name="tests/bson-corpus/decimal128-4-valid-003.phpt" role="test" />
<file md5sum="fdcd72b9eed2a19ebbcdda5ffc0d3df6" name="tests/bson-corpus/decimal128-4-valid-004.phpt" role="test" />
<file md5sum="1306105e2f10e6f78d184b092a953020" name="tests/bson-corpus/decimal128-4-valid-005.phpt" role="test" />
<file md5sum="3d26a89dbcb3e20d41bbc7c4ea88bf18" name="tests/bson-corpus/decimal128-4-valid-006.phpt" role="test" />
<file md5sum="564b1167b72d3ac047170a0055b63026" name="tests/bson-corpus/decimal128-4-valid-007.phpt" role="test" />
<file md5sum="035a85fee0cd8c911c489b8e2d0d934e" name="tests/bson-corpus/decimal128-4-valid-008.phpt" role="test" />
<file md5sum="4b7270c06d321789031e4ffd0339134d" name="tests/bson-corpus/decimal128-4-valid-009.phpt" role="test" />
<file md5sum="12008e294dc7a4e60bbdbc86db43ef9a" name="tests/bson-corpus/decimal128-4-valid-010.phpt" role="test" />
<file md5sum="36af20079f691c94fd97142c9db41f77" name="tests/bson-corpus/decimal128-4-valid-011.phpt" role="test" />
<file md5sum="65841643c7443e84ee055d8fccff8fbe" name="tests/bson-corpus/decimal128-4-valid-012.phpt" role="test" />
<file md5sum="59d23c843f648bd0c2f389758d9b966e" name="tests/bson-corpus/decimal128-4-valid-013.phpt" role="test" />
<file md5sum="ae3f4157e8809fc7c378dad169f4b9f3" name="tests/bson-corpus/decimal128-5-valid-001.phpt" role="test" />
<file md5sum="dff889d4075849339476305219f5a236" name="tests/bson-corpus/decimal128-5-valid-002.phpt" role="test" />
<file md5sum="ba14ba6c016fdc18ff66f6db048fb0a3" name="tests/bson-corpus/decimal128-5-valid-003.phpt" role="test" />
<file md5sum="ba9a44b79affe27f730a7ec433352389" name="tests/bson-corpus/decimal128-5-valid-004.phpt" role="test" />
<file md5sum="5d5fcaa84d28796febd9b624c4ceced2" name="tests/bson-corpus/decimal128-5-valid-005.phpt" role="test" />
<file md5sum="cac2941094fc213e304e669388f6de42" name="tests/bson-corpus/decimal128-5-valid-006.phpt" role="test" />
<file md5sum="108213814aeceb9da7dae49085066aa9" name="tests/bson-corpus/decimal128-5-valid-007.phpt" role="test" />
<file md5sum="6e3802a9c28479151b815ae05ffedf4a" name="tests/bson-corpus/decimal128-5-valid-008.phpt" role="test" />
<file md5sum="e3a4f86c70897a071c52b8b63bd1ca72" name="tests/bson-corpus/decimal128-5-valid-009.phpt" role="test" />
<file md5sum="b050d4ec02398e8937444491e2008168" name="tests/bson-corpus/decimal128-5-valid-010.phpt" role="test" />
<file md5sum="b682adf6c7c93c0eb5ffe2482cbc86bb" name="tests/bson-corpus/decimal128-5-valid-011.phpt" role="test" />
<file md5sum="f86b59299c66649e186813f5efef88bc" name="tests/bson-corpus/decimal128-5-valid-012.phpt" role="test" />
<file md5sum="6a111849778a50875bb58ff1c139b78c" name="tests/bson-corpus/decimal128-5-valid-013.phpt" role="test" />
<file md5sum="d437c772800f78e1ef3469753641bdbd" name="tests/bson-corpus/decimal128-5-valid-014.phpt" role="test" />
<file md5sum="b3e546fc577e3d4eb0e148e9b7ebf07e" name="tests/bson-corpus/decimal128-5-valid-015.phpt" role="test" />
<file md5sum="c760575aae19fc8c536c30d7242a6779" name="tests/bson-corpus/decimal128-5-valid-016.phpt" role="test" />
<file md5sum="120f3057821b1a1ef4571c5d24f7a852" name="tests/bson-corpus/decimal128-5-valid-017.phpt" role="test" />
<file md5sum="ff5f2fd4dd075787ee81fd9e5bb3d8af" name="tests/bson-corpus/decimal128-5-valid-018.phpt" role="test" />
<file md5sum="972cbb598b739fe1e21d8b7058228c9e" name="tests/bson-corpus/decimal128-5-valid-019.phpt" role="test" />
<file md5sum="3d202b50c8a32d0f8d37ca43d5d93614" name="tests/bson-corpus/decimal128-5-valid-020.phpt" role="test" />
<file md5sum="e01b14a2a78c53e3a7b5a7eb4e4e8388" name="tests/bson-corpus/decimal128-5-valid-021.phpt" role="test" />
<file md5sum="02246aa93b1389373755099887155461" name="tests/bson-corpus/decimal128-5-valid-022.phpt" role="test" />
<file md5sum="ebed7e1c9008b272c3975fec979316ea" name="tests/bson-corpus/decimal128-5-valid-023.phpt" role="test" />
<file md5sum="148ee3caa80bb3e2c6016fa26490dd9d" name="tests/bson-corpus/decimal128-5-valid-024.phpt" role="test" />
<file md5sum="96915be6c733911d940e78b75da95d20" name="tests/bson-corpus/decimal128-5-valid-025.phpt" role="test" />
<file md5sum="978fecb824df0768511946b796cde1a8" name="tests/bson-corpus/decimal128-5-valid-026.phpt" role="test" />
<file md5sum="5e4607edd84ed63a80f860dd3a14a7a9" name="tests/bson-corpus/decimal128-5-valid-027.phpt" role="test" />
<file md5sum="0a35f861c451aaeb7b6ac51791e991b0" name="tests/bson-corpus/decimal128-5-valid-028.phpt" role="test" />
<file md5sum="c98cb2f14e20a5269154e87b51fc70bf" name="tests/bson-corpus/decimal128-5-valid-029.phpt" role="test" />
<file md5sum="dbb0dcd9dcddbe13dad3ebad0313583e" name="tests/bson-corpus/decimal128-5-valid-030.phpt" role="test" />
<file md5sum="365ca1ab11f99b25cb6429d7378edb56" name="tests/bson-corpus/decimal128-5-valid-031.phpt" role="test" />
<file md5sum="93942e13b80c76e95467b9366f606feb" name="tests/bson-corpus/decimal128-5-valid-032.phpt" role="test" />
<file md5sum="87f28f7c56eda1038b64ed8399c2e5ec" name="tests/bson-corpus/decimal128-5-valid-033.phpt" role="test" />
<file md5sum="5771eda7663092952f6813ebb215215a" name="tests/bson-corpus/decimal128-5-valid-034.phpt" role="test" />
<file md5sum="f17274f6b5d5a99ad0c967e95d22fa46" name="tests/bson-corpus/decimal128-5-valid-035.phpt" role="test" />
<file md5sum="b4881b4d619085b5653467fde9421fd5" name="tests/bson-corpus/decimal128-5-valid-036.phpt" role="test" />
<file md5sum="23c639715ea3612e866cf83d70d42ad9" name="tests/bson-corpus/decimal128-5-valid-037.phpt" role="test" />
<file md5sum="cba440a1b64840a63c71396c610682f2" name="tests/bson-corpus/decimal128-5-valid-038.phpt" role="test" />
<file md5sum="7a100539ca4b0e15127e30856ef8c472" name="tests/bson-corpus/decimal128-5-valid-039.phpt" role="test" />
<file md5sum="3b531bd1f338838a6bbb73a57fc9427b" name="tests/bson-corpus/decimal128-5-valid-040.phpt" role="test" />
<file md5sum="1df369ca39dc66acd2307b51f02391c7" name="tests/bson-corpus/decimal128-5-valid-041.phpt" role="test" />
<file md5sum="29a4582d8e5df180ccb4cf39077ade55" name="tests/bson-corpus/decimal128-5-valid-042.phpt" role="test" />
<file md5sum="9b6eefcc8964252e142dece5934c4b82" name="tests/bson-corpus/decimal128-5-valid-043.phpt" role="test" />
<file md5sum="2488859070570a442172d73831d15c36" name="tests/bson-corpus/decimal128-5-valid-044.phpt" role="test" />
<file md5sum="912be2cfedca3826ab13d326008a5e5f" name="tests/bson-corpus/decimal128-5-valid-045.phpt" role="test" />
<file md5sum="20f9f115f762cf2b8892c93036f29ffc" name="tests/bson-corpus/decimal128-5-valid-046.phpt" role="test" />
<file md5sum="209f1af4e3df9af67b83202cedddbcf1" name="tests/bson-corpus/decimal128-5-valid-047.phpt" role="test" />
<file md5sum="e83ed0b68f24e69f71cc9a9edfd93baa" name="tests/bson-corpus/decimal128-5-valid-048.phpt" role="test" />
<file md5sum="d984bf34b0053f704324c9ec0b9550cc" name="tests/bson-corpus/decimal128-5-valid-049.phpt" role="test" />
<file md5sum="534a913c9076f73604d69aa89a3e2b7f" name="tests/bson-corpus/decimal128-5-valid-050.phpt" role="test" />
<file md5sum="7fcc1d601ac8bb4f319d3013b2ff8d6f" name="tests/bson-corpus/decimal128-5-valid-051.phpt" role="test" />
<file md5sum="52f3ec367d8b1fdc14ea55a024633eeb" name="tests/bson-corpus/decimal128-5-valid-052.phpt" role="test" />
<file md5sum="6389fc0885e1671a6f4124876d7833b3" name="tests/bson-corpus/decimal128-5-valid-053.phpt" role="test" />
<file md5sum="0353aefe4f579411272f1db12cb9fb3c" name="tests/bson-corpus/decimal128-5-valid-054.phpt" role="test" />
<file md5sum="3259ba4a4500d16ed417c510ae39fb12" name="tests/bson-corpus/decimal128-5-valid-055.phpt" role="test" />
<file md5sum="ec6605e6c62dcfa8b40755dc6a68d4b7" name="tests/bson-corpus/decimal128-5-valid-056.phpt" role="test" />
<file md5sum="a80b45d66bb1b199ce676562aed98c8c" name="tests/bson-corpus/decimal128-5-valid-057.phpt" role="test" />
<file md5sum="8be00241652618fafc467ee5f0921c21" name="tests/bson-corpus/decimal128-5-valid-058.phpt" role="test" />
<file md5sum="69ecad834fe150b6af057e247f57d1ad" name="tests/bson-corpus/decimal128-5-valid-059.phpt" role="test" />
<file md5sum="5ef10e88062b5e8db6d57f2b3e9189cd" name="tests/bson-corpus/decimal128-5-valid-060.phpt" role="test" />
<file md5sum="e9b480a2bbb5a0840cef3a5c3b5b91c2" name="tests/bson-corpus/decimal128-5-valid-061.phpt" role="test" />
<file md5sum="5dc42a6bf54856a4184eff9cf0c093c5" name="tests/bson-corpus/decimal128-5-valid-062.phpt" role="test" />
<file md5sum="897f1d812ca47a431cf3bc5c17418290" name="tests/bson-corpus/decimal128-5-valid-063.phpt" role="test" />
<file md5sum="f127d0bfefd67c56f891896b37a76d8b" name="tests/bson-corpus/decimal128-5-valid-064.phpt" role="test" />
<file md5sum="221826b0f6273fb3f0858e521e0fe37e" name="tests/bson-corpus/decimal128-5-valid-065.phpt" role="test" />
<file md5sum="7ee3050a25a8743e9ce67691d997d190" name="tests/bson-corpus/decimal128-5-valid-066.phpt" role="test" />
<file md5sum="661f0ef7137987581e5fa6b93cdc06bb" name="tests/bson-corpus/decimal128-5-valid-067.phpt" role="test" />
<file md5sum="97fe7865f6b77af9761a310343fba415" name="tests/bson-corpus/decimal128-6-parseError-001.phpt" role="test" />
<file md5sum="9f575f6daff291e442343d44a3cac10a" name="tests/bson-corpus/decimal128-6-parseError-002.phpt" role="test" />
<file md5sum="4a66430c6a6a2f1a937f6eaab10f1f2c" name="tests/bson-corpus/decimal128-6-parseError-003.phpt" role="test" />
<file md5sum="6530b59f6a323ed20f911f87c0d6d5b9" name="tests/bson-corpus/decimal128-6-parseError-004.phpt" role="test" />
<file md5sum="b587d7e022d5ee929306a44f5e8f3666" name="tests/bson-corpus/decimal128-6-parseError-005.phpt" role="test" />
<file md5sum="1ce11e7590e301fe8cff91baf7f27299" name="tests/bson-corpus/decimal128-6-parseError-006.phpt" role="test" />
<file md5sum="f394f82562d15f669d1de267689cc95a" name="tests/bson-corpus/decimal128-6-parseError-007.phpt" role="test" />
<file md5sum="87032ec278bb9a13827a5b62cc2f21fc" name="tests/bson-corpus/decimal128-6-parseError-008.phpt" role="test" />
<file md5sum="e7ee4b1b929964e5ba5978dba60e098f" name="tests/bson-corpus/decimal128-6-parseError-009.phpt" role="test" />
<file md5sum="283726e643d46cbefb0b3641047756d4" name="tests/bson-corpus/decimal128-6-parseError-010.phpt" role="test" />
<file md5sum="4996d80b5d21cc5decb6036e0bcdc6ed" name="tests/bson-corpus/decimal128-6-parseError-011.phpt" role="test" />
<file md5sum="a9b27777c25778d465ab062981e80a46" name="tests/bson-corpus/decimal128-6-parseError-012.phpt" role="test" />
<file md5sum="d34cc4951df6044ca57808d9a7681b8e" name="tests/bson-corpus/decimal128-6-parseError-013.phpt" role="test" />
<file md5sum="539c801052840e099b620e31abb265e7" name="tests/bson-corpus/decimal128-6-parseError-014.phpt" role="test" />
<file md5sum="3f5e3afbd9eaceaf15480ec29c33e05e" name="tests/bson-corpus/decimal128-6-parseError-015.phpt" role="test" />
<file md5sum="c9bd1f761f82e17d1cad1a5e333c4d87" name="tests/bson-corpus/decimal128-6-parseError-016.phpt" role="test" />
<file md5sum="53493d03e0efa89d433e111d9e5b2c97" name="tests/bson-corpus/decimal128-6-parseError-017.phpt" role="test" />
<file md5sum="bac21d79cf24233a381256126e570e33" name="tests/bson-corpus/decimal128-6-parseError-018.phpt" role="test" />
<file md5sum="6f5805f1fdee72e416dcf3d6a2992407" name="tests/bson-corpus/decimal128-6-parseError-019.phpt" role="test" />
<file md5sum="3e2b85ba531690fb2e861dac542caccb" name="tests/bson-corpus/decimal128-6-parseError-020.phpt" role="test" />
<file md5sum="14bd678420d73ad4979dce95c78a1dcf" name="tests/bson-corpus/decimal128-6-parseError-021.phpt" role="test" />
<file md5sum="27966c70fc7cf6d78e2db4f1396c2f73" name="tests/bson-corpus/decimal128-6-parseError-022.phpt" role="test" />
<file md5sum="1f7d2bb1681215e34d5194410641de91" name="tests/bson-corpus/decimal128-6-parseError-023.phpt" role="test" />
<file md5sum="35ff6b7fde08867fd9585786b824a0d6" name="tests/bson-corpus/decimal128-6-parseError-024.phpt" role="test" />
<file md5sum="eb7fd03b97b7d1902619f4ddb2d432b8" name="tests/bson-corpus/decimal128-6-parseError-025.phpt" role="test" />
<file md5sum="e01b09664d43504f4edc73e33bfba3ca" name="tests/bson-corpus/decimal128-6-parseError-026.phpt" role="test" />
<file md5sum="deacc3d8fdfe797641fe0fe362404765" name="tests/bson-corpus/decimal128-6-parseError-027.phpt" role="test" />
<file md5sum="a0e8ac91c41ecf1eca07fb065f02d3ec" name="tests/bson-corpus/decimal128-6-parseError-028.phpt" role="test" />
<file md5sum="0bbefc7e842bfe89be81e8bf399d7d8a" name="tests/bson-corpus/decimal128-6-parseError-029.phpt" role="test" />
<file md5sum="d979bc87fec742e764e705eed2fed45b" name="tests/bson-corpus/decimal128-6-parseError-030.phpt" role="test" />
<file md5sum="f00357ca24f8844d4d7b22b3160e5e71" name="tests/bson-corpus/decimal128-6-parseError-031.phpt" role="test" />
<file md5sum="53f9d995d67a3762195fd07bb5f653d0" name="tests/bson-corpus/decimal128-7-parseError-001.phpt" role="test" />
<file md5sum="d29ed9186006eea15d635a9bacce926e" name="tests/bson-corpus/decimal128-7-parseError-002.phpt" role="test" />
<file md5sum="64d47b499c5cd564dd71d73cb85bb414" name="tests/bson-corpus/decimal128-7-parseError-003.phpt" role="test" />
<file md5sum="948ae3e82929bf3e4d0c997788a83900" name="tests/bson-corpus/decimal128-7-parseError-004.phpt" role="test" />
<file md5sum="94be2fca3acf9155613abc3e1310223c" name="tests/bson-corpus/decimal128-7-parseError-005.phpt" role="test" />
<file md5sum="d1f9e7f057df208e36759707aec45d3a" name="tests/bson-corpus/decimal128-7-parseError-006.phpt" role="test" />
<file md5sum="af5cb840cb6e880b7f4866225fb3ab8c" name="tests/bson-corpus/decimal128-7-parseError-007.phpt" role="test" />
<file md5sum="7c4945290e91d7d4ef142cd179866a72" name="tests/bson-corpus/decimal128-7-parseError-008.phpt" role="test" />
<file md5sum="ef3f78b87943a3af440e58adf83e762e" name="tests/bson-corpus/decimal128-7-parseError-009.phpt" role="test" />
<file md5sum="92b4275158b073a14953d9cd322a2202" name="tests/bson-corpus/decimal128-7-parseError-010.phpt" role="test" />
<file md5sum="a2d18ad0fdb146b6f2a142a8ea111ad5" name="tests/bson-corpus/decimal128-7-parseError-011.phpt" role="test" />
<file md5sum="535a7f455b7a9cbaaad113d915eae2ad" name="tests/bson-corpus/decimal128-7-parseError-012.phpt" role="test" />
<file md5sum="940f13a74a37c831f7b50e51ce03cb48" name="tests/bson-corpus/decimal128-7-parseError-013.phpt" role="test" />
<file md5sum="ee518aa3cd335ea1340cd660ac46c397" name="tests/bson-corpus/decimal128-7-parseError-014.phpt" role="test" />
<file md5sum="a18bb75fa65ab4364d3310dbc64b2d42" name="tests/bson-corpus/decimal128-7-parseError-015.phpt" role="test" />
<file md5sum="cdbb9254e8b5a8c2b61f0793e075bc21" name="tests/bson-corpus/decimal128-7-parseError-016.phpt" role="test" />
<file md5sum="e7c465fab5e7af09100cc0f2270e5ad4" name="tests/bson-corpus/decimal128-7-parseError-017.phpt" role="test" />
<file md5sum="ad9bded5d2d29d2f4cd40f3a98ad58ea" name="tests/bson-corpus/decimal128-7-parseError-018.phpt" role="test" />
<file md5sum="4fbef3e62555a42ecd78b5cd66c81569" name="tests/bson-corpus/decimal128-7-parseError-019.phpt" role="test" />
<file md5sum="d81d5065897fdef90dcad7be488f3188" name="tests/bson-corpus/decimal128-7-parseError-020.phpt" role="test" />
<file md5sum="db41dd01251ce000a5ecf40f762cf769" name="tests/bson-corpus/decimal128-7-parseError-021.phpt" role="test" />
<file md5sum="7f12e7dd1c939e2527470b1de6d34942" name="tests/bson-corpus/decimal128-7-parseError-022.phpt" role="test" />
<file md5sum="cd7219ff47cbc9fb57804486a944262f" name="tests/bson-corpus/decimal128-7-parseError-023.phpt" role="test" />
<file md5sum="bf34c189b0d7194e08307f081a1ffc10" name="tests/bson-corpus/decimal128-7-parseError-024.phpt" role="test" />
<file md5sum="b658af20fef19c777f878c7832b6a679" name="tests/bson-corpus/decimal128-7-parseError-025.phpt" role="test" />
<file md5sum="73a24ca42c6ae756ba56a06777610ef2" name="tests/bson-corpus/decimal128-7-parseError-026.phpt" role="test" />
<file md5sum="9a6d5b86647f571c6d4c89f96fa1f555" name="tests/bson-corpus/decimal128-7-parseError-027.phpt" role="test" />
<file md5sum="fd4263ac5a5a1dbdea4d02d607971c45" name="tests/bson-corpus/decimal128-7-parseError-028.phpt" role="test" />
<file md5sum="2b6d0739f6c91b9bad32353e19ac4db0" name="tests/bson-corpus/decimal128-7-parseError-029.phpt" role="test" />
<file md5sum="ec2592d9cf55680d54bda911b7237265" name="tests/bson-corpus/decimal128-7-parseError-030.phpt" role="test" />
<file md5sum="2c88d698f5abce5affb51f40c56cd79c" name="tests/bson-corpus/decimal128-7-parseError-031.phpt" role="test" />
<file md5sum="a1b55267e1f040cd47f234054fbd2bbc" name="tests/bson-corpus/decimal128-7-parseError-032.phpt" role="test" />
<file md5sum="ed2107f9aa466b5de70f274e7f4e9a50" name="tests/bson-corpus/decimal128-7-parseError-033.phpt" role="test" />
<file md5sum="131713b2aba97d35bb405f7c6e19f49f" name="tests/bson-corpus/decimal128-7-parseError-034.phpt" role="test" />
<file md5sum="23dd38e8a785335a2e580cb9cbd7f373" name="tests/bson-corpus/decimal128-7-parseError-035.phpt" role="test" />
<file md5sum="61c1fbba48138e7aa95dc619f1217e03" name="tests/bson-corpus/decimal128-7-parseError-036.phpt" role="test" />
<file md5sum="73448b46f4ebcfb371c3df67ffa25d70" name="tests/bson-corpus/decimal128-7-parseError-037.phpt" role="test" />
<file md5sum="ff7d44b3efcebe092a5be22db7500f93" name="tests/bson-corpus/decimal128-7-parseError-038.phpt" role="test" />
<file md5sum="958a9bc10f56cd9dabdae6b5382648be" name="tests/bson-corpus/decimal128-7-parseError-039.phpt" role="test" />
<file md5sum="a39fca304ccd2549988060852b9c2b60" name="tests/bson-corpus/decimal128-7-parseError-040.phpt" role="test" />
<file md5sum="21bcf3ea88275becb09488ce676ba256" name="tests/bson-corpus/decimal128-7-parseError-041.phpt" role="test" />
<file md5sum="a41810a881fd56479d50d1a2f10e4e15" name="tests/bson-corpus/decimal128-7-parseError-042.phpt" role="test" />
<file md5sum="b352d47eee76795c6d9cef4c1d42639a" name="tests/bson-corpus/decimal128-7-parseError-043.phpt" role="test" />
<file md5sum="0a6925324970b9215c46d629dc2ad648" name="tests/bson-corpus/decimal128-7-parseError-044.phpt" role="test" />
<file md5sum="7fd640f689e3b25e589449ce51762438" name="tests/bson-corpus/decimal128-7-parseError-045.phpt" role="test" />
<file md5sum="8965fa643c3177ba677b0a338c43935c" name="tests/bson-corpus/decimal128-7-parseError-046.phpt" role="test" />
<file md5sum="b89d1125f54520819d4780bb407751b1" name="tests/bson-corpus/decimal128-7-parseError-047.phpt" role="test" />
<file md5sum="29a49b24ce427755d4a5643a01f1e45e" name="tests/bson-corpus/decimal128-7-parseError-048.phpt" role="test" />
<file md5sum="15026f957644968579b0033b741ad521" name="tests/bson-corpus/decimal128-7-parseError-049.phpt" role="test" />
<file md5sum="bc10ad81fb524afbad5eb9f7e9354a24" name="tests/bson-corpus/decimal128-7-parseError-050.phpt" role="test" />
<file md5sum="a690c91d87cab939fd2c4b152e3d5a1f" name="tests/bson-corpus/decimal128-7-parseError-051.phpt" role="test" />
<file md5sum="cdfb6d264322fbd94013be733e93b7e2" name="tests/bson-corpus/decimal128-7-parseError-052.phpt" role="test" />
<file md5sum="b48b0f7abcdaef499f8dd6391a9e73be" name="tests/bson-corpus/decimal128-7-parseError-053.phpt" role="test" />
<file md5sum="51a21d680cf93ca8e61054d1798f3a6d" name="tests/bson-corpus/decimal128-7-parseError-054.phpt" role="test" />
<file md5sum="2fa0d96cc6ca037e731e8d73359254a1" name="tests/bson-corpus/decimal128-7-parseError-055.phpt" role="test" />
<file md5sum="3cb79692a3fff1a2965d633f583ffda1" name="tests/bson-corpus/decimal128-7-parseError-056.phpt" role="test" />
<file md5sum="31668df0a6ae44cd4b8c201b391b3c05" name="tests/bson-corpus/decimal128-7-parseError-057.phpt" role="test" />
<file md5sum="291870eebd18c55b40dd774a1250e6f2" name="tests/bson-corpus/decimal128-7-parseError-058.phpt" role="test" />
<file md5sum="8c8282547270d94ab76310662de33c5f" name="tests/bson-corpus/decimal128-7-parseError-059.phpt" role="test" />
<file md5sum="0a785c54b0e5874555867da3046d0544" name="tests/bson-corpus/decimal128-7-parseError-060.phpt" role="test" />
<file md5sum="8b00ab977fa43482b20fc6cc88923560" name="tests/bson-corpus/decimal128-7-parseError-061.phpt" role="test" />
<file md5sum="87b858d1e87a14f0f3f074a30bc428f4" name="tests/bson-corpus/decimal128-7-parseError-062.phpt" role="test" />
<file md5sum="93c4597cbcf092b83effe458587cab42" name="tests/bson-corpus/decimal128-7-parseError-063.phpt" role="test" />
<file md5sum="5604b3c00d90f38326b2caf587b75d39" name="tests/bson-corpus/decimal128-7-parseError-064.phpt" role="test" />
<file md5sum="7698f35a879199cc6cfbe2975766609a" name="tests/bson-corpus/decimal128-7-parseError-065.phpt" role="test" />
<file md5sum="8c780164635efc163d9e92df2b5fd778" name="tests/bson-corpus/decimal128-7-parseError-066.phpt" role="test" />
<file md5sum="f82b3d0a1e0617ede4d6516f1c34f63e" name="tests/bson-corpus/decimal128-7-parseError-067.phpt" role="test" />
<file md5sum="46d018c7f78ebd672e61f5761327f79e" name="tests/bson-corpus/decimal128-7-parseError-068.phpt" role="test" />
<file md5sum="c39afed94ddc79eacb39bcfde732e058" name="tests/bson-corpus/decimal128-7-parseError-069.phpt" role="test" />
<file md5sum="779aa919b0c83011517a57eb4c677c0a" name="tests/bson-corpus/decimal128-7-parseError-070.phpt" role="test" />
<file md5sum="4d74f03527230857d81b7b22c3a37091" name="tests/bson-corpus/decimal128-7-parseError-071.phpt" role="test" />
<file md5sum="678cdc4e6e63ef31974353cd80fb96b3" name="tests/bson-corpus/decimal128-7-parseError-072.phpt" role="test" />
<file md5sum="7ff4eeca58923e636f37286aea06224f" name="tests/bson-corpus/decimal128-7-parseError-073.phpt" role="test" />
<file md5sum="06cc4c867376e6ff2a138d94d839e521" name="tests/bson-corpus/decimal128-7-parseError-074.phpt" role="test" />
<file md5sum="b207d585456f86a2ff1fbc9934fafc65" name="tests/bson-corpus/decimal128-7-parseError-075.phpt" role="test" />
<file md5sum="e6eb3226d4e334bf082a32beb6f4640e" name="tests/bson-corpus/decimal128-7-parseError-076.phpt" role="test" />
<file md5sum="fc9d86339d2ec2a1fdccd6e13efdccdd" name="tests/bson-corpus/decimal128-7-parseError-077.phpt" role="test" />
<file md5sum="9ff5ca747749d6dd2cad6512ff0ba502" name="tests/bson-corpus/decimal128-7-parseError-078.phpt" role="test" />
<file md5sum="90fb79370f2b72ed1563d30b9015736f" name="tests/bson-corpus/decimal128-7-parseError-079.phpt" role="test" />
<file md5sum="2c7374986d5f75a2c14c2ff7b195a543" name="tests/bson-corpus/decimal128-7-parseError-080.phpt" role="test" />
<file md5sum="b4f5cfd8c1af72e086fcf2f53f911580" name="tests/bson-corpus/document-decodeError-001.phpt" role="test" />
<file md5sum="42b26e9ea5c826cd63b5a4ba2e01152e" name="tests/bson-corpus/document-decodeError-002.phpt" role="test" />
<file md5sum="3c60f26d3ab45610a6e214ca12926b57" name="tests/bson-corpus/document-decodeError-003.phpt" role="test" />
<file md5sum="b71432e20fecfbbd136675551156717b" name="tests/bson-corpus/document-valid-001.phpt" role="test" />
<file md5sum="b0b4b9c4d1d6ecd30899942cb293ffec" name="tests/bson-corpus/document-valid-002.phpt" role="test" />
<file md5sum="b070130d8fcfe008b8b5fe72754e4b9e" name="tests/bson-corpus/document-valid-003.phpt" role="test" />
<file md5sum="5785f63d468dd3d263b6959d2d2f3231" name="tests/bson-corpus/double-decodeError-001.phpt" role="test" />
<file md5sum="c25d9dd31322b620a29376e89b470b3e" name="tests/bson-corpus/double-valid-001.phpt" role="test" />
<file md5sum="e27a84a58acd3483ceea6e8598f5e569" name="tests/bson-corpus/double-valid-002.phpt" role="test" />
<file md5sum="3f094dfb468145dd39698d7008ade9b2" name="tests/bson-corpus/double-valid-003.phpt" role="test" />
<file md5sum="37b83b39bfb17e9e6bbb9263c47b76b4" name="tests/bson-corpus/double-valid-004.phpt" role="test" />
- <file md5sum="50787586c957d60ec0abf810b9b87e9d" name="tests/bson-corpus/double-valid-005.phpt" role="test" />
- <file md5sum="081db14de70244e7b3323955a9682490" name="tests/bson-corpus/double-valid-006.phpt" role="test" />
+ <file md5sum="176b1e8434a4c8319d7a9efe9023ae9a" name="tests/bson-corpus/double-valid-005.phpt" role="test" />
+ <file md5sum="11650a7fffae73c77db77cc43da70156" name="tests/bson-corpus/double-valid-006.phpt" role="test" />
<file md5sum="50d446cbe8475bbe34b3e3d60b6417b3" name="tests/bson-corpus/double-valid-007.phpt" role="test" />
<file md5sum="7bc4cb4dbf62717fbc354069f6fc9243" name="tests/bson-corpus/double-valid-008.phpt" role="test" />
<file md5sum="71ab6661b8790895c443b7c647bb53af" name="tests/bson-corpus/double-valid-009.phpt" role="test" />
<file md5sum="93b5c5f2636dee82f24ffd42f2a74bee" name="tests/bson-corpus/double-valid-010.phpt" role="test" />
<file md5sum="7a93dcfb31a7f1cc8b538a2b679153d5" name="tests/bson-corpus/double-valid-011.phpt" role="test" />
<file md5sum="59f568be5cd5b412f61907013d0f64e9" name="tests/bson-corpus/double-valid-012.phpt" role="test" />
<file md5sum="0da998151b70c18c67a96d8bc28a01ce" name="tests/bson-corpus/int32-decodeError-001.phpt" role="test" />
<file md5sum="5f47f37ecf54e332d98dd7f005d8bac7" name="tests/bson-corpus/int32-valid-001.phpt" role="test" />
<file md5sum="8e71156bc821c7912aad8061dfae7fab" name="tests/bson-corpus/int32-valid-002.phpt" role="test" />
<file md5sum="b6c5b826eca41d0ee00461d1992963f3" name="tests/bson-corpus/int32-valid-003.phpt" role="test" />
<file md5sum="ce5cfdb10be3feb8adcee2f5edd557c1" name="tests/bson-corpus/int32-valid-004.phpt" role="test" />
<file md5sum="b3d82bf44e3e6be968f38078e460a9ae" name="tests/bson-corpus/int32-valid-005.phpt" role="test" />
<file md5sum="d72c859a13ce93310c4f69b31e58e57e" name="tests/bson-corpus/int64-decodeError-001.phpt" role="test" />
- <file md5sum="e5704f1ca206dfc377fce667d058aea4" name="tests/bson-corpus/int64-valid-001.phpt" role="test" />
- <file md5sum="0f26ba2c544e8ad9ee48c96af9d20701" name="tests/bson-corpus/int64-valid-002.phpt" role="test" />
+ <file md5sum="a3f1a8c46052df6030a22b4268159e5b" name="tests/bson-corpus/int64-valid-001.phpt" role="test" />
+ <file md5sum="b3cab1e750ec07ecca05dec4b0264cf2" name="tests/bson-corpus/int64-valid-002.phpt" role="test" />
<file md5sum="9007c33064a22e5bfef713a954530c7a" name="tests/bson-corpus/int64-valid-003.phpt" role="test" />
<file md5sum="84237b507c3cd8adad64bc33952689ab" name="tests/bson-corpus/int64-valid-004.phpt" role="test" />
<file md5sum="09e51f0ab0b8ee6c11177b9bb198b14e" name="tests/bson-corpus/int64-valid-005.phpt" role="test" />
<file md5sum="e20137603c2a90305bd4742f03ec48a1" name="tests/bson-corpus/maxkey-valid-001.phpt" role="test" />
<file md5sum="9aac546a028d88f2044b49833eb210d5" name="tests/bson-corpus/minkey-valid-001.phpt" role="test" />
+ <file md5sum="95c489156077919b60d71ea9641d3e9e" name="tests/bson-corpus/multi-type-deprecated-valid-001.phpt" role="test" />
<file md5sum="bd81777656d4aa81d5d3f25175e9a920" name="tests/bson-corpus/multi-type-valid-001.phpt" role="test" />
<file md5sum="b1333fa4f91e784ef53ccbf077c5dc42" name="tests/bson-corpus/null-valid-001.phpt" role="test" />
<file md5sum="042a55fe8b49061f91ac3f21c9720886" name="tests/bson-corpus/oid-decodeError-001.phpt" role="test" />
<file md5sum="50101edf5a54969a5a8cf70253b50cb1" name="tests/bson-corpus/oid-valid-001.phpt" role="test" />
<file md5sum="700450049438b9bd06520bf6a17c7788" name="tests/bson-corpus/oid-valid-002.phpt" role="test" />
<file md5sum="f3546b078c40a068b7c250da016f9113" name="tests/bson-corpus/oid-valid-003.phpt" role="test" />
<file md5sum="6d2bbf0542ddfb10dbc00c5c9fef742f" name="tests/bson-corpus/regex-decodeError-001.phpt" role="test" />
<file md5sum="0aec7965ead30a0e9bb1d6a9aac6073a" name="tests/bson-corpus/regex-decodeError-002.phpt" role="test" />
<file md5sum="e2a22519ed22c6fd51a15f07cfea6574" name="tests/bson-corpus/regex-valid-001.phpt" role="test" />
<file md5sum="ff971b74a615f126d1683ac064afb486" name="tests/bson-corpus/regex-valid-002.phpt" role="test" />
<file md5sum="e032a9da471e7b8b7b6e214fdc502464" name="tests/bson-corpus/regex-valid-003.phpt" role="test" />
<file md5sum="e32ff0d63d4e978140415c1ee00ec943" name="tests/bson-corpus/regex-valid-004.phpt" role="test" />
<file md5sum="4d3c27a8c2a4f0d00d3bea4dd8fb1845" name="tests/bson-corpus/regex-valid-005.phpt" role="test" />
<file md5sum="dff8cac5463447779eab36cca3dae8d1" name="tests/bson-corpus/regex-valid-006.phpt" role="test" />
<file md5sum="2bab1b2abf90c557456afa3979c3e127" name="tests/bson-corpus/regex-valid-007.phpt" role="test" />
<file md5sum="87257a9e6683539f0234ed1ae77bd366" name="tests/bson-corpus/regex-valid-008.phpt" role="test" />
<file md5sum="dbf6860f5b827b502e1449d4184229e4" name="tests/bson-corpus/regex-valid-009.phpt" role="test" />
<file md5sum="0240e21ac7208bd577ad206545d9079b" name="tests/bson-corpus/string-decodeError-001.phpt" role="test" />
<file md5sum="1b49383dcd31b2fcfd08605d1e11224d" name="tests/bson-corpus/string-decodeError-002.phpt" role="test" />
<file md5sum="b9851ba430d845aa0d0cfbec06986420" name="tests/bson-corpus/string-decodeError-003.phpt" role="test" />
<file md5sum="c5c3e28b1702a4a526998998b65ae2b6" name="tests/bson-corpus/string-decodeError-004.phpt" role="test" />
<file md5sum="09e45dc9b3831b90438d42967d98e81f" name="tests/bson-corpus/string-decodeError-005.phpt" role="test" />
<file md5sum="aef88e1ff7d5b21c715f00a4ca3d44ff" name="tests/bson-corpus/string-decodeError-006.phpt" role="test" />
<file md5sum="d208f92291581f7319ecc9af5f3a0fb7" name="tests/bson-corpus/string-decodeError-007.phpt" role="test" />
<file md5sum="b96f96eac724e666d73f4c7ce146e6d3" name="tests/bson-corpus/string-valid-001.phpt" role="test" />
<file md5sum="c86edeb94cd259c3771c69cfccfe88ff" name="tests/bson-corpus/string-valid-002.phpt" role="test" />
<file md5sum="b69c5ddf7866f0de81d3aaa9923452ba" name="tests/bson-corpus/string-valid-003.phpt" role="test" />
<file md5sum="a09d1c006787d42d982e0a80f80a75e1" name="tests/bson-corpus/string-valid-004.phpt" role="test" />
<file md5sum="f004f57e1908394a0f9d0018c177755c" name="tests/bson-corpus/string-valid-005.phpt" role="test" />
<file md5sum="658c6b0e16fd679f5e7ef30c920e5da3" name="tests/bson-corpus/string-valid-006.phpt" role="test" />
<file md5sum="b9263c54d0a92357a336ab5ff9544911" name="tests/bson-corpus/string-valid-007.phpt" role="test" />
+ <file md5sum="3d11be58c91b9a1f8d42c2fc4c264733" name="tests/bson-corpus/symbol-decodeError-001.phpt" role="test" />
+ <file md5sum="8b260ed51ddca45a69acc54bc10d767c" name="tests/bson-corpus/symbol-decodeError-002.phpt" role="test" />
+ <file md5sum="738ac24d3ec20630f7d29f904bae2209" name="tests/bson-corpus/symbol-decodeError-003.phpt" role="test" />
+ <file md5sum="35e3562d24a6378a65ce3d3de6b2091b" name="tests/bson-corpus/symbol-decodeError-004.phpt" role="test" />
+ <file md5sum="2211c7745a5ac3e3c4febf63b4dc827c" name="tests/bson-corpus/symbol-decodeError-005.phpt" role="test" />
+ <file md5sum="a44e30922c7764b57afff5395bbca126" name="tests/bson-corpus/symbol-decodeError-006.phpt" role="test" />
+ <file md5sum="097146230f60960a210ca20db243d8d8" name="tests/bson-corpus/symbol-decodeError-007.phpt" role="test" />
+ <file md5sum="c91d5c1d3f675a8b77155c4863ae31d8" name="tests/bson-corpus/symbol-valid-001.phpt" role="test" />
+ <file md5sum="d94d8b7c069f4707a0b5850a73c82bbd" name="tests/bson-corpus/symbol-valid-002.phpt" role="test" />
+ <file md5sum="56dc191f2e7e6e54683fd8e06264e459" name="tests/bson-corpus/symbol-valid-003.phpt" role="test" />
+ <file md5sum="77217526b5bc90ae8f1a0b7cf85128e0" name="tests/bson-corpus/symbol-valid-004.phpt" role="test" />
+ <file md5sum="9813119cb30ddfdc73e4e34c9e0c3f12" name="tests/bson-corpus/symbol-valid-005.phpt" role="test" />
+ <file md5sum="4ea56490f75638b65382afb9372f08f3" name="tests/bson-corpus/symbol-valid-006.phpt" role="test" />
<file md5sum="267928a71cfe6e336a82eef0d3f5b239" name="tests/bson-corpus/timestamp-decodeError-001.phpt" role="test" />
<file md5sum="5469f32d4d42a0d4f9ff2c5bed56a387" name="tests/bson-corpus/timestamp-valid-001.phpt" role="test" />
<file md5sum="3fca07533f08d2fe9821b980afa8a2be" name="tests/bson-corpus/timestamp-valid-002.phpt" role="test" />
<file md5sum="c93eb857dfeb49bf70a7bcc9c0f9a425" name="tests/bson-corpus/timestamp-valid-003.phpt" role="test" />
<file md5sum="17ce094686cb362b80cfe1bca51b7088" name="tests/bson-corpus/top-decodeError-001.phpt" role="test" />
<file md5sum="600cb83eacf159a50fce8ac90cdf74ad" name="tests/bson-corpus/top-decodeError-002.phpt" role="test" />
<file md5sum="05031bb2a71b66c4f7ddee2b6a28ddeb" name="tests/bson-corpus/top-decodeError-003.phpt" role="test" />
<file md5sum="9e10c5ae6ec32276e1fa8a1a4ee892fb" name="tests/bson-corpus/top-decodeError-004.phpt" role="test" />
<file md5sum="68283e51bd216fa7e1b2eb437c3e76d9" name="tests/bson-corpus/top-decodeError-005.phpt" role="test" />
<file md5sum="797ce39fad54934b9b0c0bd6b13511cd" name="tests/bson-corpus/top-decodeError-006.phpt" role="test" />
<file md5sum="ad427d8fe90a3a37af730cb6ed0ca872" name="tests/bson-corpus/top-decodeError-007.phpt" role="test" />
<file md5sum="cf6d4a73c4741f76969251dd6254ef75" name="tests/bson-corpus/top-decodeError-008.phpt" role="test" />
<file md5sum="3d9b849acd268ae10a2cc1658f7fdfed" name="tests/bson-corpus/top-decodeError-009.phpt" role="test" />
<file md5sum="249c629abd447efec58954be6d0bfd90" name="tests/bson-corpus/top-decodeError-010.phpt" role="test" />
<file md5sum="d86a4178bced6e0fc406be05e232f56b" name="tests/bson-corpus/top-decodeError-011.phpt" role="test" />
<file md5sum="6272048144f54d1c61e6dcc2a54577a0" name="tests/bson-corpus/top-decodeError-012.phpt" role="test" />
<file md5sum="e0c5a2f46b6bb5043b0b95a56c7bfc6b" name="tests/bson-corpus/top-decodeError-013.phpt" role="test" />
<file md5sum="bbe865333f4d9917795b2d097bbb2114" name="tests/bson-corpus/top-decodeError-014.phpt" role="test" />
<file md5sum="f4b30ed02746f3520b83c6c680e849cf" name="tests/bson-corpus/top-parseError-001.phpt" role="test" />
<file md5sum="91ff115679200da2479742be4c591690" name="tests/bson-corpus/top-parseError-002.phpt" role="test" />
<file md5sum="6d9d8365254b5cc40f2bfdf0b474b614" name="tests/bson-corpus/top-parseError-003.phpt" role="test" />
<file md5sum="43e86b1dceb98cf74b2e55153cdb9198" name="tests/bson-corpus/top-parseError-004.phpt" role="test" />
<file md5sum="2c97ee609289adb8f2f9a474c562ad14" name="tests/bson-corpus/top-parseError-005.phpt" role="test" />
<file md5sum="2b8b22e19e96a1fce188857b269e80db" name="tests/bson-corpus/top-parseError-006.phpt" role="test" />
<file md5sum="63b0e2d0d4aca2dedadd7c77110b7472" name="tests/bson-corpus/top-parseError-007.phpt" role="test" />
<file md5sum="e8150597fe010182fc52036df6cfda73" name="tests/bson-corpus/top-parseError-008.phpt" role="test" />
<file md5sum="a4216bb01781440e2d893563d2a3e1fe" name="tests/bson-corpus/top-parseError-009.phpt" role="test" />
<file md5sum="23c04f804d78b680143362608b120559" name="tests/bson-corpus/top-parseError-010.phpt" role="test" />
<file md5sum="7aa72cf9cac3c52466857d9f06be03e6" name="tests/bson-corpus/top-parseError-011.phpt" role="test" />
<file md5sum="cf0d10a7ccb9b3cf057bfaf1155c7fe7" name="tests/bson-corpus/top-parseError-012.phpt" role="test" />
<file md5sum="f743225a927233df16827f1b284ec947" name="tests/bson-corpus/top-parseError-013.phpt" role="test" />
<file md5sum="3a3b29d65c4a35d5f27c78043701eff9" name="tests/bson-corpus/top-parseError-014.phpt" role="test" />
<file md5sum="6476201c23012606cc5741b93cdf3a98" name="tests/bson-corpus/top-parseError-015.phpt" role="test" />
<file md5sum="683bf02ff22e94e6b445b2184973bd61" name="tests/bson-corpus/top-parseError-016.phpt" role="test" />
<file md5sum="d08cb48900875f44cc0edcae2d367f54" name="tests/bson-corpus/top-parseError-017.phpt" role="test" />
<file md5sum="325077497c93dfd8f05868675a11156a" name="tests/bson-corpus/top-parseError-018.phpt" role="test" />
<file md5sum="3728e5b380d1342efe7509e86d831d99" name="tests/bson-corpus/top-parseError-019.phpt" role="test" />
<file md5sum="c8912c33f65fca26063d9fc102573667" name="tests/bson-corpus/top-parseError-020.phpt" role="test" />
<file md5sum="cc552f5c94e9381bfe83a2c69c9055e7" name="tests/bson-corpus/top-parseError-021.phpt" role="test" />
<file md5sum="e5c8182e3897dceb743012ceb7d53078" name="tests/bson-corpus/top-parseError-022.phpt" role="test" />
<file md5sum="2ae68a06cd5e15375bac8ba27f320b84" name="tests/bson-corpus/top-parseError-023.phpt" role="test" />
<file md5sum="5fb8365ce1e7c8cc9ae704f1f3e09a49" name="tests/bson-corpus/top-parseError-024.phpt" role="test" />
<file md5sum="182e2b40f8b1258916cab85e9f3b07ed" name="tests/bson-corpus/top-parseError-025.phpt" role="test" />
<file md5sum="7d6ee218b88c852721423dd332a6d5c7" name="tests/bson-corpus/top-parseError-026.phpt" role="test" />
<file md5sum="6b3cb7b036b3bdd2f096a06467321d84" name="tests/bson-corpus/top-parseError-027.phpt" role="test" />
<file md5sum="2475d1436b8e3f8abbf8810d0c9cf20d" name="tests/bson-corpus/top-parseError-028.phpt" role="test" />
<file md5sum="aa111516293dd649fbaf4422d45a7a50" name="tests/bson-corpus/top-parseError-029.phpt" role="test" />
<file md5sum="03a9219928cfc54e7ba0740c5b6da4da" name="tests/bson-corpus/top-parseError-030.phpt" role="test" />
<file md5sum="6ece98f8da86545a67fb6b746bf4bc0f" name="tests/bson-corpus/top-parseError-031.phpt" role="test" />
<file md5sum="be3927befbcc2741e4063f6ad5b85b8a" name="tests/bson-corpus/top-parseError-032.phpt" role="test" />
<file md5sum="0f9ab539216d28503c0e5e964f8ecf5a" name="tests/bson-corpus/top-parseError-033.phpt" role="test" />
<file md5sum="39bc682fadb62d8600396ee6e7964442" name="tests/bson-corpus/top-parseError-034.phpt" role="test" />
<file md5sum="73cc7317c62a4006f0eafeaaa512eda0" name="tests/bson-corpus/top-parseError-035.phpt" role="test" />
<file md5sum="dea2b127c4421799656ac90ac5a8a19a" name="tests/bson-corpus/top-parseError-036.phpt" role="test" />
<file md5sum="cd0ee19e16182cc19bc392cd9828753d" name="tests/bson-corpus/top-parseError-037.phpt" role="test" />
<file md5sum="fa78aa1ba539ffb2af3554812ed1b89c" name="tests/bson-corpus/top-parseError-038.phpt" role="test" />
<file md5sum="de6cb2dae694a93abcd0c55dd1e7e131" name="tests/bson-corpus/top-parseError-039.phpt" role="test" />
<file md5sum="0bc5affa8185d39910c7210cec956be7" name="tests/bson-corpus/top-parseError-040.phpt" role="test" />
<file md5sum="1898e62ad427e74c8ea17da283b99194" name="tests/bson-corpus/top-parseError-041.phpt" role="test" />
<file md5sum="0065bb9e2c526be10241d62820459405" name="tests/bson-corpus/top-valid-001.phpt" role="test" />
+ <file md5sum="5ec152da957dd51d5ee3fb6a719c2600" name="tests/bson-corpus/undefined-valid-001.phpt" role="test" />
<file md5sum="aacbb83d12567034c4a43ecd26e8f165" name="tests/bson/bson-binary-001.phpt" role="test" />
<file md5sum="78c016cd924e363fb8433ce673a76b6a" name="tests/bson/bson-binary-compare-001.phpt" role="test" />
<file md5sum="6eb6c83707dd7be5c718fa13b0427d6c" name="tests/bson/bson-binary-compare-002.phpt" role="test" />
+ <file md5sum="58e5df014bd86dd8f1313b3fb734a7ce" name="tests/bson/bson-binary-get_properties-001.phpt" role="test" />
+ <file md5sum="c61e1c8e4c5327f87674314fde6b5d2f" name="tests/bson/bson-binary-get_properties-002.phpt" role="test" />
<file md5sum="a89324b72c9dbfffca5008d9563565c8" name="tests/bson/bson-binary-jsonserialize-001.phpt" role="test" />
<file md5sum="ba515183a779c4ce6740ba3fb1e46f77" name="tests/bson/bson-binary-jsonserialize-002.phpt" role="test" />
<file md5sum="94deed355325f66c567309e6074cfe7a" name="tests/bson/bson-binary-serialization-001.phpt" role="test" />
<file md5sum="f487c59192b7df686475bfe0be84c3e6" name="tests/bson/bson-binary-serialization_error-001.phpt" role="test" />
<file md5sum="2ebc7a07dd945df1922ca274d04478a8" name="tests/bson/bson-binary-serialization_error-002.phpt" role="test" />
<file md5sum="325c3d210bfa4eb571a36ffa5f75c347" name="tests/bson/bson-binary-serialization_error-003.phpt" role="test" />
<file md5sum="4a4275ac852b7457302ddcb2fdce7544" name="tests/bson/bson-binary-set_state-001.phpt" role="test" />
<file md5sum="bfee0375bac0c0e1a26a34dda9814eb0" name="tests/bson/bson-binary-set_state_error-001.phpt" role="test" />
<file md5sum="b88eb23a9a107c5300863e1dc5a780b0" name="tests/bson/bson-binary-set_state_error-002.phpt" role="test" />
<file md5sum="df1ccfb1bbd3562866bc851709320cd4" name="tests/bson/bson-binary-set_state_error-003.phpt" role="test" />
<file md5sum="2079c2a8ebed8e51c12a0fab40e42ab3" name="tests/bson/bson-binary-tostring-001.phpt" role="test" />
<file md5sum="987e2a7b238833b93ddf1b3b0d879fd8" name="tests/bson/bson-binary_error-001.phpt" role="test" />
<file md5sum="a9d13886ed7bad49396fe58fb69728dc" name="tests/bson/bson-binary_error-002.phpt" role="test" />
<file md5sum="c52d47c59fa41f83989f99d3b509bc4b" name="tests/bson/bson-binary_error-003.phpt" role="test" />
<file md5sum="47b5df8e7129de482422f2eda5f15be7" name="tests/bson/bson-binary_error-004.phpt" role="test" />
<file md5sum="fe638f713510cf803847a14a9109e6e7" name="tests/bson/bson-binaryinterface-001.phpt" role="test" />
+ <file md5sum="ccecb42fcff7a5c5ac18f3049f869367" name="tests/bson/bson-dbpointer-001.phpt" role="test" />
+ <file md5sum="a9eb173b6168b52f6f36a3c1ad230e03" name="tests/bson/bson-dbpointer-002.phpt" role="test" />
+ <file md5sum="d583aaa6c756bd85fbc18a1aea0c8375" name="tests/bson/bson-dbpointer-compare-001.phpt" role="test" />
+ <file md5sum="54a92286eef293b765b118e13c4c35c6" name="tests/bson/bson-dbpointer-get_properties-001.phpt" role="test" />
+ <file md5sum="7eb0a24e6d9fdb5ba682ed5b56d4e903" name="tests/bson/bson-dbpointer-get_properties-002.phpt" role="test" />
+ <file md5sum="080fc12205c5e3b7faac905c1492d430" name="tests/bson/bson-dbpointer-jsonserialize-001.phpt" role="test" />
+ <file md5sum="e5e353372466bdf22c21a2cf2b9d4b7e" name="tests/bson/bson-dbpointer-jsonserialize-003.phpt" role="test" />
+ <file md5sum="8f81f1d20a38681c86091496d0fbf318" name="tests/bson/bson-dbpointer-serialization-001.phpt" role="test" />
+ <file md5sum="2edafeb76539e7a390372cfabc097c69" name="tests/bson/bson-dbpointer-serialization_error-001.phpt" role="test" />
+ <file md5sum="3cc4a7fdb023213b9528558497fe03d4" name="tests/bson/bson-dbpointer-serialization_error-002.phpt" role="test" />
+ <file md5sum="9967c68c7c6be7228d00469feb0a1a44" name="tests/bson/bson-dbpointer-tostring-001.phpt" role="test" />
+ <file md5sum="b014e534e95c2a043f33d4e65fb876df" name="tests/bson/bson-dbpointer_error-002.phpt" role="test" />
<file md5sum="1b07dc40c0327eaa948a01415e80156e" name="tests/bson/bson-decimal128-001.phpt" role="test" />
<file md5sum="aa1a54f32962e51878159b0b3f25a83a" name="tests/bson/bson-decimal128-002.phpt" role="test" />
<file md5sum="b17b174c54b38e37a407f8fb35aff65e" name="tests/bson/bson-decimal128-003.phpt" role="test" />
<file md5sum="b6d96feed66dfb73e8220e48e58337a1" name="tests/bson/bson-decimal128-004.phpt" role="test" />
+ <file md5sum="49024c33887fe583b96d02a37d826015" name="tests/bson/bson-decimal128-get_properties-001.phpt" role="test" />
+ <file md5sum="2078cc54b51b316910e7724a9fd6a346" name="tests/bson/bson-decimal128-get_properties-002.phpt" role="test" />
<file md5sum="b82e77c47a326ab042f1e35ec4725e51" name="tests/bson/bson-decimal128-jsonserialize-001.phpt" role="test" />
<file md5sum="afbe9023c707b866dcc078d3b126ba0a" name="tests/bson/bson-decimal128-jsonserialize-002.phpt" role="test" />
<file md5sum="4c603508bb6b9838066286014832c97f" name="tests/bson/bson-decimal128-serialization-001.phpt" role="test" />
<file md5sum="18bcb6a21e50d2bbdfc8f05dd81ea095" name="tests/bson/bson-decimal128-serialization_error-001.phpt" role="test" />
<file md5sum="105c568368afe25b76a7f4d2cbc8ab32" name="tests/bson/bson-decimal128-serialization_error-002.phpt" role="test" />
<file md5sum="3a8bcad13be933556fd0967a643293ac" name="tests/bson/bson-decimal128-set_state-001.phpt" role="test" />
<file md5sum="db934fbaaeca32c3461ca5a0e3924b3c" name="tests/bson/bson-decimal128-set_state_error-001.phpt" role="test" />
<file md5sum="cc39b9bf030108cc2e36cec032c3e021" name="tests/bson/bson-decimal128-set_state_error-002.phpt" role="test" />
<file md5sum="e938c12d1f2fcf8db10aa35a1835d938" name="tests/bson/bson-decimal128_error-001.phpt" role="test" />
<file md5sum="c551b87ae2e2987ba8e5518c73aed1f6" name="tests/bson/bson-decimal128_error-002.phpt" role="test" />
<file md5sum="a7dbd531a5dace1341acdf77d3821da3" name="tests/bson/bson-decimal128interface-001.phpt" role="test" />
<file md5sum="d9e66d3693c33a70a8f302b285d6a017" name="tests/bson/bson-decode-001.phpt" role="test" />
<file md5sum="72c9ae0b2a63b4734b4242634b26d838" name="tests/bson/bson-decode-002.phpt" role="test" />
<file md5sum="c537992ae377109995cc650def0fbe31" name="tests/bson/bson-encode-001.phpt" role="test" />
<file md5sum="7d63ae8f6ffe1c3723de519a28c518a8" name="tests/bson/bson-encode-002.phpt" role="test" />
<file md5sum="29348a9f31df1efbf7ebbce60934f443" name="tests/bson/bson-encode-003.phpt" role="test" />
<file md5sum="b20023cbaba4b6a0406c3b0bceadd6ac" name="tests/bson/bson-encode-004.phpt" role="test" />
<file md5sum="9b2c1fb3ece5c67bf2593e78ebd4481d" name="tests/bson/bson-encode-005.phpt" role="test" />
<file md5sum="9f2c849e9e4d87c63fee4591fc4c3e3f" name="tests/bson/bson-fromJSON-001.phpt" role="test" />
<file md5sum="08a8c4233a4a4d3d5216bbc289189e79" name="tests/bson/bson-fromJSON-002.phpt" role="test" />
<file md5sum="729e16c01e1318e6a3e74c1a28139e5e" name="tests/bson/bson-fromJSON_error-001.phpt" role="test" />
<file md5sum="ef65020cde3e656d01e8dd4b70f5211e" name="tests/bson/bson-fromPHP-001.phpt" role="test" />
<file md5sum="d06d3e149b4049b069ac97446292e54a" name="tests/bson/bson-fromPHP-002.phpt" role="test" />
<file md5sum="f63c536993d1f5c321194a1509a76b49" name="tests/bson/bson-fromPHP-003.phpt" role="test" />
<file md5sum="cdee70ad41d06ebf5433f0c123d8b473" name="tests/bson/bson-fromPHP-005.phpt" role="test" />
<file md5sum="fb00e3c73770eaa72d2c4042438a698a" name="tests/bson/bson-fromPHP-006.phpt" role="test" />
<file md5sum="b0eecb69117e5a61228f9779b400b3fe" name="tests/bson/bson-fromPHP_error-001.phpt" role="test" />
<file md5sum="7ff1968400ad0bde9a7908ba3d69c321" name="tests/bson/bson-fromPHP_error-002.phpt" role="test" />
<file md5sum="9b31b04d9e209bac1c7af0cc07575908" name="tests/bson/bson-fromPHP_error-003.phpt" role="test" />
<file md5sum="ebaef67679405086cef2b14d9b0d0b2a" name="tests/bson/bson-fromPHP_error-004.phpt" role="test" />
<file md5sum="7adaec1f95f3f251ec41e4655cb739f7" name="tests/bson/bson-fromPHP_error-005.phpt" role="test" />
<file md5sum="a880c42fdc990dfd57d815cbaa5ac3e1" name="tests/bson/bson-fromPHP_error-006.phpt" role="test" />
<file md5sum="8a10b1a60bc1fc5a6e00c8e7f6120070" name="tests/bson/bson-fromPHP_error-007.phpt" role="test" />
<file md5sum="9748cb0c6ac74663bc66f5e3ead5bf34" name="tests/bson/bson-generate-document-id.phpt" role="test" />
<file md5sum="3ee11e5db8d124bc21663bb2f306b983" name="tests/bson/bson-javascript-001.phpt" role="test" />
<file md5sum="9c91bd749fd0ab27a36d14394a5e424b" name="tests/bson/bson-javascript-002.phpt" role="test" />
<file md5sum="97e60e9d937e0545fdd93efbc9f78c7f" name="tests/bson/bson-javascript-compare-001.phpt" role="test" />
<file md5sum="6fa03185f5a88f493df843621065aeb9" name="tests/bson/bson-javascript-compare-002.phpt" role="test" />
<file md5sum="3567bceb79c30dfd50838a0267a69c62" name="tests/bson/bson-javascript-getCode-001.phpt" role="test" />
<file md5sum="540de5d9a88362eb9f00555a1fb35597" name="tests/bson/bson-javascript-getScope-001.phpt" role="test" />
+ <file md5sum="e25c7ca28f41bf8785962a44c62b2159" name="tests/bson/bson-javascript-get_properties-001.phpt" role="test" />
+ <file md5sum="d422d8249db5a52a19a9fb2bf3e2b631" name="tests/bson/bson-javascript-get_properties-002.phpt" role="test" />
<file md5sum="b56136e8638788b0e72210ae82293c4a" name="tests/bson/bson-javascript-jsonserialize-001.phpt" role="test" />
<file md5sum="08155bf6625a0317d0daebba81670fcc" name="tests/bson/bson-javascript-jsonserialize-002.phpt" role="test" />
<file md5sum="50f282c4901a3e1640c94fc927f714f0" name="tests/bson/bson-javascript-jsonserialize-003.phpt" role="test" />
<file md5sum="8b577cf09bca8e3fc9614d616c19860e" name="tests/bson/bson-javascript-jsonserialize-004.phpt" role="test" />
<file md5sum="6715a176827be6b7f3ef2226d9599063" name="tests/bson/bson-javascript-serialization-001.phpt" role="test" />
<file md5sum="3b734b3672e2fb7c22d631e30a668572" name="tests/bson/bson-javascript-serialization_error-001.phpt" role="test" />
<file md5sum="aad55f29fc2f8cb29fbe24be4f8e3f45" name="tests/bson/bson-javascript-serialization_error-002.phpt" role="test" />
<file md5sum="71105272974df96fcc305b2c19b3055f" name="tests/bson/bson-javascript-serialization_error-003.phpt" role="test" />
<file md5sum="afe2231141201fde1ae265ec11f7f3ad" name="tests/bson/bson-javascript-set_state-001.phpt" role="test" />
<file md5sum="b1ca12563c821779cb762995b0eb72c1" name="tests/bson/bson-javascript-set_state_error-001.phpt" role="test" />
<file md5sum="abb0c3c2cb58a5b2ee28849a5fdebcc8" name="tests/bson/bson-javascript-set_state_error-002.phpt" role="test" />
<file md5sum="23f1dcb1ec478e1ccad03999c2f7a023" name="tests/bson/bson-javascript-set_state_error-003.phpt" role="test" />
<file md5sum="4a853962f168d677f3e9c2087766fa29" name="tests/bson/bson-javascript-tostring-001.phpt" role="test" />
<file md5sum="407da77724f87e043114d37dc3112d70" name="tests/bson/bson-javascript_error-001.phpt" role="test" />
<file md5sum="3c848a8651ccfeddeb8a6c2d60a986f3" name="tests/bson/bson-javascript_error-002.phpt" role="test" />
<file md5sum="02fba91408fc0f350a204bc8b1d3f04c" name="tests/bson/bson-javascript_error-003.phpt" role="test" />
<file md5sum="91e37d158699b239d09f45bac8b3a237" name="tests/bson/bson-javascriptinterface-001.phpt" role="test" />
<file md5sum="726fe1860dfb685818b14c5a6be825ea" name="tests/bson/bson-maxkey-001.phpt" role="test" />
<file md5sum="1640c877e8ffab77ca2a49939721f325" name="tests/bson/bson-maxkey-compare-001.phpt" role="test" />
<file md5sum="4314312c271b5c33f53a4d3887a683b0" name="tests/bson/bson-maxkey-jsonserialize-001.phpt" role="test" />
<file md5sum="a57e651d344bd32f6ac3f820a222bfa2" name="tests/bson/bson-maxkey-jsonserialize-002.phpt" role="test" />
<file md5sum="6e74674e084b8fb81f31e9863a87079d" name="tests/bson/bson-maxkey-serialization-001.phpt" role="test" />
<file md5sum="ea86c16672ce4ca92a750eb7de2837e5" name="tests/bson/bson-maxkey-set_state-001.phpt" role="test" />
<file md5sum="101130436ace2c1418413a1cdd3cc5a5" name="tests/bson/bson-maxkey_error-001.phpt" role="test" />
<file md5sum="d9a42b06a0a7d9b8103ffee8a638c7b5" name="tests/bson/bson-maxkeyinterface-001.phpt" role="test" />
<file md5sum="62d2b76e5ddf5a724d21ae546b7561b1" name="tests/bson/bson-minkey-001.phpt" role="test" />
<file md5sum="cab9d17b350f7ff0fb3930aa298f626a" name="tests/bson/bson-minkey-compare-001.phpt" role="test" />
<file md5sum="6b2c6c29ac2dbc4f91392779b99d562c" name="tests/bson/bson-minkey-jsonserialize-001.phpt" role="test" />
<file md5sum="ee6731f161e2de2523764a03c4693783" name="tests/bson/bson-minkey-jsonserialize-002.phpt" role="test" />
<file md5sum="37fe1c3d8e22afe72cdbb2b6c445eba4" name="tests/bson/bson-minkey-serialization-001.phpt" role="test" />
<file md5sum="74201982485a724a0822cc2f2093cb75" name="tests/bson/bson-minkey-set_state-001.phpt" role="test" />
<file md5sum="910f6541ade6c03374170c097907a8b2" name="tests/bson/bson-minkey_error-001.phpt" role="test" />
<file md5sum="0a45023579316c69f78806f230ba106c" name="tests/bson/bson-minkeyinterface-001.phpt" role="test" />
<file md5sum="6a98e167419823a2ae98732fbcdc16ef" name="tests/bson/bson-objectid-001.phpt" role="test" />
<file md5sum="d1eae72097de3d1e1bd0ef60f4e35cd6" name="tests/bson/bson-objectid-002.phpt" role="test" />
<file md5sum="c994597716974e44d2586b1fd6abe4a8" name="tests/bson/bson-objectid-003.phpt" role="test" />
<file md5sum="b9a3a09dd645ff06e975d192c34ad587" name="tests/bson/bson-objectid-004.phpt" role="test" />
<file md5sum="a777ad1f36f9f3ab61a31848e29fbda3" name="tests/bson/bson-objectid-compare-001.phpt" role="test" />
<file md5sum="45a5c909f831527ec527dc6dff83a41d" name="tests/bson/bson-objectid-compare-002.phpt" role="test" />
<file md5sum="5f4fc31b72fbf2946cabfbe801e1f8e5" name="tests/bson/bson-objectid-getTimestamp-001.phpt" role="test" />
+ <file md5sum="cae2d6ff62f134db503c739f79da7516" name="tests/bson/bson-objectid-get_properties-001.phpt" role="test" />
+ <file md5sum="ce20450b15ec7489bc67392e0af50231" name="tests/bson/bson-objectid-get_properties-002.phpt" role="test" />
<file md5sum="2927bb3a1e8581dada0e53f8c701f30d" name="tests/bson/bson-objectid-jsonserialize-001.phpt" role="test" />
<file md5sum="d64b11cf175f975ebe964b1dd1684d52" name="tests/bson/bson-objectid-jsonserialize-002.phpt" role="test" />
<file md5sum="5e8370c65ccd9bd3af9268acbc0b2d32" name="tests/bson/bson-objectid-serialization-001.phpt" role="test" />
<file md5sum="147723e5365c43ef3947319b75374d05" name="tests/bson/bson-objectid-serialization_error-001.phpt" role="test" />
<file md5sum="30c5ef0be71cb20a6d59055090d4c360" name="tests/bson/bson-objectid-serialization_error-002.phpt" role="test" />
<file md5sum="5d2fd4b95c5120b2f79599dab1d8611f" name="tests/bson/bson-objectid-set_state-001.phpt" role="test" />
<file md5sum="1bd644e702a2bbd2524899eee772ec0c" name="tests/bson/bson-objectid-set_state_error-001.phpt" role="test" />
<file md5sum="67d3e6886da704de351a6aa49b256c74" name="tests/bson/bson-objectid-set_state_error-002.phpt" role="test" />
<file md5sum="7f504953b66fa9d427aeb6aeff0b3341" name="tests/bson/bson-objectid_error-001.phpt" role="test" />
<file md5sum="d89e2fd1b54762f478872976049633ec" name="tests/bson/bson-objectid_error-002.phpt" role="test" />
<file md5sum="51145ee580d4b4dced0c2f23f0fbbfca" name="tests/bson/bson-objectid_error-003.phpt" role="test" />
<file md5sum="94512cbf7a26273ee3b7191601600674" name="tests/bson/bson-objectidinterface-001.phpt" role="test" />
<file md5sum="36f88ff62e81bfc544a0f70a055a301b" name="tests/bson/bson-regex-001.phpt" role="test" />
<file md5sum="c6d502bd80b12b49bb86ca15cd80cb5d" name="tests/bson/bson-regex-002.phpt" role="test" />
<file md5sum="986da81902669ff49dd126f04122b14e" name="tests/bson/bson-regex-003.phpt" role="test" />
<file md5sum="511347e513bdf3bc2ae37aa34b146c01" name="tests/bson/bson-regex-004.phpt" role="test" />
<file md5sum="c5aeef20912e9660c5b9b9916e6061ed" name="tests/bson/bson-regex-005.phpt" role="test" />
<file md5sum="6f0accb9c2f47fd13f287b115e1fdb69" name="tests/bson/bson-regex-compare-001.phpt" role="test" />
<file md5sum="78e0fcaa87dc451f34f9ef119e1bd274" name="tests/bson/bson-regex-compare-002.phpt" role="test" />
+ <file md5sum="7b006f20fc2ac164fb4bdce2846da38b" name="tests/bson/bson-regex-get_properties-001.phpt" role="test" />
+ <file md5sum="6324a488905bb05b3d2e75375c69d88a" name="tests/bson/bson-regex-get_properties-002.phpt" role="test" />
<file md5sum="6fa2a0677b01b0c63ac9e75d7189de73" name="tests/bson/bson-regex-jsonserialize-001.phpt" role="test" />
<file md5sum="b74df13ee5230ede71e1c6dab4439d2f" name="tests/bson/bson-regex-jsonserialize-002.phpt" role="test" />
<file md5sum="f2443282d9f3e636c68051904febb3e7" name="tests/bson/bson-regex-jsonserialize-003.phpt" role="test" />
<file md5sum="3e8cea9be0207736f5f1a57b7f3f985a" name="tests/bson/bson-regex-jsonserialize-004.phpt" role="test" />
<file md5sum="66647e53b95c6eafb52d98e2d9196c7f" name="tests/bson/bson-regex-serialization-001.phpt" role="test" />
<file md5sum="f5eca804b8710b015d2f90c8d8c71368" name="tests/bson/bson-regex-serialization-002.phpt" role="test" />
<file md5sum="e42ec5b985814f1dd3c3ce3acbcbc00f" name="tests/bson/bson-regex-serialization-003.phpt" role="test" />
<file md5sum="07c7662c6b8dcc5723693077eae6c9fd" name="tests/bson/bson-regex-serialization_error-001.phpt" role="test" />
<file md5sum="733b4696d0d9e1b0da751510a761acc5" name="tests/bson/bson-regex-serialization_error-002.phpt" role="test" />
<file md5sum="1761af9218ff1583f51133d4f0cdcb19" name="tests/bson/bson-regex-set_state-001.phpt" role="test" />
<file md5sum="ab56bd68ecf9ea5717facbaa141db1b5" name="tests/bson/bson-regex-set_state-002.phpt" role="test" />
<file md5sum="22816419e87ffa591bfb03a921ea750c" name="tests/bson/bson-regex-set_state_error-001.phpt" role="test" />
<file md5sum="377e4b051a65dd08a16966cbd92f413c" name="tests/bson/bson-regex-set_state_error-002.phpt" role="test" />
<file md5sum="9324eaf63f62194dd69656d40bcb6d4d" name="tests/bson/bson-regex_error-001.phpt" role="test" />
<file md5sum="3422fde80d9b7e49dc5d430a10fa9862" name="tests/bson/bson-regex_error-002.phpt" role="test" />
<file md5sum="cdfaa1039a4cd0da222c476c1b6760ac" name="tests/bson/bson-regex_error-003.phpt" role="test" />
<file md5sum="b3c02b3eb309f7bbf5a33523a120b43f" name="tests/bson/bson-regexinterface-001.phpt" role="test" />
+ <file md5sum="049265745d1c20d6a527d5ddb093e987" name="tests/bson/bson-symbol-001.phpt" role="test" />
+ <file md5sum="a55dbbc77a3a9221810c032f38d25005" name="tests/bson/bson-symbol-compare-001.phpt" role="test" />
+ <file md5sum="e0b90e25e3507d77a97b7463ab76c0d4" name="tests/bson/bson-symbol-get_properties-001.phpt" role="test" />
+ <file md5sum="0648e9bf022072d2eb9343fb99f30a0a" name="tests/bson/bson-symbol-get_properties-002.phpt" role="test" />
+ <file md5sum="218eba280d32b6bbe135d24fb530ecb1" name="tests/bson/bson-symbol-jsonserialize-001.phpt" role="test" />
+ <file md5sum="380d842bbdea45f6a8582ec563d2b64e" name="tests/bson/bson-symbol-jsonserialize-002.phpt" role="test" />
+ <file md5sum="55905512f51a5e7e449576456797c704" name="tests/bson/bson-symbol-serialization-001.phpt" role="test" />
+ <file md5sum="e0b13e8d18c0df3daa074afa7586758f" name="tests/bson/bson-symbol-serialization_error-001.phpt" role="test" />
+ <file md5sum="d409f086553d6059c65972228ba02b9e" name="tests/bson/bson-symbol-serialization_error-002.phpt" role="test" />
+ <file md5sum="5753cdf389e2e3cb769eaa272f4a285e" name="tests/bson/bson-symbol-tostring-001.phpt" role="test" />
+ <file md5sum="660786fbe39ddfa70e3d471a551fc52b" name="tests/bson/bson-symbol_error-001.phpt" role="test" />
<file md5sum="fd6921fa653d75593cebefdee44f3a37" name="tests/bson/bson-timestamp-001.phpt" role="test" />
<file md5sum="94cee14f3d06ab8085f06c241a976589" name="tests/bson/bson-timestamp-002.phpt" role="test" />
<file md5sum="a0763909d65abab501df3471d9cf0009" name="tests/bson/bson-timestamp-003.phpt" role="test" />
<file md5sum="3932a3692389d969ee55de22034324b1" name="tests/bson/bson-timestamp-004.phpt" role="test" />
<file md5sum="db926cded84f832c39e527d5fcdadcbb" name="tests/bson/bson-timestamp-005.phpt" role="test" />
<file md5sum="bf27f37b85d838f7f3601f1733674ebe" name="tests/bson/bson-timestamp-compare-001.phpt" role="test" />
<file md5sum="8d390960f78a0d4c874f1a9bea6f60e1" name="tests/bson/bson-timestamp-getIncrement-001.phpt" role="test" />
<file md5sum="dbc98d4c863f2edbd470a744569b8ebc" name="tests/bson/bson-timestamp-getTimestamp-001.phpt" role="test" />
+ <file md5sum="70cbe569659d4cd104ded6f0b3954697" name="tests/bson/bson-timestamp-get_properties-001.phpt" role="test" />
+ <file md5sum="1cb51102b60c196d34c7c1588b91ae78" name="tests/bson/bson-timestamp-get_properties-002.phpt" role="test" />
<file md5sum="4ff5e264bb118d99302fb918e2b54ea4" name="tests/bson/bson-timestamp-jsonserialize-001.phpt" role="test" />
<file md5sum="b3c0547739e7996b33b49eddf15ca4bf" name="tests/bson/bson-timestamp-jsonserialize-002.phpt" role="test" />
<file md5sum="5bfc985572b0eae61c2e3e7afe46588b" name="tests/bson/bson-timestamp-serialization-001.phpt" role="test" />
<file md5sum="89ee88f82bd68012d2ab0d7cec057add" name="tests/bson/bson-timestamp-serialization-002.phpt" role="test" />
<file md5sum="29db1408dde1310afa8066e7233c3db6" name="tests/bson/bson-timestamp-serialization_error-001.phpt" role="test" />
<file md5sum="546931ee801ed3c60e649573cddf813f" name="tests/bson/bson-timestamp-serialization_error-002.phpt" role="test" />
<file md5sum="3623cb6f0ef62c302ced4b3acf1f1b3f" name="tests/bson/bson-timestamp-serialization_error-003.phpt" role="test" />
<file md5sum="f7d7dd2398c27c27def6cb8dab685315" name="tests/bson/bson-timestamp-serialization_error-004.phpt" role="test" />
<file md5sum="7a13dad169f79e7cda4ab3c7a8e2dc1f" name="tests/bson/bson-timestamp-set_state-001.phpt" role="test" />
<file md5sum="2137458362aca8c1a3d8f6224c438d9e" name="tests/bson/bson-timestamp-set_state-002.phpt" role="test" />
<file md5sum="effc86aba9ebac7be6f37ace2f24fc66" name="tests/bson/bson-timestamp-set_state_error-001.phpt" role="test" />
<file md5sum="4bd04c958a1ed842374aec323736bb00" name="tests/bson/bson-timestamp-set_state_error-002.phpt" role="test" />
<file md5sum="9a74bed31b0456757323dd297e0cf7d6" name="tests/bson/bson-timestamp-set_state_error-003.phpt" role="test" />
<file md5sum="e8957247a4043fc91f223da4ed9ec84e" name="tests/bson/bson-timestamp-set_state_error-004.phpt" role="test" />
<file md5sum="13de8d03a55cd2c15e7472fc71596cce" name="tests/bson/bson-timestamp_error-001.phpt" role="test" />
<file md5sum="82668bc9b4f77d46a5e32b9117fb355e" name="tests/bson/bson-timestamp_error-002.phpt" role="test" />
- <file md5sum="9a0deb7c8c173de898ebee345d7b67f1" name="tests/bson/bson-timestamp_error-003.phpt" role="test" />
+ <file md5sum="22a4db204fc36bb46ac9ec53c494bc8b" name="tests/bson/bson-timestamp_error-003.phpt" role="test" />
<file md5sum="490199683c8657f10b662d5987abf546" name="tests/bson/bson-timestamp_error-004.phpt" role="test" />
<file md5sum="e10ea28f50dcdf86978919777ed5c74e" name="tests/bson/bson-timestamp_error-005.phpt" role="test" />
- <file md5sum="7bec85a32de6b6437d0d5aa0deed8e9e" name="tests/bson/bson-timestamp_error-006.phpt" role="test" />
+ <file md5sum="78b0504a50aeec5ea1ce14a866d11fb9" name="tests/bson/bson-timestamp_error-006.phpt" role="test" />
<file md5sum="304b9fe634cafb1fb611d793197f60ec" name="tests/bson/bson-timestampinterface-001.phpt" role="test" />
<file md5sum="2473a1824e9801db25f815431426ca16" name="tests/bson/bson-toCanonicalJSON-001.phpt" role="test" />
<file md5sum="e39ea45b387e39515b840ec5b8a3135d" name="tests/bson/bson-toCanonicalJSON-002.phpt" role="test" />
<file md5sum="5c6650a3f73652dc05f756362b2447c2" name="tests/bson/bson-toCanonicalJSON_error-001.phpt" role="test" />
<file md5sum="e43ad22bbd940468212c5f138deedc2b" name="tests/bson/bson-toCanonicalJSON_error-002.phpt" role="test" />
<file md5sum="44685227625e78d0cc329a9c7f6e9c50" name="tests/bson/bson-toCanonicalJSON_error-003.phpt" role="test" />
<file md5sum="b1bdf349159595fcbcb3b650e98aa01c" name="tests/bson/bson-toJSON-001.phpt" role="test" />
<file md5sum="85790d6c33597dbc5185a551a6683141" name="tests/bson/bson-toJSON-002.phpt" role="test" />
<file md5sum="0a2207ba503d14dd271697e38ff60cbe" name="tests/bson/bson-toJSON_error-001.phpt" role="test" />
<file md5sum="9ddc7084c84ee21a184dc666a023049d" name="tests/bson/bson-toJSON_error-002.phpt" role="test" />
<file md5sum="177c53c68054489653d433bf0143a15e" name="tests/bson/bson-toJSON_error-003.phpt" role="test" />
<file md5sum="852eed0a106c82c454a8b567e01018bc" name="tests/bson/bson-toPHP-001.phpt" role="test" />
<file md5sum="da19822beca89a36c70c103bc88a8aaa" name="tests/bson/bson-toPHP-002.phpt" role="test" />
- <file md5sum="5ca8927796e91b33fd96e3799ddd5269" name="tests/bson/bson-toPHP-003.phpt" role="test" />
- <file md5sum="ad722a4e7733ac6badd04a94f6230b3c" name="tests/bson/bson-toPHP-004.phpt" role="test" />
+ <file md5sum="196ffa4a0c9763794b77551fc1114f79" name="tests/bson/bson-toPHP-003.phpt" role="test" />
+ <file md5sum="ee0bf59df01713d93b9efcc51b3a5edd" name="tests/bson/bson-toPHP-004.phpt" role="test" />
<file md5sum="4ccc17d46eda70ad34118644e960f18a" name="tests/bson/bson-toPHP-006.phpt" role="test" />
<file md5sum="365a4c9626dec315f8eddf0140c5b2e2" name="tests/bson/bson-toPHP_error-001.phpt" role="test" />
<file md5sum="7d296259645fb541f47952e8d528eba7" name="tests/bson/bson-toPHP_error-002.phpt" role="test" />
<file md5sum="e4204cc33475f1a76d9670b9de1f2d50" name="tests/bson/bson-toPHP_error-003.phpt" role="test" />
<file md5sum="80d461b9ff6c1b4f0a11373b46c6d4a1" name="tests/bson/bson-toPHP_error-004.phpt" role="test" />
- <file md5sum="79bb82576a6544d02ea3ad185f78831a" name="tests/bson/bson-toPHP_error-005.phpt" role="test" />
- <file md5sum="616b30f57cbb7ff0cd5d6f98c04a737d" name="tests/bson/bson-toPHP_error-006.phpt" role="test" />
<file md5sum="a9e58db0748ac3208b802941b30a389e" name="tests/bson/bson-toRelaxedJSON-001.phpt" role="test" />
<file md5sum="403af2cffe3eec241affea1737dd93f0" name="tests/bson/bson-toRelaxedJSON-002.phpt" role="test" />
<file md5sum="6a318442df977699b54c48460d713f4e" name="tests/bson/bson-toRelaxedJSON_error-001.phpt" role="test" />
<file md5sum="0d5c5385c5e6e52c3551503ad33cf19c" name="tests/bson/bson-toRelaxedJSON_error-002.phpt" role="test" />
<file md5sum="7bdb79435458303d103ea1f55c6fe7ad" name="tests/bson/bson-toRelaxedJSON_error-003.phpt" role="test" />
+ <file md5sum="f598ba4224ca96027f18c20bcf037b19" name="tests/bson/bson-undefined-001.phpt" role="test" />
+ <file md5sum="dc74e078f67345758dbb837e9d1f0f6e" name="tests/bson/bson-undefined-compare-001.phpt" role="test" />
+ <file md5sum="286eaff5b548beb827ac4e7641bf0cf8" name="tests/bson/bson-undefined-jsonserialize-001.phpt" role="test" />
+ <file md5sum="afa7092fad05aa85063b74c4761670c1" name="tests/bson/bson-undefined-jsonserialize-002.phpt" role="test" />
+ <file md5sum="e48cf56855e1cb5778de9be4afb9db70" name="tests/bson/bson-undefined-serialization-001.phpt" role="test" />
+ <file md5sum="efc4ba473c747b5b0dc5b5ac7fbeb8c4" name="tests/bson/bson-undefined-tostring-001.phpt" role="test" />
+ <file md5sum="16fc8af65414cc3910a0d05478fbfeca" name="tests/bson/bson-undefined_error-001.phpt" role="test" />
<file md5sum="bb1d4baf6117f3219afe0f2dfafb6437" name="tests/bson/bson-unknown-001.phpt" role="test" />
<file md5sum="69af476f25550c6145379b2c8d04e5a4" name="tests/bson/bson-utcdatetime-001.phpt" role="test" />
<file md5sum="de22f1e3d63048ab3db3b3514ce45bf2" name="tests/bson/bson-utcdatetime-002.phpt" role="test" />
<file md5sum="cb6b0009b8dfc770c489cd76f168f875" name="tests/bson/bson-utcdatetime-003.phpt" role="test" />
<file md5sum="de4188f960c7fd6debed280d2ce4da02" name="tests/bson/bson-utcdatetime-004.phpt" role="test" />
<file md5sum="ebcbd881098aeef861090012f341282d" name="tests/bson/bson-utcdatetime-005.phpt" role="test" />
<file md5sum="982f7f8d65d0f7f222f9b2512786dc4c" name="tests/bson/bson-utcdatetime-006.phpt" role="test" />
<file md5sum="322715ac6f412974a9dacb1dc7a1ebc9" name="tests/bson/bson-utcdatetime-007.phpt" role="test" />
<file md5sum="6728e628d6cd1def65b21b5d58c477b4" name="tests/bson/bson-utcdatetime-compare-001.phpt" role="test" />
+ <file md5sum="8272da2bda9a4289184e655f6822550c" name="tests/bson/bson-utcdatetime-get_properties-001.phpt" role="test" />
+ <file md5sum="334df91a905a2ca36f77b385271e77ab" name="tests/bson/bson-utcdatetime-get_properties-002.phpt" role="test" />
<file md5sum="85034e1045eb4632d9625d8c40e3b7ff" name="tests/bson/bson-utcdatetime-int-size-001.phpt" role="test" />
<file md5sum="8fbbdc845aa7cab18a7f98661c4d828d" name="tests/bson/bson-utcdatetime-int-size-002.phpt" role="test" />
<file md5sum="a93b633b97ec16f0b7707205f4709d12" name="tests/bson/bson-utcdatetime-jsonserialize-001.phpt" role="test" />
<file md5sum="1ac1ca2d4db3d5f77ccc59a3a48076c9" name="tests/bson/bson-utcdatetime-jsonserialize-002.phpt" role="test" />
<file md5sum="2bc0c3ddf3167bc788ced0582a2cd157" name="tests/bson/bson-utcdatetime-serialization-001.phpt" role="test" />
<file md5sum="3a575abb63e282591e1f150ca5ebb61a" name="tests/bson/bson-utcdatetime-serialization-002.phpt" role="test" />
<file md5sum="ac49747cfa5295c81d6e0271f827d59c" name="tests/bson/bson-utcdatetime-serialization_error-001.phpt" role="test" />
<file md5sum="a90aec321df162e83202ebabbc4248f1" name="tests/bson/bson-utcdatetime-serialization_error-002.phpt" role="test" />
<file md5sum="5f4ac622d87e508604308b21ae43f835" name="tests/bson/bson-utcdatetime-set_state-001.phpt" role="test" />
<file md5sum="cdbb4d6a6cd72816568163b8f54a0b9f" name="tests/bson/bson-utcdatetime-set_state-002.phpt" role="test" />
<file md5sum="8292f7be7a23f1afe04101f2e57f95b6" name="tests/bson/bson-utcdatetime-set_state_error-001.phpt" role="test" />
<file md5sum="d18b948a39fdb4a68f97209c48b2a846" name="tests/bson/bson-utcdatetime-set_state_error-002.phpt" role="test" />
<file md5sum="1b9dd88668a06f402a8567d13d47dea7" name="tests/bson/bson-utcdatetime-todatetime-001.phpt" role="test" />
<file md5sum="8a6c7f3a7120a6135e4d8153da1fe675" name="tests/bson/bson-utcdatetime-todatetime-002.phpt" role="test" />
<file md5sum="4cf0b97930f5fa6be49284138872ee26" name="tests/bson/bson-utcdatetime-tostring-001.phpt" role="test" />
<file md5sum="01ff429024e70923880b7000d904f95f" name="tests/bson/bson-utcdatetime_error-001.phpt" role="test" />
<file md5sum="77b84eee9de2aa927194d1e7699de1a4" name="tests/bson/bson-utcdatetime_error-002.phpt" role="test" />
<file md5sum="b9c3f3108a0ecf7fe4f58f0c6cbc0d68" name="tests/bson/bson-utcdatetime_error-003.phpt" role="test" />
<file md5sum="eca229a57bee494975dc2c2db79b8e2d" name="tests/bson/bson-utcdatetime_error-004.phpt" role="test" />
<file md5sum="a67824badfe3e9a6cfb5aea0577edaad" name="tests/bson/bson-utcdatetimeinterface-001.phpt" role="test" />
<file md5sum="74978df74e164c08a7e98b2b00a850a6" name="tests/bson/bug0274.phpt" role="test" />
<file md5sum="7a78fa8d17a5f963f3eedd81a3d8e1e4" name="tests/bson/bug0313.phpt" role="test" />
<file md5sum="9a2d16556ac49c2fa06a8dc043e45c47" name="tests/bson/bug0325.phpt" role="test" />
<file md5sum="5f838703ed88cbe9bfb826f6d606db67" name="tests/bson/bug0334-001.phpt" role="test" />
<file md5sum="decfd02f9f77ba7f2b7698bc2e9b1993" name="tests/bson/bug0334-002.phpt" role="test" />
- <file md5sum="6e38b80bd375658cd5c9f2716c32f4c1" name="tests/bson/bug0341.phpt" role="test" />
+ <file md5sum="a65cb3e46f03654beb1cf483341abcea" name="tests/bson/bug0341.phpt" role="test" />
<file md5sum="4066bd0654a8969c42d6313aa8dca106" name="tests/bson/bug0347.phpt" role="test" />
<file md5sum="2888441736b763719632b0c3ab4d4288" name="tests/bson/bug0528.phpt" role="test" />
<file md5sum="526610daabc79357652e26e9d6ebd913" name="tests/bson/bug0531.phpt" role="test" />
<file md5sum="98df395abbaf09fe313a1ccc6d5ed0a2" name="tests/bson/bug0544.phpt" role="test" />
<file md5sum="7f0758a33e5dbd19f4f2ae41da989a2b" name="tests/bson/bug0592.phpt" role="test" />
<file md5sum="a07657032dc64d3586c3a87c62edcc9c" name="tests/bson/bug0623.phpt" role="test" />
<file md5sum="97ac756930ab930572a010dd30d574da" name="tests/bson/bug0631.phpt" role="test" />
<file md5sum="3c0b0a89a373da7ef34ff8bd72a81b59" name="tests/bson/bug0672.phpt" role="test" />
<file md5sum="9264b5948771b436ed31cf559ade001a" name="tests/bson/bug0894-001.phpt" role="test" />
<file md5sum="5008b223eb1545f5ae7d3d83d929f853" name="tests/bson/bug0923-001.phpt" role="test" />
<file md5sum="ec260ee6e229062d86c0bd19b0b142f4" name="tests/bson/bug0923-002.phpt" role="test" />
<file md5sum="ebc9dd229bc5c7091808835c1dfa2b7c" name="tests/bson/bug0939-001.phpt" role="test" />
<file md5sum="6b3b90dfccbbd0107ef9233270f80ad7" name="tests/bson/bug0974-001.phpt" role="test" />
<file md5sum="99a8ae579b019a928d9a0885c2a1a85d" name="tests/bson/bug1006-001.phpt" role="test" />
<file md5sum="e352907224c2510f5b7c8bbebba323a3" name="tests/bson/bug1006-002.phpt" role="test" />
<file md5sum="22c30ca9d73e86406d2d6f74861366b7" name="tests/bson/bug1053.phpt" role="test" />
+ <file md5sum="b32de2d6a5a540a853ad9ae92b466e63" name="tests/bson/bug1067.phpt" role="test" />
<file md5sum="cecd7f192c9b387ecbb3836d72a334a9" name="tests/bson/typemap-001.phpt" role="test" />
<file md5sum="e1df0f7a16afe08490ee5fdd30a81f03" name="tests/bson/typemap-002.phpt" role="test" />
<file md5sum="c433c08d3e9d8cbb6ca0b2c8fa976495" name="tests/bulk/bug0667.phpt" role="test" />
<file md5sum="594f059e47dfc936e4b2ef8fcb666254" name="tests/bulk/bulkwrite-count-001.phpt" role="test" />
<file md5sum="bde05e9fc07536b691f4cdbde9a929f1" name="tests/bulk/bulkwrite-countable-001.phpt" role="test" />
<file md5sum="4e806cdbc153b4ea7402a089dba6411b" name="tests/bulk/bulkwrite-debug-001.phpt" role="test" />
<file md5sum="e04091ccae5c03cbe9a854e7ca5298fc" name="tests/bulk/bulkwrite-delete-001.phpt" role="test" />
<file md5sum="97e306463c60986a07580ebca022e5c7" name="tests/bulk/bulkwrite-delete_error-001.phpt" role="test" />
<file md5sum="be73970b72515b9ed0fbfa069dd2e07b" name="tests/bulk/bulkwrite-delete_error-002.phpt" role="test" />
<file md5sum="2b697d8fc672fa4805bd218596d6839f" name="tests/bulk/bulkwrite-delete_error-003.phpt" role="test" />
<file md5sum="57e008c2906d1bc5acedaecc824aa04a" name="tests/bulk/bulkwrite-insert-001.phpt" role="test" />
- <file md5sum="e48407fc269d858b4d3025c0cd7b9436" name="tests/bulk/bulkwrite-insert-002.phpt" role="test" />
- <file md5sum="0d6c129cc2f0e2c664949d859ef0d18b" name="tests/bulk/bulkwrite-insert-003.phpt" role="test" />
<file md5sum="ac5b022787feb4e3e6523869bc760e4e" name="tests/bulk/bulkwrite-insert-004.phpt" role="test" />
- <file md5sum="1d345c265597d9123ad8533194292a52" name="tests/bulk/bulkwrite-insert_error-001.phpt" role="test" />
+ <file md5sum="68bcb0c4b874d1811af6e2c61f617b2a" name="tests/bulk/bulkwrite-insert_error-001.phpt" role="test" />
<file md5sum="abd4aa3eb994b2f4e7baddd031704db1" name="tests/bulk/bulkwrite-insert_error-002.phpt" role="test" />
<file md5sum="371b44e7e9d22b7e74af719efdf39055" name="tests/bulk/bulkwrite-insert_error-003.phpt" role="test" />
- <file md5sum="2f3756e2cceaa2e5035b360171464a2c" name="tests/bulk/bulkwrite-insert_error-004.phpt" role="test" />
<file md5sum="b44d4ca6e0420c984df998309e9dd48f" name="tests/bulk/bulkwrite-update-001.phpt" role="test" />
- <file md5sum="52a67e8db3557cedcbab4f78a0d6a9a9" name="tests/bulk/bulkwrite-update_error-001.phpt" role="test" />
- <file md5sum="420c83c8babf9ced90f3836311979a02" name="tests/bulk/bulkwrite-update_error-002.phpt" role="test" />
+ <file md5sum="cd394194c9039e099b5179cacef887d1" name="tests/bulk/bulkwrite-update-arrayFilters-001.phpt" role="test" />
+ <file md5sum="bac4712bf1be74448b06b24fd19b14c1" name="tests/bulk/bulkwrite-update_error-001.phpt" role="test" />
+ <file md5sum="7a4c8488bb6439a8e68307dc7a5f1484" name="tests/bulk/bulkwrite-update_error-002.phpt" role="test" />
<file md5sum="637fa87cf4378d60953f49e884b0687b" name="tests/bulk/bulkwrite-update_error-003.phpt" role="test" />
<file md5sum="1009fb36cf40af9da3598b9ba1dea099" name="tests/bulk/bulkwrite-update_error-004.phpt" role="test" />
<file md5sum="79f2ddb11b24ed4982a42ae45a824888" name="tests/bulk/bulkwrite-update_error-005.phpt" role="test" />
<file md5sum="c117f84081a29441f2df90bf68b36308" name="tests/bulk/bulkwrite_error-001.phpt" role="test" />
<file md5sum="210570f9da7507f8a0f1442630cef92e" name="tests/bulk/bulkwrite_error-002.phpt" role="test" />
<file md5sum="0320b5f835e9a84756d4a8d03714ff42" name="tests/bulk/write-0001.phpt" role="test" />
<file md5sum="96088d71d5345e7b89c8b06c792e1c90" name="tests/bulk/write-0002.phpt" role="test" />
- <file md5sum="fabf117a2964df40b8b26bf4f37cccd8" name="tests/command/command-ctor-001.phpt" role="test" />
+ <file md5sum="b76e04a3bc59697406a57c1d3c41a9c0" name="tests/causal-consistency/causal-consistency-001.phpt" role="test" />
+ <file md5sum="dbda4fc8a0215bd1eb19e84292f3be19" name="tests/causal-consistency/causal-consistency-002.phpt" role="test" />
+ <file md5sum="50550b785670203d1df1ddb0ca666a5c" name="tests/causal-consistency/causal-consistency-003.phpt" role="test" />
+ <file md5sum="dc5174e6ba588958a018aa3ab5e9f4e3" name="tests/causal-consistency/causal-consistency-004.phpt" role="test" />
+ <file md5sum="ae5d7746e43e7cfdfe09910a32df49aa" name="tests/causal-consistency/causal-consistency-005.phpt" role="test" />
+ <file md5sum="62cca73d5066cd71db5e0fd445653cb8" name="tests/causal-consistency/causal-consistency-006.phpt" role="test" />
+ <file md5sum="42a2f6b9d0890289f4184f4cc44adbf2" name="tests/causal-consistency/causal-consistency-007.phpt" role="test" />
+ <file md5sum="ae9c7310523378fd2faaf7f1051fcd74" name="tests/causal-consistency/causal-consistency-008.phpt" role="test" />
+ <file md5sum="b907f2d8b7ba00e77f1a66c542779329" name="tests/causal-consistency/causal-consistency-009.phpt" role="test" />
+ <file md5sum="2a28a3b67be1d7d5c0e282481022cd8f" name="tests/causal-consistency/causal-consistency-010.phpt" role="test" />
+ <file md5sum="05c09170b49ba15e7dcfd395d4012f3c" name="tests/causal-consistency/causal-consistency-011.phpt" role="test" />
+ <file md5sum="ac95854a3540d06680392b46f443cb44" name="tests/causal-consistency/causal-consistency-012.phpt" role="test" />
+ <file md5sum="5ceb1824b42248303e3e4bd874773f65" name="tests/command/command-ctor-001.phpt" role="test" />
<file md5sum="1a71256d8141d7150c3efbb97e1a524a" name="tests/command/command_error-001.phpt" role="test" />
- <file md5sum="9c802f00ea9a022fda2e20d572b44d29" name="tests/connect/bug0720.phpt" role="test" />
- <file md5sum="1010f507bc25265687e3411f55772871" name="tests/connect/bug1045.phpt" role="test" />
+ <file md5sum="62dc2ed06bf105b5bc514a7df9d3a8bd" name="tests/command/cursor-batchsize-001.phpt" role="test" />
+ <file md5sum="641ce5fb6311201dea770900cf9b3386" name="tests/command/cursor-batchsize-002.phpt" role="test" />
+ <file md5sum="fe4f85722cbd5193e9788806f484c37f" name="tests/command/cursor-tailable-001.phpt" role="test" />
+ <file md5sum="5d1a20fa7319875e45ed2d40b7f5b617" name="tests/command/findAndModify-001.phpt" role="test" />
+ <file md5sum="a8aaae567156f4bb4a91cc56e49dd89c" name="tests/command/update-001.phpt" role="test" />
+ <file md5sum="a68cf9ae65efe68c92680a6e2d23d14f" name="tests/connect/bug0720.phpt" role="test" />
+ <file md5sum="9c9364d9de12e085a1d13056f2ec97db" name="tests/connect/bug1015.phpt" role="test" />
+ <file md5sum="d0a130dc6eb5fdbf65c0dd04fc7ce43b" name="tests/connect/bug1045.phpt" role="test" />
+ <file md5sum="5f8b7129a20e9c29a731ded073a20d50" name="tests/connect/compression_error-001.phpt" role="test" />
+ <file md5sum="1bb193d4177fa40816d5e0581612531b" name="tests/connect/compression_error-002.phpt" role="test" />
<file md5sum="f80cc6758cd4414419b49e5bf43ab2f9" name="tests/connect/replicaset-seedlist-001.phpt" role="test" />
<file md5sum="eedc45e1dc4655125e4f087e1bb91ce6" name="tests/connect/replicaset-seedlist-002.phpt" role="test" />
<file md5sum="b09058c57676c7542aab5b5b810867fb" name="tests/connect/standalone-auth-0001.phpt" role="test" />
<file md5sum="3540fabe2e511ce71e08a02775fc3ef8" name="tests/connect/standalone-auth-0002.phpt" role="test" />
<file md5sum="7a04a12bb64a2a4407737eccedd1b4a4" name="tests/connect/standalone-plain-0001.phpt" role="test" />
- <file md5sum="3725759ca8e111353ea52549949e7159" name="tests/connect/standalone-plain-0002.phpt" role="test" />
- <file md5sum="9e990b5984673fb768edbec7c6fe1d95" name="tests/connect/standalone-ssl-no_verify-001.phpt" role="test" />
- <file md5sum="5a5f4073766bf96c9149bb2d06382613" name="tests/connect/standalone-ssl-no_verify-002.phpt" role="test" />
- <file md5sum="b68d37fd29d53a3b9f0b8d1de7a5ff04" name="tests/connect/standalone-ssl-verify_cert-001.phpt" role="test" />
- <file md5sum="ce4fabb76e9b029ff3a406397ab77435" name="tests/connect/standalone-ssl-verify_cert-002.phpt" role="test" />
- <file md5sum="e8806199d04d9b14f9b708f4d8ea067e" name="tests/connect/standalone-ssl-verify_cert-error-001.phpt" role="test" />
- <file md5sum="ad804b5d37887a93d33f91d3bb844d92" name="tests/connect/standalone-ssl-verify_cert-error-002.phpt" role="test" />
- <file md5sum="3c6f82dd4e6d3c5cdbc35db642ae7a7d" name="tests/connect/standalone-x509-auth-001.phpt" role="test" />
- <file md5sum="6ace64368ba6ff181da7f996e7b29684" name="tests/connect/standalone-x509-auth-002.phpt" role="test" />
- <file md5sum="55c8b18460245d6770a6264130332103" name="tests/connect/standalone-x509-error-0001.phpt" role="test" />
- <file md5sum="9d12e098211dd1fe4072701ea40b968e" name="tests/connect/standalone-x509-extract_username-001.phpt" role="test" />
- <file md5sum="3bf86e8a00a571ea63d750e042b518da" name="tests/connect/standalone-x509-extract_username-002.phpt" role="test" />
+ <file md5sum="ba55a41fd8f60eb1441f3b1f878d3e7f" name="tests/connect/standalone-plain-0002.phpt" role="test" />
+ <file md5sum="ab05015b220e035746646022df70cbf0" name="tests/connect/standalone-ssl-no_verify-001.phpt" role="test" />
+ <file md5sum="8f747f75868ed7241a899b482e5c51bb" name="tests/connect/standalone-ssl-no_verify-002.phpt" role="test" />
+ <file md5sum="791c75cbf329a2cc79a5d1bd928b04bf" name="tests/connect/standalone-ssl-verify_cert-001.phpt" role="test" />
+ <file md5sum="bfc426d2e90685ba27314eedbb838064" name="tests/connect/standalone-ssl-verify_cert-002.phpt" role="test" />
+ <file md5sum="370659019000301725cc8015d96cac70" name="tests/connect/standalone-ssl-verify_cert-error-001.phpt" role="test" />
+ <file md5sum="c9fe032b1633d311861dc09afb10cf18" name="tests/connect/standalone-ssl-verify_cert-error-002.phpt" role="test" />
+ <file md5sum="dc7e2ac135b82c6a81f2e7d3c137848e" name="tests/connect/standalone-x509-auth-001.phpt" role="test" />
+ <file md5sum="bfd9a9d77cee06023aa80dfa2510013d" name="tests/connect/standalone-x509-auth-002.phpt" role="test" />
+ <file md5sum="80849277b5fc9a640c11cd9f41e181c8" name="tests/connect/standalone-x509-error-0001.phpt" role="test" />
+ <file md5sum="e2efed3bdde2a2ae9ce5e69acf434949" name="tests/connect/standalone-x509-extract_username-001.phpt" role="test" />
+ <file md5sum="a5a1f55d93bfa06ed3a6949a2da5ca99" name="tests/connect/standalone-x509-extract_username-002.phpt" role="test" />
<file md5sum="242c14e0af00c3157d0518a51a13fd49" name="tests/cursor/bug0671-001.phpt" role="test" />
<file md5sum="f28fd077560c00dddcef73dea2515a2d" name="tests/cursor/bug0732-001.phpt" role="test" />
<file md5sum="e859eb35934ca895bd05fdf49a7cafdd" name="tests/cursor/bug0849-001.phpt" role="test" />
<file md5sum="76267fa4eeff0ab4b59086c3c2fa36f9" name="tests/cursor/bug0924-001.phpt" role="test" />
<file md5sum="8fc7766850992fe61cc63680ae64f6f5" name="tests/cursor/bug0924-002.phpt" role="test" />
+ <file md5sum="699b81fce23888f23231644cb9ab31b2" name="tests/cursor/bug1050-001.phpt" role="test" />
+ <file md5sum="50b09ba463e4ba26d6157f513df605eb" name="tests/cursor/bug1050-002.phpt" role="test" />
<file md5sum="9b8d8ff91ae8916c55cda3cc6a9b1d30" name="tests/cursor/cursor-IteratorIterator-001.phpt" role="test" />
<file md5sum="5bdaf628571254c6947ed90778e571fd" name="tests/cursor/cursor-IteratorIterator-002.phpt" role="test" />
<file md5sum="6cf825c92cfec4043aba638f68281c4e" name="tests/cursor/cursor-IteratorIterator-003.phpt" role="test" />
<file md5sum="0b31a699633d3603915122b253bc59e7" name="tests/cursor/cursor-IteratorIterator-004.phpt" role="test" />
<file md5sum="047175ad9d23429b675dbef4d8b69f9b" name="tests/cursor/cursor-NoRewindIterator-001.phpt" role="test" />
- <file md5sum="93085abc96c661ef24722e47051d7dc1" name="tests/cursor/cursor-destruct-001.phpt" role="test" />
+ <file md5sum="554bb39df070f9d6593501d3296dc030" name="tests/cursor/cursor-destruct-001.phpt" role="test" />
<file md5sum="fa8c09f6398b0f3624fcb99e2dae2e78" name="tests/cursor/cursor-get_iterator-001.phpt" role="test" />
<file md5sum="00b27ffaac59e88c2924f1e4a94b8446" name="tests/cursor/cursor-get_iterator-002.phpt" role="test" />
<file md5sum="dcf2f629b749f3451f51dc14aabed83a" name="tests/cursor/cursor-get_iterator-003.phpt" role="test" />
<file md5sum="473f69a2a155f1abbad84ae05b8b287b" name="tests/cursor/cursor-getmore-001.phpt" role="test" />
<file md5sum="4a716f9f9fa9cc8124d2468f7822f4ef" name="tests/cursor/cursor-getmore-002.phpt" role="test" />
<file md5sum="ed54bc54de9f3237a7b4f1f62f3d923b" name="tests/cursor/cursor-getmore-003.phpt" role="test" />
<file md5sum="ad32619ce7b76c5673f87dd7f5f7ae6c" name="tests/cursor/cursor-getmore-004.phpt" role="test" />
<file md5sum="833ed8c23015e582df80c5213b9da361" name="tests/cursor/cursor-getmore-005.phpt" role="test" />
<file md5sum="80eebce2bb5f63923b2e088984ffdc45" name="tests/cursor/cursor-getmore-006.phpt" role="test" />
<file md5sum="d98539ac333de8b38808f4369d9700f4" name="tests/cursor/cursor-isDead-001.phpt" role="test" />
<file md5sum="fa22374bfd85347162afbbc16c236d7c" name="tests/cursor/cursor-isDead-002.phpt" role="test" />
<file md5sum="7779033e011457ef08d2bc538cd12d58" name="tests/cursor/cursor-isDead-003.phpt" role="test" />
<file md5sum="3a9c5d9ccbccb25a3853cd858ebf3385" name="tests/cursor/cursor-isDead-004.phpt" role="test" />
<file md5sum="8ae752547abc9b308717b22fdc81f65b" name="tests/cursor/cursor-iterator_handlers-001.phpt" role="test" />
<file md5sum="b3a42f64b044ac953e0cca2e5ac0e548" name="tests/cursor/cursor-rewind-001.phpt" role="test" />
<file md5sum="3af57c58d28edc53f1d7dff2930e148b" name="tests/cursor/cursor-setTypeMap_error-001.phpt" role="test" />
<file md5sum="818c11ba12755d0849204075a6128c00" name="tests/cursor/cursor-setTypeMap_error-002.phpt" role="test" />
<file md5sum="bf62be766da78e6517781b99a729c657" name="tests/cursor/cursor-tailable-001.phpt" role="test" />
<file md5sum="de85823d5d6c8d5b61476204140d8a9a" name="tests/cursor/cursor-tailable-002.phpt" role="test" />
<file md5sum="d21a84f682ef0bb2c476214591d9aa95" name="tests/cursor/cursor-tailable-003.phpt" role="test" />
<file md5sum="95a179bbb7d55e659f92a7287669452f" name="tests/cursor/cursor-tailable_error-001.phpt" role="test" />
- <file md5sum="dd6098836932b814669940aa0a079496" name="tests/cursor/cursor-tailable_error-002.phpt" role="test" />
+ <file md5sum="4a253d0fdbba0f0dc21576e1244890a5" name="tests/cursor/cursor-tailable_error-002.phpt" role="test" />
<file md5sum="3ba084305d56a4161a6428adec4200f6" name="tests/cursor/cursor-toArray-001.phpt" role="test" />
<file md5sum="ccf21ed5c06df8c665949666cba4177a" name="tests/cursor/cursor-toArray-002.phpt" role="test" />
<file md5sum="98ab429a6b6a19ea5848c83119784a3b" name="tests/cursor/cursor_error-001.phpt" role="test" />
<file md5sum="f22fc97e7edf6f3274aa75457c51ed54" name="tests/cursorid/cursorid-001.phpt" role="test" />
<file md5sum="a66a36e2dd8673b0e3c9b10213d8e536" name="tests/cursorid/cursorid-002.phpt" role="test" />
<file md5sum="c72707601edf6dd9e41b2c67b05d92ae" name="tests/cursorid/cursorid_error-001.phpt" role="test" />
<file md5sum="61a4d94cd1a2cba22af558739fd99920" name="tests/functional/cursor-001.phpt" role="test" />
<file md5sum="3c9dd45aa143cfadbdeade17987bc89a" name="tests/functional/cursorid-001.phpt" role="test" />
<file md5sum="00e5e9aa4990021e69b190a2519f91fd" name="tests/functional/phpinfo-1.phpt" role="test" />
<file md5sum="284065fce38985ffa6a7b4cdb8ed834a" name="tests/functional/phpinfo-2.phpt" role="test" />
<file md5sum="6e5d4df60590351975ff8c1c97995d94" name="tests/functional/query-sort-001.phpt" role="test" />
<file md5sum="1472bff8e60acf52d238aac8d39ca2ff" name="tests/functional/query-sort-002.phpt" role="test" />
<file md5sum="f87ce409844841c8ff3e2eaea9446241" name="tests/functional/query-sort-003.phpt" role="test" />
<file md5sum="2c0dfaa1fea12e6a5478b34e9838fa8d" name="tests/functional/query-sort-004.phpt" role="test" />
- <file md5sum="0e92c8b24b958eb681e48cae1bb0162c" name="tests/manager/bug0572.phpt" role="test" />
+ <file md5sum="2c11d908285f22a4ffdf231ea8db3ef7" name="tests/manager/bug0572.phpt" role="test" />
<file md5sum="f0346e6cd2ba9520eef5021167879c4e" name="tests/manager/bug0851-001.phpt" role="test" />
<file md5sum="9a7cb75f5eb30349c7dd731ef0977940" name="tests/manager/bug0851-002.phpt" role="test" />
<file md5sum="9b7b81b9372e04f59efa089eb9919f91" name="tests/manager/bug0912-001.phpt" role="test" />
<file md5sum="1e678a1402e1230e2ccbf30cc7274c40" name="tests/manager/bug0913-001.phpt" role="test" />
<file md5sum="be3f9183ac4e7d20627c0dc4e4d0abdc" name="tests/manager/bug0940-001.phpt" role="test" />
<file md5sum="4ac5250afb8261665bf8a450bfd464e2" name="tests/manager/bug0940-002.phpt" role="test" />
<file md5sum="d582d6ff744cdbf6f65a37e63faac924" name="tests/manager/manager-ctor-001.phpt" role="test" />
<file md5sum="a27445a8c78d892a18f123d2c39a627d" name="tests/manager/manager-ctor-002.phpt" role="test" />
<file md5sum="90824d052d5f693a3e4e84b28b4a9b06" name="tests/manager/manager-ctor-003.phpt" role="test" />
<file md5sum="cd62592a5c1c3b6c855b698735a1b9eb" name="tests/manager/manager-ctor-004.phpt" role="test" />
<file md5sum="b5efa282f800cf3a1b3d09d86bf06906" name="tests/manager/manager-ctor-appname-001.phpt" role="test" />
<file md5sum="61b8d5978585fc2a3f031c4f37d2d611" name="tests/manager/manager-ctor-appname_error-001.phpt" role="test" />
- <file md5sum="4045e436a36b82e3b6ff917c3be17012" name="tests/manager/manager-ctor-auth_mechanism-001.phpt" role="test" />
- <file md5sum="05ef96372c993eb9ccdcd82459d0f688" name="tests/manager/manager-ctor-auth_mechanism-002.phpt" role="test" />
+ <file md5sum="a7b52e0890c7a1d20e0d4266a68ec881" name="tests/manager/manager-ctor-auth_mechanism-001.phpt" role="test" />
+ <file md5sum="a56f34e160d6cf092dcfe6e0ed54944e" name="tests/manager/manager-ctor-auth_mechanism-002.phpt" role="test" />
<file md5sum="ee7822e3b58b7abb5ccee0a87dc86e4d" name="tests/manager/manager-ctor-read_concern-001.phpt" role="test" />
<file md5sum="e0f1aac391b3a951064df7cae6234cb6" name="tests/manager/manager-ctor-read_concern-error-001.phpt" role="test" />
<file md5sum="79e97d46bf35dde168a2b2c8b304d259" name="tests/manager/manager-ctor-read_preference-001.phpt" role="test" />
<file md5sum="a68f0249fa21dc829486e9b349d5835a" name="tests/manager/manager-ctor-read_preference-002.phpt" role="test" />
<file md5sum="f79bc0bb4c539b3ca7d92289c5799928" name="tests/manager/manager-ctor-read_preference-004.phpt" role="test" />
- <file md5sum="690fbe9acb3f51c225df7c8c4eca4a53" name="tests/manager/manager-ctor-read_preference-error-001.phpt" role="test" />
- <file md5sum="9ed2e1a673c2939d8dfcea438fc0e55b" name="tests/manager/manager-ctor-read_preference-error-002.phpt" role="test" />
- <file md5sum="8b96e42782747123e2381b61e4e215cc" name="tests/manager/manager-ctor-read_preference-error-003.phpt" role="test" />
+ <file md5sum="d34478e19a06621ab48134ad3e921cb5" name="tests/manager/manager-ctor-read_preference-error-001.phpt" role="test" />
+ <file md5sum="0ae4dd86535ff2f78c5642c95c5907ce" name="tests/manager/manager-ctor-read_preference-error-002.phpt" role="test" />
+ <file md5sum="33c909db248b8926649d82d481106abd" name="tests/manager/manager-ctor-read_preference-error-003.phpt" role="test" />
<file md5sum="cd42434868d062400d1acc99236c62a2" name="tests/manager/manager-ctor-read_preference-error-004.phpt" role="test" />
- <file md5sum="9a8c551f206ac085e9ebdc6c5d0a7d32" name="tests/manager/manager-ctor-ssl-001.phpt" role="test" />
+ <file md5sum="88b3d9bb0981f4a726b1d82575603f38" name="tests/manager/manager-ctor-ssl-001.phpt" role="test" />
<file md5sum="f5f725515b3e0d3000a190ca90a48902" name="tests/manager/manager-ctor-write_concern-001.phpt" role="test" />
<file md5sum="e873f68889103bd3ca0fb82cd636e27d" name="tests/manager/manager-ctor-write_concern-002.phpt" role="test" />
<file md5sum="a795c0a05f3f14eee39dc899c6b8b353" name="tests/manager/manager-ctor-write_concern-003.phpt" role="test" />
<file md5sum="243320fdfc028f0156dab89e0a4111fd" name="tests/manager/manager-ctor-write_concern-004.phpt" role="test" />
<file md5sum="930d34ab034a874b0d1bfdfb359f57a2" name="tests/manager/manager-ctor-write_concern-error-001.phpt" role="test" />
<file md5sum="961a3b93ec15395bf57bcc55c42af238" name="tests/manager/manager-ctor-write_concern-error-002.phpt" role="test" />
- <file md5sum="f9eb9dfd5685ef375d2ebf2bdeec51fe" name="tests/manager/manager-ctor-write_concern-error-003.phpt" role="test" />
+ <file md5sum="ca05fe4e8293232d85290f0bbc74a415" name="tests/manager/manager-ctor-write_concern-error-003.phpt" role="test" />
<file md5sum="6e904f8fe9d236c67fa8cd95b2477357" name="tests/manager/manager-ctor-write_concern-error-004.phpt" role="test" />
- <file md5sum="3ccd7bcb5c18a4cc0c30a819a9c9e5b0" name="tests/manager/manager-ctor-write_concern-error-005.phpt" role="test" />
- <file md5sum="c481e6fe02d36c9a7d5f6d4c314a3568" name="tests/manager/manager-ctor-write_concern-error-006.phpt" role="test" />
+ <file md5sum="4ceba0f9bddd41670eafafcb2cdcf036" name="tests/manager/manager-ctor-write_concern-error-005.phpt" role="test" />
+ <file md5sum="d5375e39c52cfe38e226f28a72de528b" name="tests/manager/manager-ctor-write_concern-error-006.phpt" role="test" />
<file md5sum="49ffdcbe69686e38b35bd6d9cdbd8b67" name="tests/manager/manager-ctor_error-001.phpt" role="test" />
- <file md5sum="8861edfbc58574cc34deedaba397adcb" name="tests/manager/manager-ctor_error-002.phpt" role="test" />
+ <file md5sum="bc5dbc3dac6898f0ee0d6baccd0a7866" name="tests/manager/manager-ctor_error-002.phpt" role="test" />
<file md5sum="07074b4ac6d148d2c45fd2c6cf23f427" name="tests/manager/manager-ctor_error-003.phpt" role="test" />
- <file md5sum="ebdd7475f10c93b49ee2d3ca505bf50b" name="tests/manager/manager-debug-001.phpt" role="test" />
- <file md5sum="6f82cb6c9546ace927299251c2e341a9" name="tests/manager/manager-debug-002.phpt" role="test" />
+ <file md5sum="be738dc790fc658a6ce32235b04b1d4b" name="tests/manager/manager-debug-001.phpt" role="test" />
+ <file md5sum="fed182f4305d260702f56990d7977c91" name="tests/manager/manager-debug-002.phpt" role="test" />
+ <file md5sum="b99cf3e782d7e1fba6a320e7815305f5" name="tests/manager/manager-debug-003.phpt" role="test" />
<file md5sum="e791d3155d0c6af7fbb8b38da5cda729" name="tests/manager/manager-destruct-001.phpt" role="test" />
<file md5sum="502e3288a762c61c59c5718c3ea63a6f" name="tests/manager/manager-executeBulkWrite-001.phpt" role="test" />
<file md5sum="465664ae148f48b497daa5014cf9627d" name="tests/manager/manager-executeBulkWrite-002.phpt" role="test" />
<file md5sum="327b7ea25fb9259aaf9d37b7c9f95687" name="tests/manager/manager-executeBulkWrite-003.phpt" role="test" />
<file md5sum="2c96f45f9a46375035c49d5ad884cbae" name="tests/manager/manager-executeBulkWrite-004.phpt" role="test" />
<file md5sum="afe52472d9b36dd52fc8b235fcc2e26b" name="tests/manager/manager-executeBulkWrite-005.phpt" role="test" />
<file md5sum="f66ce3abbb6715c0a2001fbf33277a1b" name="tests/manager/manager-executeBulkWrite-006.phpt" role="test" />
<file md5sum="65f2114c9e750d8256c026c854d4b6a1" name="tests/manager/manager-executeBulkWrite-007.phpt" role="test" />
- <file md5sum="3db0f084d4b11f67411dd755392e652e" name="tests/manager/manager-executeBulkWrite-008.phpt" role="test" />
+ <file md5sum="83d3eb288379d66b6a31fd4d86861370" name="tests/manager/manager-executeBulkWrite-008.phpt" role="test" />
<file md5sum="db83342d1cea8b21d113a18a66c57a63" name="tests/manager/manager-executeBulkWrite-009.phpt" role="test" />
<file md5sum="cc95bd471144e0dee220d3203c66c6e3" name="tests/manager/manager-executeBulkWrite-010.phpt" role="test" />
<file md5sum="1f846a1f20ab7a40d3e4bc9ecb54112d" name="tests/manager/manager-executeBulkWrite-011.phpt" role="test" />
+ <file md5sum="8f01edcd842ff5dd7de073a00f7dd3ce" name="tests/manager/manager-executeBulkWrite-012.phpt" role="test" />
<file md5sum="825e9363312c2a8d3f953da43770e2b2" name="tests/manager/manager-executeBulkWrite_error-001.phpt" role="test" />
<file md5sum="b351ac5fb6628150bee7c17218f9cc40" name="tests/manager/manager-executeBulkWrite_error-002.phpt" role="test" />
<file md5sum="48ab21bcda4cb861aeadf15b5975ff58" name="tests/manager/manager-executeBulkWrite_error-003.phpt" role="test" />
<file md5sum="dc973d0e87e9e22f2743dc8d5a4c46ca" name="tests/manager/manager-executeBulkWrite_error-004.phpt" role="test" />
<file md5sum="d50c616cea538d13bcf9b20947e00c82" name="tests/manager/manager-executeBulkWrite_error-006.phpt" role="test" />
<file md5sum="d36794d4026aef731f405bdcba806ca9" name="tests/manager/manager-executeBulkWrite_error-007.phpt" role="test" />
<file md5sum="7e4cc9dae8557991729145701bea45ab" name="tests/manager/manager-executeBulkWrite_error-008.phpt" role="test" />
+ <file md5sum="769359c15078cbe21fce16d93b759932" name="tests/manager/manager-executeBulkWrite_error-009.phpt" role="test" />
<file md5sum="020190b7055d6b3a9545065a4f39d057" name="tests/manager/manager-executeCommand-001.phpt" role="test" />
+ <file md5sum="ad0d6e4a7806f70d0f2daf0dbb00f22b" name="tests/manager/manager-executeCommand-002.phpt" role="test" />
+ <file md5sum="870fafc41acc502d32373f7ac6d57672" name="tests/manager/manager-executeCommand-003.phpt" role="test" />
+ <file md5sum="11baf5fac427448db09ab3bdba703cda" name="tests/manager/manager-executeCommand-004.phpt" role="test" />
<file md5sum="b4e242aa64446022796549bf18ef7fbd" name="tests/manager/manager-executeCommand_error-001.phpt" role="test" />
- <file md5sum="2830e42312975c21857d53464b9c6d4c" name="tests/manager/manager-executeQuery-001.phpt" role="test" />
- <file md5sum="5f177fb67bbc291e318926b102346788" name="tests/manager/manager-executeQuery-002.phpt" role="test" />
- <file md5sum="bfcc9743039bbc30957f8cf69082f2dd" name="tests/manager/manager-executeQuery-005.phpt" role="test" />
+ <file md5sum="e4302c05c74ed74fa88be0c8bfef2fb6" name="tests/manager/manager-executeCommand_error-002.phpt" role="test" />
+ <file md5sum="3547570fd2ad5298e822f8275a6633b2" name="tests/manager/manager-executeCommand_error-004.phpt" role="test" />
+ <file md5sum="4f1901640c3fd00fdb05a38a26185f9b" name="tests/manager/manager-executeQuery-001.phpt" role="test" />
+ <file md5sum="724a4af76e86a32e411bc70dc29062bf" name="tests/manager/manager-executeQuery-002.phpt" role="test" />
+ <file md5sum="9b20ee77d480c2cf69f12abb3745eb66" name="tests/manager/manager-executeQuery-003.phpt" role="test" />
+ <file md5sum="ffca3c40234f12e348a2554bd491ccfb" name="tests/manager/manager-executeQuery-004.phpt" role="test" />
+ <file md5sum="fa9db25524e6160f06ba6bfcf086bcb7" name="tests/manager/manager-executeQuery-005.phpt" role="test" />
<file md5sum="828d1da5bfd8accddc3df45da5b337bd" name="tests/manager/manager-executeQuery_error-001.phpt" role="test" />
+ <file md5sum="e6991dcc9d10e5731a6c47f981a034e9" name="tests/manager/manager-executeQuery_error-002.phpt" role="test" />
+ <file md5sum="7d6c0f6a3442f0d4129b427a0a39531b" name="tests/manager/manager-executeReadCommand-001.phpt" role="test" />
+ <file md5sum="dd434ca1f93a9d0a17787e0cb94631ee" name="tests/manager/manager-executeReadCommand_error-001.phpt" role="test" />
+ <file md5sum="7069bc7a4d3a332ee221b89866974bdb" name="tests/manager/manager-executeReadWriteCommand-001.phpt" role="test" />
+ <file md5sum="199f75745fd8db3f407f442e04ab64e3" name="tests/manager/manager-executeReadWriteCommand_error-001.phpt" role="test" />
+ <file md5sum="66765cb59b5f499dc006bd6a7da46b37" name="tests/manager/manager-executeWriteCommand-001.phpt" role="test" />
+ <file md5sum="1ff7a1129a5252e21e768aa42c9b0137" name="tests/manager/manager-executeWriteCommand_error-001.phpt" role="test" />
<file md5sum="3fdf3959137e40815625c7af1f9bcf9c" name="tests/manager/manager-getreadconcern-001.phpt" role="test" />
<file md5sum="5b8120ba62afb13a67636fefb6ac9046" name="tests/manager/manager-getreadpreference-001.phpt" role="test" />
<file md5sum="cc6a826770fe32f66d71f6c95aca3d9f" name="tests/manager/manager-getservers-001.phpt" role="test" />
<file md5sum="191acc2f6b43eaba1a1971bcc996491a" name="tests/manager/manager-getservers-002.phpt" role="test" />
<file md5sum="ed0f1d54919b563cf773fed548bc9791" name="tests/manager/manager-getwriteconcern-001.phpt" role="test" />
<file md5sum="f55b83dec74f5c379cf719423abdd703" name="tests/manager/manager-invalidnamespace.phpt" role="test" />
<file md5sum="eabe12d6f73dab29c77417060427c2f3" name="tests/manager/manager-selectserver-001.phpt" role="test" />
<file md5sum="400fbbbfd04b2967e09398c479736aaa" name="tests/manager/manager-selectserver_error-001.phpt" role="test" />
<file md5sum="e420f678f1caf714265dcc79ab5e5a0c" name="tests/manager/manager-set-uri-options-001.phpt" role="test" />
- <file md5sum="a9f200ce067bf02422fb71a091cda8c2" name="tests/manager/manager-set-uri-options-002.phpt" role="test" />
- <file md5sum="3157fde1003b34bbb1648c33bcc63248" name="tests/manager/manager-var-dump-001.phpt" role="test" />
+ <file md5sum="86d7f05c2c9f1a45bbdce9291caeacaf" name="tests/manager/manager-set-uri-options-002.phpt" role="test" />
+ <file md5sum="78de4e0349b6fe31790817a774ce617e" name="tests/manager/manager-var-dump-001.phpt" role="test" />
<file md5sum="8c5241469e089d92c2bea067680504ee" name="tests/manager/manager-wakeup.phpt" role="test" />
<file md5sum="d9723d6bfe30a7e2691ecea5b69c2c13" name="tests/manager/manager_error-001.phpt" role="test" />
<file md5sum="578822c478fbde417eb227006a093825" name="tests/query/bug0430-001.phpt" role="test" />
<file md5sum="30fcad6a0545b8fc8cb4289178ca9a02" name="tests/query/bug0430-002.phpt" role="test" />
<file md5sum="9263e593f34eb2eb6ea56271c3a07956" name="tests/query/bug0430-003.phpt" role="test" />
<file md5sum="f4f292ac47ebaf75cf64ee8566fd16f3" name="tests/query/bug0705-001.phpt" role="test" />
<file md5sum="5b23333b3231f6ab5c76b85bd2678ecc" name="tests/query/bug0705-002.phpt" role="test" />
<file md5sum="e8e5c13b848aafcb5447896b4f79ef25" name="tests/query/query-ctor-001.phpt" role="test" />
<file md5sum="fef63deee234ecd51f99ac063497a762" name="tests/query/query-ctor-002.phpt" role="test" />
<file md5sum="734824edae6a43c5b9c972b14a0bd8b9" name="tests/query/query-ctor-003.phpt" role="test" />
<file md5sum="13e4a9032f4c60549d4efab9fa5d237c" name="tests/query/query-ctor-004.phpt" role="test" />
<file md5sum="71f1fa84dbf281b67d942863248f7e31" name="tests/query/query-ctor-005.phpt" role="test" />
<file md5sum="64bda823995da00efceed4b07627f18f" name="tests/query/query-ctor-006.phpt" role="test" />
- <file md5sum="dee144ec7ba98029329b988545cbc945" name="tests/query/query-ctor_error-001.phpt" role="test" />
+ <file md5sum="3d69ea7fb907dffff88166fbcbe16b91" name="tests/query/query-ctor_error-001.phpt" role="test" />
<file md5sum="714c383312ff70071a673e2928ca8d22" name="tests/query/query-ctor_error-002.phpt" role="test" />
<file md5sum="2238f34b7a0c1b93e6c1b1af5fc7b9b8" name="tests/query/query-ctor_error-003.phpt" role="test" />
<file md5sum="70af7d02949d0bfffa87729079871c74" name="tests/query/query-ctor_error-004.phpt" role="test" />
<file md5sum="21539cda062a422b388591fcda520ef3" name="tests/query/query-ctor_error-005.phpt" role="test" />
<file md5sum="07edfcf218e25dc4cf5fa8b677f6f636" name="tests/query/query-ctor_error-006.phpt" role="test" />
<file md5sum="c3a2e5976d1e8d8e5baca5d4dd796937" name="tests/query/query-debug-001.phpt" role="test" />
<file md5sum="623f5385c7016ba73c3be33c0e9a7b1e" name="tests/query/query_error-001.phpt" role="test" />
- <file md5sum="311b41b1fe94f2770438d09d10a467b1" name="tests/readConcern/readconcern-bsonserialize-001.phpt" role="test" />
- <file md5sum="7cbf142e56e80eb987e2dd184c909628" name="tests/readConcern/readconcern-bsonserialize-002.phpt" role="test" />
- <file md5sum="a589109a7e7260d8752f34f246417b1c" name="tests/readConcern/readconcern-constants.phpt" role="test" />
+ <file md5sum="dcbdfa9e8696be8baec22a8c09ec75d2" name="tests/readConcern/readconcern-bsonserialize-001.phpt" role="test" />
+ <file md5sum="e73ca4a826636a54ffe3ea853df7b0ef" name="tests/readConcern/readconcern-bsonserialize-002.phpt" role="test" />
+ <file md5sum="02ea1e194b1175a8a0c9109e4b2a0c08" name="tests/readConcern/readconcern-constants.phpt" role="test" />
<file md5sum="3e580a5b91bbbbf2a9291b17c0738912" name="tests/readConcern/readconcern-ctor-001.phpt" role="test" />
<file md5sum="3154a148ba05456ed40b44ad7878a852" name="tests/readConcern/readconcern-ctor_error-001.phpt" role="test" />
<file md5sum="c4d0b603d7d6968b8b9e038b1e81c919" name="tests/readConcern/readconcern-ctor_error-002.phpt" role="test" />
- <file md5sum="ff4c5e181a9c3adcc1b7a78bc5748eb0" name="tests/readConcern/readconcern-debug-001.phpt" role="test" />
+ <file md5sum="32d628a71d398bd65e13d2bbe6b63b6c" name="tests/readConcern/readconcern-debug-001.phpt" role="test" />
<file md5sum="a6594e050224e772625deea8052811d6" name="tests/readConcern/readconcern-getlevel-001.phpt" role="test" />
<file md5sum="7bc12b0529eef007f25068e8a410d755" name="tests/readConcern/readconcern-isdefault-001.phpt" role="test" />
<file md5sum="f773914d83286e225a75016f89b6eba6" name="tests/readConcern/readconcern_error-001.phpt" role="test" />
- <file md5sum="b83cf03af044e79dc7b1531832e12b14" name="tests/readPreference/bug0146-001.phpt" role="test" />
- <file md5sum="30bd5e1bf3fa0f61c703376f3ef31799" name="tests/readPreference/bug0146-002.phpt" role="test" />
+ <file md5sum="59266a2366a5508d00c83070ada611e9" name="tests/readPreference/bug0146-001.phpt" role="test" />
+ <file md5sum="2860e1bf33f42a01d4cd7660de8d94d5" name="tests/readPreference/bug0146-002.phpt" role="test" />
<file md5sum="5bbbd73884d0d64989109201e7e1bf3f" name="tests/readPreference/bug0851-001.phpt" role="test" />
<file md5sum="6677621b6743e8d5d17b3cf0d45283d3" name="tests/readPreference/readpreference-bsonserialize-001.phpt" role="test" />
<file md5sum="ff0ce5637f8b68e7cf7c1aa65bedf89f" name="tests/readPreference/readpreference-bsonserialize-002.phpt" role="test" />
<file md5sum="f08d9809213c98acdf84fd9e656910e0" name="tests/readPreference/readpreference-ctor-001.phpt" role="test" />
- <file md5sum="b4e0071e33dd021661ce919f12a45d15" name="tests/readPreference/readpreference-ctor-002.phpt" role="test" />
+ <file md5sum="221ff341be838431d0b6aa95e1680e16" name="tests/readPreference/readpreference-ctor-002.phpt" role="test" />
<file md5sum="475a0c5eff5f7f7a211614819b886648" name="tests/readPreference/readpreference-ctor_error-001.phpt" role="test" />
<file md5sum="a0cdfd5d545b800d979e6610fa8f417a" name="tests/readPreference/readpreference-ctor_error-002.phpt" role="test" />
<file md5sum="ecc2c2b630fc509b9cc13cbaa180071e" name="tests/readPreference/readpreference-ctor_error-003.phpt" role="test" />
<file md5sum="2ecac9178a92b919e2a3d744386b308d" name="tests/readPreference/readpreference-ctor_error-004.phpt" role="test" />
<file md5sum="67904c50feea14b362824470d04a73ea" name="tests/readPreference/readpreference-ctor_error-005.phpt" role="test" />
<file md5sum="7f4fccdf7d3913833ddcb7c7a04feafd" name="tests/readPreference/readpreference-ctor_error-006.phpt" role="test" />
<file md5sum="4708202d52455d5454a03b70d029b97f" name="tests/readPreference/readpreference-debug-001.phpt" role="test" />
<file md5sum="4a4e16d677e9a242b8e2429def65e565" name="tests/readPreference/readpreference-getMaxStalenessMS-001.phpt" role="test" />
<file md5sum="23c503e166045f34625abc42506b80db" name="tests/readPreference/readpreference-getMaxStalenessMS-002.phpt" role="test" />
<file md5sum="2300bc05051a0d0dc183ebe1d699190a" name="tests/readPreference/readpreference-getMode-001.phpt" role="test" />
<file md5sum="2a58ca560ca933e763397fdecd6e1c22" name="tests/readPreference/readpreference-getTagSets-001.phpt" role="test" />
<file md5sum="74a95fd229d950e7116b5ff0fd78db56" name="tests/readPreference/readpreference-getTagSets-002.phpt" role="test" />
<file md5sum="56df736cc8af42557ae036e2eef910a8" name="tests/readPreference/readpreference_error-001.phpt" role="test" />
<file md5sum="c8617acc06d137f3c9db3005c9885366" name="tests/replicaset/bug0155.phpt" role="test" />
<file md5sum="515211c4a4916b62c7b2e85d9b876ec5" name="tests/replicaset/bug0898-001.phpt" role="test" />
<file md5sum="93387b873d1655b41742b1b773b8cad2" name="tests/replicaset/bug0898-002.phpt" role="test" />
<file md5sum="a7e63df8ed345a5b000793a6e059532b" name="tests/replicaset/manager-getservers-001.phpt" role="test" />
<file md5sum="f90f4e17b3356432b68e05d987cb91e3" name="tests/replicaset/manager-selectserver-001.phpt" role="test" />
<file md5sum="936feea8211b9862a77bfa06fd985f43" name="tests/replicaset/readconcern-001.phpt" role="test" />
<file md5sum="5e8a48485b9432ee3a63e3d63050e578" name="tests/replicaset/readconcern-002.phpt" role="test" />
<file md5sum="cbd4c69e674b59884feead771561ca85" name="tests/replicaset/server-001.phpt" role="test" />
<file md5sum="61a3308075601ebc37b2d3aee95500fd" name="tests/replicaset/server-002.phpt" role="test" />
<file md5sum="e50cc6ffa9a60021da209b115c912a95" name="tests/replicaset/writeconcernerror-001.phpt" role="test" />
<file md5sum="51ac0a8da4fafc8b4bddb0fc73903457" name="tests/replicaset/writeconcernerror-002.phpt" role="test" />
<file md5sum="5a868067b8c68bb49beee106f5b2e681" name="tests/replicaset/writeresult-getserver-001.phpt" role="test" />
<file md5sum="ade47c77a748f96d770a2da696def657" name="tests/replicaset/writeresult-getserver-002.phpt" role="test" />
+ <file md5sum="0b34ff7900011d030ab8db31a81de5d1" name="tests/retryable-writes/retryable-writes-001.phpt" role="test" />
+ <file md5sum="8f4312f864d2d4dcafcbccb4495a52ad" name="tests/retryable-writes/retryable-writes-002.phpt" role="test" />
+ <file md5sum="fe46b5c2484268cc8e994589a7dc2c04" name="tests/retryable-writes/retryable-writes-003.phpt" role="test" />
+ <file md5sum="3e7fe9fdc83672b92498d67afc492cbe" name="tests/retryable-writes/retryable-writes-004.phpt" role="test" />
+ <file md5sum="49021f7c9fcd1ea202d3322d617bcd68" name="tests/retryable-writes/retryable-writes-005.phpt" role="test" />
<file md5sum="6e83981a37e2c04bf2f61ad3f94af12e" name="tests/server/bug0671-002.phpt" role="test" />
<file md5sum="75a4ec8e23ef050d7057e855fff2f5f6" name="tests/server/server-constants.phpt" role="test" />
<file md5sum="3ab793258b26d778e2b7e306dbdf2947" name="tests/server/server-construct-001.phpt" role="test" />
- <file md5sum="8297af536b148b8c171971a8778ecb14" name="tests/server/server-debug.phpt" role="test" />
+ <file md5sum="f3b7b722723c12d7874ce0864c4f9473" name="tests/server/server-debug.phpt" role="test" />
<file md5sum="d814cc3637b676a293ddc5606420fa1d" name="tests/server/server-errors.phpt" role="test" />
<file md5sum="d35aa52f3190d27db25128ba00328a84" name="tests/server/server-executeBulkWrite-001.phpt" role="test" />
<file md5sum="ea0086480a8cbaae15a1751fdce48617" name="tests/server/server-executeBulkWrite-002.phpt" role="test" />
- <file md5sum="53780e03f672d499183545c1e20a0daa" name="tests/server/server-executeBulkWrite-003.phpt" role="test" />
- <file md5sum="9b4e23b508dd5b503c877580f925a26b" name="tests/server/server-executeBulkWrite-004.phpt" role="test" />
+ <file md5sum="6904e07ffb2dae78bbc65ba514bc2afc" name="tests/server/server-executeBulkWrite-003.phpt" role="test" />
+ <file md5sum="16613906f088e4a1ea133ac146d144e4" name="tests/server/server-executeBulkWrite-004.phpt" role="test" />
<file md5sum="a436b448ea05806fa07ff06ba29db547" name="tests/server/server-executeBulkWrite-005.phpt" role="test" />
+ <file md5sum="3b9451bae0e966140bdb708722419075" name="tests/server/server-executeBulkWrite-006.phpt" role="test" />
+ <file md5sum="54a5aab0d9460347ca1972466e228fb2" name="tests/server/server-executeBulkWrite-007.phpt" role="test" />
<file md5sum="b68333752a41e89ad9de9f97e869db45" name="tests/server/server-executeBulkWrite_error-001.phpt" role="test" />
+ <file md5sum="22f4361c300057e5c1b54eb1b2ec60ba" name="tests/server/server-executeBulkWrite_error-002.phpt" role="test" />
<file md5sum="b26b52aaae1b5ce0dad185a22e2f1296" name="tests/server/server-executeCommand-001.phpt" role="test" />
- <file md5sum="cac9aa5902387fd7c9056fe22db0ff3b" name="tests/server/server-executeCommand-002.phpt" role="test" />
- <file md5sum="77366626f284d5f6c28b4ad75f872f69" name="tests/server/server-executeCommand-003.phpt" role="test" />
+ <file md5sum="f66be52b2bbce68113dc2108f2a7ac31" name="tests/server/server-executeCommand-002.phpt" role="test" />
+ <file md5sum="0fd359c6e0a7b8bf58e630613eca797d" name="tests/server/server-executeCommand-003.phpt" role="test" />
+ <file md5sum="4a9d1bf6e6142b93f3f4e52ceb02eaba" name="tests/server/server-executeCommand-004.phpt" role="test" />
+ <file md5sum="00dc18612da1b5937c3e9eb2fc03db4f" name="tests/server/server-executeCommand-005.phpt" role="test" />
+ <file md5sum="0119050f509d6b75809b98ab5db70da7" name="tests/server/server-executeCommand-006.phpt" role="test" />
+ <file md5sum="2525f2a5020d9c1f2415543838e8349d" name="tests/server/server-executeCommand_error-001.phpt" role="test" />
<file md5sum="1e1399a4558a7d195709a0beeda4fffc" name="tests/server/server-executeQuery-001.phpt" role="test" />
<file md5sum="9b5213961e50487ab850c9edfa1212cf" name="tests/server/server-executeQuery-002.phpt" role="test" />
<file md5sum="31ec575f8ac5b522ee859130e1aa18f4" name="tests/server/server-executeQuery-003.phpt" role="test" />
<file md5sum="db8fd646ecd3b9a424a6bef9cd4b246f" name="tests/server/server-executeQuery-004.phpt" role="test" />
<file md5sum="1d6574f124e04e3863ddb75a9f03a771" name="tests/server/server-executeQuery-005.phpt" role="test" />
- <file md5sum="9a69b4c9008b318c02d4e48de25997aa" name="tests/server/server-executeQuery-006.phpt" role="test" />
+ <file md5sum="ed88694fa79cf21d4a0c68c2abd71c87" name="tests/server/server-executeQuery-006.phpt" role="test" />
<file md5sum="69e1d381848a7f6d3b7629a27f3dcaae" name="tests/server/server-executeQuery-007.phpt" role="test" />
<file md5sum="a3f5bd0996eb71e293b345af26e18fc6" name="tests/server/server-executeQuery-008.phpt" role="test" />
- <file md5sum="36757ff8a7d9867a199c211cd8dbb2fc" name="tests/server/server-getInfo-001.phpt" role="test" />
+ <file md5sum="45e62eae31e73af52a4aed6397c2350f" name="tests/server/server-executeQuery-009.phpt" role="test" />
+ <file md5sum="95a9bdb26c4dcc6e1f0e4c572c39ba4d" name="tests/server/server-executeQuery-010.phpt" role="test" />
+ <file md5sum="392dbeaafe9fed88c9eb6a23df4a5f5e" name="tests/server/server-executeQuery_error-001.phpt" role="test" />
+ <file md5sum="c287602460573f9617d776682393d2e6" name="tests/server/server-executeReadCommand-001.phpt" role="test" />
+ <file md5sum="c9776ed3e8f906cd4c8f8a9983e7e950" name="tests/server/server-executeReadCommand_error-001.phpt" role="test" />
+ <file md5sum="d041eeaed48ef2e490ef85ab859b9aef" name="tests/server/server-executeReadWriteCommand-001.phpt" role="test" />
+ <file md5sum="1a97b18f21eb61cec2ec7c8037105547" name="tests/server/server-executeReadWriteCommand_error-001.phpt" role="test" />
+ <file md5sum="e236cd6801b85bdde85303f6e7f97259" name="tests/server/server-executeWriteCommand-001.phpt" role="test" />
+ <file md5sum="42975aaf7e2e5b1ec8710b5182d5f5ab" name="tests/server/server-executeWriteCommand_error-001.phpt" role="test" />
+ <file md5sum="56bdf15ac4bbb212b1703112ca4c9921" name="tests/server/server-getInfo-001.phpt" role="test" />
<file md5sum="a07507189a1cf7a9cf3a58bc4e9297f4" name="tests/server/server-getTags-001.phpt" role="test" />
<file md5sum="3a6fb65e184ddafa6532e28182304b6b" name="tests/server/server-getTags-002.phpt" role="test" />
<file md5sum="9aefd9967eed1cb334069bc28d435b71" name="tests/server/server_error-001.phpt" role="test" />
+ <file md5sum="2c2a22f287d0906c21b8a929c8060b3b" name="tests/session/session-001.phpt" role="test" />
+ <file md5sum="c2a166d3847865f1ea396222b8b23a5c" name="tests/session/session-002.phpt" role="test" />
+ <file md5sum="b68990354447f7adb8d9a9803d65a223" name="tests/session/session-003.phpt" role="test" />
+ <file md5sum="ffeb23492ca4f4b1987b6b5f48c6ff95" name="tests/session/session-advanceClusterTime-001.phpt" role="test" />
+ <file md5sum="57566af7b22b503fbf6aca0fa8d8f598" name="tests/session/session-advanceOperationTime-001.phpt" role="test" />
+ <file md5sum="9c4cbd7f864f10534bdbd65e3ece055e" name="tests/session/session-advanceOperationTime-002.phpt" role="test" />
+ <file md5sum="16e9da42e9c6523015f9e32873595617" name="tests/session/session-advanceOperationTime-003.phpt" role="test" />
+ <file md5sum="ac80c032afae33e3fbbfcf68f06d03a0" name="tests/session/session-advanceOperationTime_error-001.phpt" role="test" />
+ <file md5sum="28040e7c3f6af48c1f6aa978a86dd44e" name="tests/session/session-debug-001.phpt" role="test" />
+ <file md5sum="666b71def24e5fe00b111ec98c7fd0da" name="tests/session/session-debug-002.phpt" role="test" />
+ <file md5sum="2878fa262ff338aae9efe3a44e355299" name="tests/session/session-debug-003.phpt" role="test" />
+ <file md5sum="1316f0c9bff288d708d39dc6d21bc6af" name="tests/session/session-getClusterTime-001.phpt" role="test" />
+ <file md5sum="c01c45f5cf7897516d3e2a0984c5465a" name="tests/session/session-getLogicalSessionId-001.phpt" role="test" />
+ <file md5sum="0e8bfc939cbede7ca8e6dba34385214e" name="tests/session/session-getOperationTime-001.phpt" role="test" />
<file md5sum="af985ddcede3e694fe26dd02a7b08f96" name="tests/standalone/bug0166.phpt" role="test" />
<file md5sum="5903bd001a11ca4052af551f99a13282" name="tests/standalone/bug0231.phpt" role="test" />
<file md5sum="508f7f23c5b80875cf8893f11fd636b0" name="tests/standalone/bug0357.phpt" role="test" />
<file md5sum="1598a14a9d5574cab5e49b8a42a95d60" name="tests/standalone/bug0545.phpt" role="test" />
<file md5sum="08af799afac3ebe4478c44523f19af60" name="tests/standalone/bug0655.phpt" role="test" />
<file md5sum="b59208bcbc4702a23f40b4ae99cdcbd8" name="tests/standalone/command-aggregate-001.phpt" role="test" />
<file md5sum="20df46b18b099e15b2637ab7ec6ac232" name="tests/standalone/connectiontimeoutexception-001.phpt" role="test" />
<file md5sum="f948319b24f2599ca7e4c9da3bcf8974" name="tests/standalone/executiontimeoutexception-001.phpt" role="test" />
<file md5sum="4141be498e7c13e724f2dd3baa099620" name="tests/standalone/executiontimeoutexception-002.phpt" role="test" />
<file md5sum="6708621c6c2c09bc2f261dc92c965bf9" name="tests/standalone/manager-as-singleton.phpt" role="test" />
<file md5sum="6bc5f1fba6c3ad151704fd8500d50183" name="tests/standalone/query-errors.phpt" role="test" />
<file md5sum="c52cd4b33ae4c254a12a383a56deb323" name="tests/standalone/update-multi-001.phpt" role="test" />
<file md5sum="d15611b314dcde7d6cd5069545524b0a" name="tests/standalone/write-error-001.phpt" role="test" />
<file md5sum="383c7d82bd2ceee77ee90a7620dcabbb" name="tests/standalone/writeresult-isacknowledged-001.phpt" role="test" />
<file md5sum="aac042fb1b20aae6154fd6ca92f2157c" name="tests/standalone/writeresult-isacknowledged-002.phpt" role="test" />
<file md5sum="4404cc45687afb99790d9bd032e0ca34" name="tests/standalone/writeresult-isacknowledged-003.phpt" role="test" />
<file md5sum="59adebd29ef983f00c4d55cfc4b2d10d" name="tests/utils/PHONGO-FIXTURES.json.gz" role="test" />
<file md5sum="8eb155fe0ba18304305ec5e7095ac938" name="tests/utils/basic-skipif.inc" role="test" />
- <file md5sum="c571c88edecbca6abf8b386960e02e56" name="tests/utils/basic.inc" role="test" />
+ <file md5sum="7af51e9a50e08ebfa3604c3390daa02d" name="tests/utils/basic.inc" role="test" />
<file md5sum="d45f34ff6fd0f526099f3131d5d17b11" name="tests/utils/classes.inc" role="test" />
- <file md5sum="d528e67192e5a8801769cb5cec15f8f1" name="tests/utils/tools.php" role="test" />
+ <file md5sum="4134acafdc5eb51800213b41043116ba" name="tests/utils/observer.php" role="test" />
+ <file md5sum="c877cabcb3a189aea6259e222cf7d00f" name="tests/utils/tools.php" role="test" />
<file md5sum="daabc03629dbb55aa9dd13c8e61c5697" name="tests/writeConcern/writeconcern-bsonserialize-001.phpt" role="test" />
<file md5sum="340ab895d2d78ec8703db01eeed96e45" name="tests/writeConcern/writeconcern-bsonserialize-002.phpt" role="test" />
<file md5sum="364b6092a91f4bf8761fc554aa8062cb" name="tests/writeConcern/writeconcern-constants.phpt" role="test" />
<file md5sum="4b14f66f2d087ed69223a978e6551dfd" name="tests/writeConcern/writeconcern-ctor-001.phpt" role="test" />
<file md5sum="9d8a5d5fbb5180c1e1a73b4885f256fd" name="tests/writeConcern/writeconcern-ctor_error-001.phpt" role="test" />
- <file md5sum="46a1f16b20000db3726900b4f80945eb" name="tests/writeConcern/writeconcern-ctor_error-002.phpt" role="test" />
+ <file md5sum="db2aab736895b4173b06d63495269f77" name="tests/writeConcern/writeconcern-ctor_error-002.phpt" role="test" />
<file md5sum="56e3baaa054aadf32901781a09d330e1" name="tests/writeConcern/writeconcern-ctor_error-003.phpt" role="test" />
<file md5sum="9ca99acf66a4972b548678347236c25f" name="tests/writeConcern/writeconcern-ctor_error-004.phpt" role="test" />
<file md5sum="39969dc8700891be79d6ef6ca630f867" name="tests/writeConcern/writeconcern-ctor_error-005.phpt" role="test" />
<file md5sum="6514bd694b43ee6f3c2313c741740dc6" name="tests/writeConcern/writeconcern-debug-001.phpt" role="test" />
<file md5sum="0b650ccbc1a3fa831dd666e4b00c5c1a" name="tests/writeConcern/writeconcern-debug-002.phpt" role="test" />
<file md5sum="536335a91a175bce9f9a5c03991298ca" name="tests/writeConcern/writeconcern-debug-003.phpt" role="test" />
<file md5sum="afc708c9f9d830d1c3b7cf60333f1fce" name="tests/writeConcern/writeconcern-getjournal-001.phpt" role="test" />
<file md5sum="d4c305c451a28b591db0160e9f851ee5" name="tests/writeConcern/writeconcern-getw-001.phpt" role="test" />
<file md5sum="ddb88acb62b9b89316f92e3bb6076a48" name="tests/writeConcern/writeconcern-getwtimeout-001.phpt" role="test" />
- <file md5sum="c00557fec000c5e18cb211449619a6b7" name="tests/writeConcern/writeconcern-isdefault-001.phpt" role="test" />
+ <file md5sum="7ac2556e20b9fda6524d291b695f9241" name="tests/writeConcern/writeconcern-isdefault-001.phpt" role="test" />
<file md5sum="14ec52ea19befdf082e3ee270fa7d8ea" name="tests/writeConcern/writeconcern_error-001.phpt" role="test" />
<file md5sum="f52312ad42a59df5c4b7aa8b7c68ecb6" name="tests/writeConcernError/writeconcernerror-debug-001.phpt" role="test" />
<file md5sum="6ffa193d3057cdf7250aa22b4503f22a" name="tests/writeConcernError/writeconcernerror-debug-002.phpt" role="test" />
<file md5sum="7d95b775cc4a62bb7d2a11e3785dcb1b" name="tests/writeConcernError/writeconcernerror-getcode-001.phpt" role="test" />
<file md5sum="c9856cb44f879a1e579dfd866af1ab53" name="tests/writeConcernError/writeconcernerror-getinfo-001.phpt" role="test" />
<file md5sum="7c6ebbb1dd2ee986508fe8397e153d03" name="tests/writeConcernError/writeconcernerror-getinfo-002.phpt" role="test" />
<file md5sum="e65c6074dde4b06f455a6c92408efd6a" name="tests/writeConcernError/writeconcernerror-getmessage-001.phpt" role="test" />
<file md5sum="69820b84c701fe8480e6e8014a14189c" name="tests/writeConcernError/writeconcernerror_error-001.phpt" role="test" />
<file md5sum="6b50008f602e02df57c6ca8d754bf9fb" name="tests/writeError/writeerror-debug-001.phpt" role="test" />
<file md5sum="c68137434860272c34d6fdab81745af9" name="tests/writeError/writeerror-getCode-001.phpt" role="test" />
<file md5sum="cfab2d426da2a70803f06d0cd8f1626d" name="tests/writeError/writeerror-getIndex-001.phpt" role="test" />
<file md5sum="9259deabdd58d5ff02f3a343b0d8b9cc" name="tests/writeError/writeerror-getInfo-001.phpt" role="test" />
<file md5sum="b5494e965a30be5e7e45fea29beb93d6" name="tests/writeError/writeerror-getMessage-001.phpt" role="test" />
<file md5sum="ba12ea26054c04774c4d6a3f35094c28" name="tests/writeError/writeerror_error-001.phpt" role="test" />
<file md5sum="48e919cba457ad5b6cb1b93e191d6f67" name="tests/writeResult/bug0671-003.phpt" role="test" />
<file md5sum="d71f62390232216fbb35479752b3a44b" name="tests/writeResult/writeresult-debug-001.phpt" role="test" />
<file md5sum="3fa7df302c11e23e7b727cebd7247a05" name="tests/writeResult/writeresult-debug-002.phpt" role="test" />
<file md5sum="159e750e99721bd9359c5ff9a3748fbf" name="tests/writeResult/writeresult-getdeletedcount-001.phpt" role="test" />
<file md5sum="31a2b905964fb53650aa8f314f7f6e6d" name="tests/writeResult/writeresult-getdeletedcount-002.phpt" role="test" />
<file md5sum="e141af3cb5b467e685038490deda8a8a" name="tests/writeResult/writeresult-getinsertedcount-001.phpt" role="test" />
<file md5sum="01cf21ef49cc068a5967f234feb1dfa4" name="tests/writeResult/writeresult-getinsertedcount-002.phpt" role="test" />
<file md5sum="a4488abb4a776b41ff25a2e2c2592262" name="tests/writeResult/writeresult-getmatchedcount-001.phpt" role="test" />
<file md5sum="ec7badb12a5ae4bbf58ee282281a4e37" name="tests/writeResult/writeresult-getmatchedcount-002.phpt" role="test" />
<file md5sum="6b159235ed5a186acb55244be4773f40" name="tests/writeResult/writeresult-getmodifiedcount-001.phpt" role="test" />
<file md5sum="4492c077e1cd88cb975c4b63311849cb" name="tests/writeResult/writeresult-getmodifiedcount-002.phpt" role="test" />
- <file md5sum="2f691549a9d4b6d31bb862b83fdad40d" name="tests/writeResult/writeresult-getmodifiedcount-003.phpt" role="test" />
<file md5sum="a7c2d46172e1e53e6167915d78f2eae9" name="tests/writeResult/writeresult-getserver-001.phpt" role="test" />
<file md5sum="f6639f7c5a837f60f8c885ebe40c5600" name="tests/writeResult/writeresult-getupsertedcount-001.phpt" role="test" />
<file md5sum="4cd0c1d1294440a7911c297f2033b4a0" name="tests/writeResult/writeresult-getupsertedcount-002.phpt" role="test" />
<file md5sum="0b1fb8df7b965ccac06715ac34028ec1" name="tests/writeResult/writeresult-getupsertedids-001.phpt" role="test" />
- <file md5sum="7185bb3bbf4d1a1042f8003b3303fa7f" name="tests/writeResult/writeresult-getupsertedids-002.phpt" role="test" />
+ <file md5sum="c7e66897d603fc5cfb13fb474fd01451" name="tests/writeResult/writeresult-getupsertedids-002.phpt" role="test" />
<file md5sum="b72dab900139c6e0314be393894aec33" name="tests/writeResult/writeresult-getwriteconcernerror-001.phpt" role="test" />
<file md5sum="f36518058929ea9ae049cd325095732e" name="tests/writeResult/writeresult-getwriteerrors-001.phpt" role="test" />
<file md5sum="1baeaf136a2bc2e840d7b4296fbbc6ad" name="tests/writeResult/writeresult-getwriteerrors-002.phpt" role="test" />
<file md5sum="9521e479ae14eebc6e8b940afca83b86" name="tests/writeResult/writeresult-isacknowledged-001.phpt" role="test" />
<file md5sum="fdd3ef0a514427c85fb91ff8a1cff3f6" name="tests/writeResult/writeresult_error-001.phpt" role="test" />
<file md5sum="08a5fb008b50bb39301edc6130dc7a08" name="CREDITS" role="doc" />
<file md5sum="b1e01b26bacfc2232046c90a330332b3" name="LICENSE" role="doc" />
- <file md5sum="973c67f802546ff6a45d4787fd050d82" name="Makefile.frag" role="src" />
+ <file md5sum="38ed9cb1475f93621c4bbea917679b36" name="Makefile.frag" role="src" />
<file md5sum="64d70e2c61c2c5f7652d90de6bc9661f" name="README.md" role="doc" />
- <file md5sum="422ba17467e9aa074a0856a15519f543" name="Vagrantfile" role="test" />
- <file md5sum="f73fe51fcdfe9cb253a12e4636198cb4" name="config.m4" role="src" />
- <file md5sum="ec74f56fed3ed76e580d305dc8f82f37" name="config.w32" role="src" />
+ <file md5sum="9f17bdfaf93df1cb869cd434a1ec6555" name="Vagrantfile" role="test" />
+ <file md5sum="ba5420bcbb6a87b384ef7b03d5dc3ccb" name="config.m4" role="src" />
+ <file md5sum="ab69898a118d42c85d584cf1bbf157bf" name="config.w32" role="src" />
<file md5sum="7cee65a3fcc059894e7badf41fcc6c93" name="phongo_compat.c" role="src" />
- <file md5sum="d749c11c2e081f7b5fc2ef67a8329ccf" name="phongo_compat.h" role="src" />
+ <file md5sum="2bae068a57d748119312a66a0ad745b3" name="phongo_compat.h" role="src" />
<file md5sum="2393062993ffa891c74461b651227d44" name="php_bson.h" role="src" />
- <file md5sum="763f538a7ef042116f569e2f7ceb2651" name="php_phongo.c" role="src" />
- <file md5sum="295b6166c9aa9e38d4e8fe434276f8c5" name="php_phongo.h" role="src" />
- <file md5sum="5b8018934c396002d6343831c7bd39fb" name="php_phongo_classes.h" role="src" />
- <file md5sum="8b9e95ade5d785aeccde773f9af48c34" name="php_phongo_structs.h" role="src" />
+ <file md5sum="9302b1707199cdf72ecd1a3ac017f3d1" name="php_phongo.c" role="src" />
+ <file md5sum="74a3c9dea9aea4d7b349963858891301" name="php_phongo.h" role="src" />
+ <file md5sum="1c58f5425bf151fb6226e30094a593ec" name="php_phongo_classes.h" role="src" />
+ <file md5sum="42efe282caab2a1c072e5f6d0ae03cd4" name="php_phongo_structs.h" role="src" />
</dir>
</contents>
<dependencies>
<required>
<php>
<min>5.5.0</min>
<max>7.99.99</max>
</php>
<pearinstaller>
<min>1.4.8</min>
</pearinstaller>
</required>
</dependencies>
<providesextension>mongodb</providesextension>
<extsrcrelease />
</package>

File Metadata

Mime Type
application/octet-stream
Expires
Sat, Sep 13, 8:58 PM (2 d)
Storage Engine
chunks
Storage Format
Chunks
Storage Handle
Po6B1d3jle30
Default Alt Text
(4 MB)

Event Timeline